network code: Convert if_multiaddrs from LIST to TAILQ.
[dragonfly.git] sys/dev/netif/ath/ath/if_ath.c
1/*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 *
29 * $FreeBSD: head/sys/dev/ath/if_ath.c 203751 2010-02-10 11:12:39Z rpaulo $");
30 */
31
32/*
33 * Driver for the Atheros Wireless LAN controller.
34 *
35 * This software is derived from work of Atsushi Onoe; his contribution
36 * is greatly appreciated.
37 */
38
39#include "opt_inet.h"
40#include "opt_ath.h"
41#include "opt_wlan.h"
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/sysctl.h>
46#include <sys/mbuf.h>
47#include <sys/malloc.h>
48#include <sys/lock.h>
49#include <sys/mutex.h>
50#include <sys/kernel.h>
51#include <sys/socket.h>
52#include <sys/sockio.h>
53#include <sys/errno.h>
54#include <sys/callout.h>
55#include <sys/bus.h>
56#include <sys/endian.h>
57#include <sys/kthread.h>
58#include <sys/taskqueue.h>
59#include <sys/priv.h>
60
61#include <net/if.h>
62#include <net/if_dl.h>
63#include <net/if_media.h>
64#include <net/if_types.h>
65#include <net/if_arp.h>
66#include <net/if_llc.h>
67#include <net/ifq_var.h>
68
69#include <netproto/802_11/ieee80211_var.h>
70#include <netproto/802_11/ieee80211_regdomain.h>
71#ifdef IEEE80211_SUPPORT_SUPERG
72#include <netproto/802_11/ieee80211_superg.h>
73#endif
74#ifdef IEEE80211_SUPPORT_TDMA
75#include <netproto/802_11/ieee80211_tdma.h>
76#endif
77
78#include <net/bpf.h>
79
80#ifdef INET
81#include <netinet/in.h>
82#include <netinet/if_ether.h>
83#endif
84
85#include <dev/netif/ath/ath/if_athvar.h>
86#include <dev/netif/ath/hal/ath_hal/ah_devid.h> /* XXX for softled */
87
88#ifdef ATH_TX99_DIAG
89#include <dev/netif/ath_tx99/ath_tx99.h>
90#endif
91
92/*
93 * ATH_BCBUF determines the number of vap's that can transmit
94 * beacons and also (currently) the number of vap's that can
95 * have unique mac addresses/bssid. When staggering beacons
96 * 4 is probably a good max as otherwise the beacons become
97 * very closely spaced and there is limited time for cab q traffic
98 * to go out. You can burst beacons instead but that is not good
99 * for stations in power save and at some point you really want
100 * another radio (and channel).
101 *
102 * The limit on the number of mac addresses is tied to our use of
103 * the U/L bit and tracking addresses in a byte; it would be
104 * worthwhile to allow more for applications like proxy sta.
105 */
106CTASSERT(ATH_BCBUF <= 8);
107
108/* unaligned little endian access */
109#define LE_READ_2(p) \
110 ((u_int16_t) \
111 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8)))
112#define LE_READ_4(p) \
113 ((u_int32_t) \
114 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \
115 (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
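/*
 * NB: for illustration (not part of the original source) -- given p pointing
 * at the bytes {0x34, 0x12, 0xcd, 0xab}, LE_READ_2(p) yields 0x1234 and
 * LE_READ_4(p) yields 0xabcd1234; the bytes are fetched one at a time, so
 * unaligned pointers are safe.
 */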
116
117static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
118 const char name[IFNAMSIZ], int unit, int opmode,
119 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
120 const uint8_t mac[IEEE80211_ADDR_LEN]);
121static void ath_vap_delete(struct ieee80211vap *);
122static void ath_init(void *);
123static void ath_stop_locked(struct ifnet *);
124static void ath_stop(struct ifnet *);
125static void ath_start(struct ifnet *);
126static int ath_reset(struct ifnet *);
127static int ath_reset_vap(struct ieee80211vap *, u_long);
128static int ath_media_change(struct ifnet *);
129static void ath_watchdog(void *);
130static int ath_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
131static void ath_fatal_proc(void *, int);
132static void ath_bmiss_vap(struct ieee80211vap *);
133static void ath_bmiss_proc(void *, int);
134static int ath_keyset(struct ath_softc *, const struct ieee80211_key *,
135 struct ieee80211_node *);
136static int ath_key_alloc(struct ieee80211vap *,
137 struct ieee80211_key *,
138 ieee80211_keyix *, ieee80211_keyix *);
139static int ath_key_delete(struct ieee80211vap *,
140 const struct ieee80211_key *);
141static int ath_key_set(struct ieee80211vap *, const struct ieee80211_key *,
142 const u_int8_t mac[IEEE80211_ADDR_LEN]);
143static void ath_key_update_begin(struct ieee80211vap *);
144static void ath_key_update_end(struct ieee80211vap *);
145static void ath_update_mcast(struct ifnet *);
146static void ath_update_promisc(struct ifnet *);
147static void ath_mode_init(struct ath_softc *);
148static void ath_setslottime(struct ath_softc *);
149static void ath_updateslot(struct ifnet *);
150static int ath_beaconq_setup(struct ath_hal *);
151static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
152static void ath_beacon_update(struct ieee80211vap *, int item);
153static void ath_beacon_setup(struct ath_softc *, struct ath_buf *);
154static void ath_beacon_proc(void *, int);
155static struct ath_buf *ath_beacon_generate(struct ath_softc *,
156 struct ieee80211vap *);
157static void ath_bstuck_proc(void *, int);
158static void ath_beacon_return(struct ath_softc *, struct ath_buf *);
159static void ath_beacon_free(struct ath_softc *);
160static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
161static void ath_descdma_cleanup(struct ath_softc *sc,
162 struct ath_descdma *, ath_bufhead *);
163static int ath_desc_alloc(struct ath_softc *);
164static void ath_desc_free(struct ath_softc *);
165static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
166 const uint8_t [IEEE80211_ADDR_LEN]);
167static void ath_node_free(struct ieee80211_node *);
168static void ath_node_getsignal(const struct ieee80211_node *,
169 int8_t *, int8_t *);
170static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
171static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
172 int subtype, int rssi, int nf);
173static void ath_setdefantenna(struct ath_softc *, u_int);
174static void ath_rx_proc(void *, int);
175static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
176static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
177static int ath_tx_setup(struct ath_softc *, int, int);
178static int ath_wme_update(struct ieee80211com *);
179static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
180static void ath_tx_cleanup(struct ath_softc *);
181static void ath_freetx(struct mbuf *);
182static int ath_tx_start(struct ath_softc *, struct ieee80211_node *,
183 struct ath_buf *, struct mbuf *);
184static void ath_tx_proc_q0(void *, int);
185static void ath_tx_proc_q0123(void *, int);
186static void ath_tx_proc(void *, int);
187static void ath_tx_draintxq(struct ath_softc *, struct ath_txq *);
188static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
189static void ath_draintxq(struct ath_softc *);
190static void ath_stoprecv(struct ath_softc *);
191static int ath_startrecv(struct ath_softc *);
192static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
193static void ath_scan_start(struct ieee80211com *);
194static void ath_scan_end(struct ieee80211com *);
195static void ath_set_channel(struct ieee80211com *);
196static void ath_calibrate(void *);
197static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
198static void ath_setup_stationkey(struct ieee80211_node *);
199static void ath_newassoc(struct ieee80211_node *, int);
200static int ath_setregdomain(struct ieee80211com *,
201 struct ieee80211_regdomain *, int,
202 struct ieee80211_channel []);
203static void ath_getradiocaps(struct ieee80211com *, int, int *,
204 struct ieee80211_channel []);
205static int ath_getchannels(struct ath_softc *);
206static void ath_led_event(struct ath_softc *, int);
207
208static int ath_rate_setup(struct ath_softc *, u_int mode);
209static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
210
211static void ath_sysctlattach(struct ath_softc *);
212static int ath_raw_xmit(struct ieee80211_node *,
213 struct mbuf *, const struct ieee80211_bpf_params *);
214static void ath_announce(struct ath_softc *);
215
216#ifdef IEEE80211_SUPPORT_TDMA
217static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
218 u_int32_t bintval);
219static void ath_tdma_bintvalsetup(struct ath_softc *sc,
220 const struct ieee80211_tdma_state *tdma);
221static void ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
222static void ath_tdma_update(struct ieee80211_node *ni,
223 const struct ieee80211_tdma_param *tdma, int);
224static void ath_tdma_beacon_send(struct ath_softc *sc,
225 struct ieee80211vap *vap);
226
227static __inline void
228ath_hal_setcca(struct ath_hal *ah, int ena)
229{
230 /*
231 * NB: fill me in; this is not provided by default because disabling
232 * CCA in most locales violates regulatory.
233 */
234}
235
236static __inline int
237ath_hal_getcca(struct ath_hal *ah)
238{
239 u_int32_t diag;
240 if (ath_hal_getcapability(ah, HAL_CAP_DIAG, 0, &diag) != HAL_OK)
241 return 1;
242 return ((diag & 0x500000) == 0);
243}
244
245#define TDMA_EP_MULTIPLIER (1<<10) /* pow2 to optimize out * and / */
246#define TDMA_LPF_LEN 6
247#define TDMA_DUMMY_MARKER 0x127
248#define TDMA_EP_MUL(x, mul) ((x) * (mul))
249#define TDMA_IN(x) (TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
250#define TDMA_LPF(x, y, len) \
251 ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
252#define TDMA_SAMPLE(x, y) do { \
253 x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN); \
254} while (0)
255#define TDMA_EP_RND(x,mul) \
256 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
257#define TDMA_AVG(x) TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
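/*
 * NB: worked example of the fixed-point averaging above (illustrative, not
 * from the original source): a new sample y = 3 enters scaled as
 * TDMA_IN(3) = 3072 (units of 1/TDMA_EP_MULTIPLIER); with a current filter
 * value x = 2048 (i.e. 2.0), TDMA_LPF(x, 3072, TDMA_LPF_LEN) =
 * (2048*5 + 3072)/6 = 2218, and TDMA_AVG(2218) rounds back to 2.
 * TDMA_DUMMY_MARKER simply makes the first sample pass straight through,
 * seeding the filter.
 */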
258#endif /* IEEE80211_SUPPORT_TDMA */
259
260SYSCTL_DECL(_hw_ath);
261
262/* XXX validate sysctl values */
263static int ath_longcalinterval = 30; /* long cals every 30 secs */
264SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
265 0, "long chip calibration interval (secs)");
266static int ath_shortcalinterval = 100; /* short cals every 100 ms */
267SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
268 0, "short chip calibration interval (msecs)");
269static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */
270SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
271 0, "reset chip calibration results (secs)");
272
273static int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
274SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
275 0, "rx buffers allocated");
276TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
277static int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
278SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
279 0, "tx buffers allocated");
280TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
281
282static int ath_bstuck_threshold = 4; /* max missed beacons */
283SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
284 0, "max missed beacon xmits before chip reset");
285
286#ifdef ATH_DEBUG
287enum {
288 ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
289 ATH_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
290 ATH_DEBUG_RECV = 0x00000004, /* basic recv operation */
291 ATH_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
292 ATH_DEBUG_RATE = 0x00000010, /* rate control */
293 ATH_DEBUG_RESET = 0x00000020, /* reset processing */
294 ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */
295 ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */
296 ATH_DEBUG_WATCHDOG = 0x00000100, /* watchdog timeout */
297 ATH_DEBUG_INTR = 0x00001000, /* ISR */
298 ATH_DEBUG_TX_PROC = 0x00002000, /* tx ISR proc */
299 ATH_DEBUG_RX_PROC = 0x00004000, /* rx ISR proc */
300 ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */
301 ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */
302 ATH_DEBUG_KEYCACHE = 0x00020000, /* key cache management */
303 ATH_DEBUG_STATE = 0x00040000, /* 802.11 state transitions */
304 ATH_DEBUG_NODE = 0x00080000, /* node management */
305 ATH_DEBUG_LED = 0x00100000, /* led management */
306 ATH_DEBUG_FF = 0x00200000, /* fast frames */
307 ATH_DEBUG_DFS = 0x00400000, /* DFS processing */
308 ATH_DEBUG_TDMA = 0x00800000, /* TDMA processing */
309 ATH_DEBUG_TDMA_TIMER = 0x01000000, /* TDMA timer processing */
310 ATH_DEBUG_REGDOMAIN = 0x02000000, /* regulatory processing */
311 ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */
312 ATH_DEBUG_ANY = 0xffffffff
313};
314static int ath_debug = 0;
315SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug,
316 0, "control debugging printfs");
317TUNABLE_INT("hw.ath.debug", &ath_debug);
318
319#define IFF_DUMPPKTS(sc, m) \
320 ((sc->sc_debug & (m)) || \
321 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
322#define DPRINTF(sc, m, fmt, ...) do { \
323 if (sc->sc_debug & (m)) \
324 kprintf(fmt, __VA_ARGS__); \
325} while (0)
326#define KEYPRINTF(sc, ix, hk, mac) do { \
327 if (sc->sc_debug & ATH_DEBUG_KEYCACHE) \
328 ath_keyprint(sc, __func__, ix, hk, mac); \
329} while (0)
330static void ath_printrxbuf(struct ath_softc *, const struct ath_buf *bf,
331 u_int ix, int);
332static void ath_printtxbuf(struct ath_softc *, const struct ath_buf *bf,
333 u_int qnum, u_int ix, int done);
334#else
335#define IFF_DUMPPKTS(sc, m) \
336 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
337#define DPRINTF(sc, m, fmt, ...) do { \
338 (void) sc; \
339} while (0)
340#define KEYPRINTF(sc, k, ix, mac) do { \
341 (void) sc; \
342} while (0)
343#endif
344
345MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
346
347int
348ath_attach(u_int16_t devid, struct ath_softc *sc)
349{
350 struct ifnet *ifp;
351 struct ieee80211com *ic;
352 struct ath_hal *ah = NULL;
353 HAL_STATUS status;
354 int error = 0, i;
355 u_int wmodes;
356 uint8_t macaddr[IEEE80211_ADDR_LEN];
357
358 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
359
360 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
361 if (ifp == NULL) {
362 device_printf(sc->sc_dev, "can not if_alloc()\n");
363 error = ENOSPC;
364 goto bad;
365 }
366 ic = ifp->if_l2com;
367
368 /* set these up early for if_printf use */
369 if_initname(ifp, device_get_name(sc->sc_dev),
370 device_get_unit(sc->sc_dev));
371
372 /* prepare sysctl tree for use in sub modules */
373 sysctl_ctx_init(&sc->sc_sysctl_ctx);
374 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
375 SYSCTL_STATIC_CHILDREN(_hw),
376 OID_AUTO,
377 device_get_nameunit(sc->sc_dev),
378 CTLFLAG_RD, 0, "");
379
380 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status);
381 if (ah == NULL) {
382 if_printf(ifp, "unable to attach hardware; HAL status %u\n",
383 status);
384 error = ENXIO;
385 goto bad;
386 }
387 sc->sc_ah = ah;
388 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */
389#ifdef ATH_DEBUG
390 sc->sc_debug = ath_debug;
391#endif
392
393 /*
394 * Check if the MAC has multi-rate retry support.
395 * We do this by trying to setup a fake extended
396 * descriptor. MAC's that don't have support will
397 * return false w/o doing anything. MAC's that do
398 * support it will return true w/o doing anything.
399 */
400 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);
401
402 /*
403 * Check if the device has hardware counters for PHY
404 * errors. If so we need to enable the MIB interrupt
405 * so we can act on stat triggers.
406 */
407 if (ath_hal_hwphycounters(ah))
408 sc->sc_needmib = 1;
409
410 /*
411 * Get the hardware key cache size.
412 */
413 sc->sc_keymax = ath_hal_keycachesize(ah);
414 if (sc->sc_keymax > ATH_KEYMAX) {
415 if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
416 ATH_KEYMAX, sc->sc_keymax);
417 sc->sc_keymax = ATH_KEYMAX;
418 }
419 /*
420 * Reset the key cache since some parts do not
421 * reset the contents on initial power up.
422 */
423 for (i = 0; i < sc->sc_keymax; i++)
424 ath_hal_keyreset(ah, i);
425
426 /*
427 * Collect the default channel list.
428 */
429 error = ath_getchannels(sc);
430 if (error != 0)
431 goto bad;
432
433 /*
434 * Setup rate tables for all potential media types.
435 */
436 ath_rate_setup(sc, IEEE80211_MODE_11A);
437 ath_rate_setup(sc, IEEE80211_MODE_11B);
438 ath_rate_setup(sc, IEEE80211_MODE_11G);
439 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
440 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
441 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
442 ath_rate_setup(sc, IEEE80211_MODE_11NA);
443 ath_rate_setup(sc, IEEE80211_MODE_11NG);
444 ath_rate_setup(sc, IEEE80211_MODE_HALF);
445 ath_rate_setup(sc, IEEE80211_MODE_QUARTER);
446
447 /* NB: setup here so ath_rate_update is happy */
448 ath_setcurmode(sc, IEEE80211_MODE_11A);
449
450 /*
451 * Allocate tx+rx descriptors and populate the lists.
452 */
453 error = ath_desc_alloc(sc);
454 if (error != 0) {
455 if_printf(ifp, "failed to allocate descriptors: %d\n", error);
456 goto bad;
457 }
458 callout_init(&sc->sc_cal_ch);
459 callout_init(&sc->sc_wd_ch);
460
461 ATH_TXBUF_LOCK_INIT(sc);
462
463 sc->sc_tq = taskqueue_create("ath_taskq", M_INTWAIT,
464 taskqueue_thread_enqueue, &sc->sc_tq);
465 taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1,
466 "%s taskq", ifp->if_xname);
467
468 TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
469 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
470 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
471
472 /*
473 * Allocate hardware transmit queues: one queue for
474 * beacon frames and one data queue for each QoS
475 * priority. Note that the hal handles resetting
476 * these queues at the needed time.
477 *
478 * XXX PS-Poll
479 */
480 sc->sc_bhalq = ath_beaconq_setup(ah);
481 if (sc->sc_bhalq == (u_int) -1) {
482 if_printf(ifp, "unable to setup a beacon xmit queue!\n");
483 error = EIO;
484 goto bad2;
485 }
486 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
487 if (sc->sc_cabq == NULL) {
488 if_printf(ifp, "unable to setup CAB xmit queue!\n");
489 error = EIO;
490 goto bad2;
491 }
492 /* NB: insure BK queue is the lowest priority h/w queue */
493 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
494 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
495 ieee80211_wme_acnames[WME_AC_BK]);
496 error = EIO;
497 goto bad2;
498 }
499 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
500 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
501 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
502 /*
503 * Not enough hardware tx queues to properly do WME;
504 * just punt and assign them all to the same h/w queue.
505 * We could do a better job of this if, for example,
506 * we allocate queues when we switch from station to
507 * AP mode.
508 */
509 if (sc->sc_ac2q[WME_AC_VI] != NULL)
510 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
511 if (sc->sc_ac2q[WME_AC_BE] != NULL)
512 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
513 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
514 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
515 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
516 }
517
518 /*
519 * Special case certain configurations. Note the
520 * CAB queue is handled by these specially so don't
521 * include them when checking the txq setup mask.
522 */
523 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
524 case 0x01:
525 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
526 break;
527 case 0x0f:
528 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
529 break;
530 default:
531 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
532 break;
533 }
534
535 /*
536 * Setup rate control. Some rate control modules
537 * call back to change the antenna state so expose
538 * the necessary entry points.
539 * XXX maybe belongs in struct ath_ratectrl?
540 */
541 sc->sc_setdefantenna = ath_setdefantenna;
542 sc->sc_rc = ath_rate_attach(sc);
543 if (sc->sc_rc == NULL) {
544 error = EIO;
545 goto bad2;
546 }
547
548 sc->sc_blinking = 0;
549 sc->sc_ledstate = 1;
550 sc->sc_ledon = 0; /* low true */
551 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */
552 callout_init_mp(&sc->sc_ledtimer);
553 /*
554 * Auto-enable soft led processing for IBM cards and for
555 * 5211 minipci cards. Users can also manually enable/disable
556 * support with a sysctl.
557 */
558 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
559 if (sc->sc_softled) {
560 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
561 HAL_GPIO_MUX_MAC_NETWORK_LED);
562 ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
563 }
564
565 ifp->if_softc = sc;
566 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
567 ifp->if_start = ath_start;
568 ifp->if_ioctl = ath_ioctl;
569 ifp->if_init = ath_init;
570 ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
571 ifq_set_ready(&ifp->if_snd);
572
573 ic->ic_ifp = ifp;
574 /* XXX not right but it's not used anywhere important */
575 ic->ic_phytype = IEEE80211_T_OFDM;
576 ic->ic_opmode = IEEE80211_M_STA;
577 ic->ic_caps =
578 IEEE80211_C_STA /* station mode */
579 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
580 | IEEE80211_C_HOSTAP /* hostap mode */
581 | IEEE80211_C_MONITOR /* monitor mode */
582 | IEEE80211_C_AHDEMO /* adhoc demo mode */
583 | IEEE80211_C_WDS /* 4-address traffic works */
584 | IEEE80211_C_MBSS /* mesh point link mode */
585 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
586 | IEEE80211_C_SHSLOT /* short slot time supported */
587 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
588 | IEEE80211_C_BGSCAN /* capable of bg scanning */
589 | IEEE80211_C_TXFRAG /* handle tx frags */
590 ;
591 /*
592 * Query the hal to figure out h/w crypto support.
593 */
594 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
595 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
596 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
597 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
598 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
599 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
600 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
601 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
602 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
603 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
604 /*
605 * Check if h/w does the MIC and/or whether the
606 * separate key cache entries are required to
607 * handle both tx+rx MIC keys.
608 */
609 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
610 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
611 /*
612 * If the h/w supports storing tx+rx MIC keys
613 * in one cache slot automatically enable use.
614 */
615 if (ath_hal_hastkipsplit(ah) ||
616 !ath_hal_settkipsplit(ah, AH_FALSE))
617 sc->sc_splitmic = 1;
618 /*
619 * If the h/w can do TKIP MIC together with WME then
620 * we use it; otherwise we force the MIC to be done
621 * in software by the net80211 layer.
622 */
623 if (ath_hal_haswmetkipmic(ah))
624 sc->sc_wmetkipmic = 1;
625 }
626 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
627 /*
628 * Check for multicast key search support.
629 */
630 if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
631 !ath_hal_getmcastkeysearch(sc->sc_ah)) {
632 ath_hal_setmcastkeysearch(sc->sc_ah, 1);
633 }
634 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
635 /*
636 * Mark key cache slots associated with global keys
637 * as in use. If we knew TKIP was not to be used we
638 * could leave the +32, +64, and +32+64 slots free.
639 */
640 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
641 setbit(sc->sc_keymap, i);
642 setbit(sc->sc_keymap, i+64);
643 if (sc->sc_splitmic) {
644 setbit(sc->sc_keymap, i+32);
645 setbit(sc->sc_keymap, i+32+64);
646 }
647 }
648 /*
649 * TPC support can be done either with a global cap or
650 * per-packet support. The latter is not available on
651 * all parts. We're a bit pedantic here as all parts
652 * support a global cap.
653 */
654 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
655 ic->ic_caps |= IEEE80211_C_TXPMGT;
656
657 /*
658 * Mark WME capability only if we have sufficient
659 * hardware queues to do proper priority scheduling.
660 */
661 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
662 ic->ic_caps |= IEEE80211_C_WME;
663 /*
664 * Check for misc other capabilities.
665 */
666 if (ath_hal_hasbursting(ah))
667 ic->ic_caps |= IEEE80211_C_BURST;
668 sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
669 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
670 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
671 if (ath_hal_hasfastframes(ah))
672 ic->ic_caps |= IEEE80211_C_FF;
673 wmodes = ath_hal_getwirelessmodes(ah);
674 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
675 ic->ic_caps |= IEEE80211_C_TURBOP;
676#ifdef IEEE80211_SUPPORT_TDMA
677 if (ath_hal_macversion(ah) > 0x78) {
678 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
679 ic->ic_tdma_update = ath_tdma_update;
680 }
681#endif
682 /*
683 * Indicate we need the 802.11 header padded to a
684 * 32-bit boundary for 4-address and QoS frames.
685 */
686 ic->ic_flags |= IEEE80211_F_DATAPAD;
687
688 /*
689 * Query the hal about antenna support.
690 */
691 sc->sc_defant = ath_hal_getdefantenna(ah);
692
693 /*
694 * Not all chips have the VEOL support we want to
695 * use with IBSS beacons; check here for it.
696 */
697 sc->sc_hasveol = ath_hal_hasveol(ah);
698
699 /* get mac address from hardware */
700 ath_hal_getmac(ah, macaddr);
701 if (sc->sc_hasbmask)
702 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);
703
704 /* NB: used to size node table key mapping array */
705 ic->ic_max_keyix = sc->sc_keymax;
706 /* call MI attach routine. */
707 ieee80211_ifattach(ic, macaddr);
708 ic->ic_setregdomain = ath_setregdomain;
709 ic->ic_getradiocaps = ath_getradiocaps;
710 sc->sc_opmode = HAL_M_STA;
711
712 /* override default methods */
713 ic->ic_newassoc = ath_newassoc;
714 ic->ic_updateslot = ath_updateslot;
715 ic->ic_wme.wme_update = ath_wme_update;
716 ic->ic_vap_create = ath_vap_create;
717 ic->ic_vap_delete = ath_vap_delete;
718 ic->ic_raw_xmit = ath_raw_xmit;
719 ic->ic_update_mcast = ath_update_mcast;
720 ic->ic_update_promisc = ath_update_promisc;
721 ic->ic_node_alloc = ath_node_alloc;
722 sc->sc_node_free = ic->ic_node_free;
723 ic->ic_node_free = ath_node_free;
724 ic->ic_node_getsignal = ath_node_getsignal;
725 ic->ic_scan_start = ath_scan_start;
726 ic->ic_scan_end = ath_scan_end;
727 ic->ic_set_channel = ath_set_channel;
728
729 ieee80211_radiotap_attach(ic,
730 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
731 ATH_TX_RADIOTAP_PRESENT,
732 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
733 ATH_RX_RADIOTAP_PRESENT);
734
735 /*
736 * Setup dynamic sysctl's now that country code and
737 * regdomain are available from the hal.
738 */
739 ath_sysctlattach(sc);
740
741 if (bootverbose)
742 ieee80211_announce(ic);
743 ath_announce(sc);
744 return 0;
745bad2:
746 ath_tx_cleanup(sc);
747 ath_desc_free(sc);
748bad:
749 if (ah)
750 ath_hal_detach(ah);
751 if (ifp != NULL)
752 if_free(ifp);
753 sc->sc_invalid = 1;
754 return error;
755}
756
757int
758ath_detach(struct ath_softc *sc)
759{
760 struct ifnet *ifp = sc->sc_ifp;
761
762 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
763 __func__, ifp->if_flags);
764
765 /*
766 * NB: the order of these is important:
767 * o stop the chip so no more interrupts will fire
768 * o call the 802.11 layer before detaching the hal to
769 * insure callbacks into the driver to delete global
770 * key cache entries can be handled
771 * o free the taskqueue which drains any pending tasks
772 * o reclaim the tx queue data structures after calling
773 * the 802.11 layer as we'll get called back to reclaim
774 * node state and potentially want to use them
775 * o to cleanup the tx queues the hal is called, so detach
776 * it last
777 * Other than that, it's straightforward...
778 */
779 ath_stop(ifp);
780 ieee80211_ifdetach(ifp->if_l2com);
781 taskqueue_free(sc->sc_tq);
782#ifdef ATH_TX99_DIAG
783 if (sc->sc_tx99 != NULL)
784 sc->sc_tx99->detach(sc->sc_tx99);
785#endif
786 ath_rate_detach(sc->sc_rc);
787 ath_desc_free(sc);
788 ath_tx_cleanup(sc);
789 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
790 if (sc->sc_sysctl_tree) {
791 sysctl_ctx_free(&sc->sc_sysctl_ctx);
792 sc->sc_sysctl_tree = NULL;
793 }
794 if_free(ifp);
795
796 return 0;
797}
798
799/*
800 * MAC address handling for multiple BSS on the same radio.
801 * The first vap uses the MAC address from the EEPROM. For
802 * subsequent vap's we set the U/L bit (bit 1) in the MAC
803 * address and use the next six bits as an index.
804 */
805static void
806assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
807{
808 int i;
809
810 if (clone && sc->sc_hasbmask) {
811 /* NB: we only do this if h/w supports multiple bssid */
812 for (i = 0; i < 8; i++)
813 if ((sc->sc_bssidmask & (1<<i)) == 0)
814 break;
815 if (i != 0)
816 mac[0] |= (i << 2)|0x2;
817 } else
818 i = 0;
819 sc->sc_bssidmask |= 1<<i;
820 sc->sc_hwbssidmask[0] &= ~mac[0];
821 if (i == 0)
822 sc->sc_nbssid0++;
823}
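/*
 * NB: for illustration (not part of the original source) -- with an EEPROM
 * address whose mac[0] is 0x00, the first vap (i == 0) keeps the address
 * unchanged; a second vap gets index i = 1, so mac[0] |= (1 << 2) | 0x2
 * produces 0x06: the locally administered bit plus the index encoded above
 * it.  The same bits are cleared in sc_hwbssidmask so the hardware ignores
 * them when matching the BSSID.
 */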
824
825static void
826reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
827{
828 int i = mac[0] >> 2;
829 uint8_t mask;
830
831 if (i != 0 || --sc->sc_nbssid0 == 0) {
832 sc->sc_bssidmask &= ~(1<<i);
833 /* recalculate bssid mask from remaining addresses */
834 mask = 0xff;
835 for (i = 1; i < 8; i++)
836 if (sc->sc_bssidmask & (1<<i))
837 mask &= ~((i<<2)|0x2);
838 sc->sc_hwbssidmask[0] |= mask;
839 }
840}
841
842/*
843 * Assign a beacon xmit slot. We try to space out
844 * assignments so when beacons are staggered the
845 * traffic coming out of the cab q has maximal time
846 * to go out before the next beacon is scheduled.
847 */
848static int
849assign_bslot(struct ath_softc *sc)
850{
851 u_int slot, free;
852
853 free = 0;
854 for (slot = 0; slot < ATH_BCBUF; slot++)
855 if (sc->sc_bslot[slot] == NULL) {
856 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
857 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
858 return slot;
859 free = slot;
860 /* NB: keep looking for a double slot */
861 }
862 return free;
863}
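/*
 * NB: when slot is 0 the (slot-1) term wraps as an unsigned value; the
 * modulo then lands on the last slot provided ATH_BCBUF is a power of two
 * (the typical configuration), so the neighbor check is effectively
 * circular.
 */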
864
865static struct ieee80211vap *
866ath_vap_create(struct ieee80211com *ic,
867 const char name[IFNAMSIZ], int unit, int opmode, int flags,
868 const uint8_t bssid[IEEE80211_ADDR_LEN],
869 const uint8_t mac0[IEEE80211_ADDR_LEN])
870{
871 struct ath_softc *sc = ic->ic_ifp->if_softc;
872 struct ath_vap *avp;
873 struct ieee80211vap *vap;
874 uint8_t mac[IEEE80211_ADDR_LEN];
875 int ic_opmode, needbeacon, error;
876
877 avp = (struct ath_vap *) kmalloc(sizeof(struct ath_vap),
878 M_80211_VAP, M_WAITOK | M_ZERO);
879 needbeacon = 0;
880 IEEE80211_ADDR_COPY(mac, mac0);
881
882 ATH_LOCK(sc);
883 ic_opmode = opmode; /* default to opmode of new vap */
884 switch (opmode) {
885 case IEEE80211_M_STA:
886 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */
887 device_printf(sc->sc_dev, "only 1 sta vap supported\n");
888 goto bad;
889 }
890 if (sc->sc_nvaps) {
891 /*
892 * With multiple vaps we must fall back
893 * to s/w beacon miss handling.
894 */
895 flags |= IEEE80211_CLONE_NOBEACONS;
896 }
897 if (flags & IEEE80211_CLONE_NOBEACONS) {
898 /*
899 * Station mode w/o beacons is implemented w/ AP mode.
900 */
901 ic_opmode = IEEE80211_M_HOSTAP;
902 }
903 break;
904 case IEEE80211_M_IBSS:
905 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */
906 device_printf(sc->sc_dev,
907 "only 1 ibss vap supported\n");
908 goto bad;
909 }
910 needbeacon = 1;
911 break;
912 case IEEE80211_M_AHDEMO:
913#ifdef IEEE80211_SUPPORT_TDMA
914 if (flags & IEEE80211_CLONE_TDMA) {
915 if (sc->sc_nvaps != 0) {
916 device_printf(sc->sc_dev,
917 "only 1 tdma vap supported\n");
918 goto bad;
919 }
920 needbeacon = 1;
921 flags |= IEEE80211_CLONE_NOBEACONS;
922 }
923 /* fall thru... */
924#endif
925 case IEEE80211_M_MONITOR:
926 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
927 /*
928 * Adopt existing mode. Adding a monitor or ahdemo
929 * vap to an existing configuration is of dubious
930 * value but should be ok.
931 */
932 /* XXX not right for monitor mode */
933 ic_opmode = ic->ic_opmode;
934 }
935 break;
936 case IEEE80211_M_HOSTAP:
937 case IEEE80211_M_MBSS:
938 needbeacon = 1;
939 break;
940 case IEEE80211_M_WDS:
941 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
942 device_printf(sc->sc_dev,
943 "wds not supported in sta mode\n");
944 goto bad;
945 }
946 /*
947 * Silently remove any request for a unique
948 * bssid; WDS vap's always share the local
949 * mac address.
950 */
951 flags &= ~IEEE80211_CLONE_BSSID;
952 if (sc->sc_nvaps == 0)
953 ic_opmode = IEEE80211_M_HOSTAP;
954 else
955 ic_opmode = ic->ic_opmode;
956 break;
957 default:
958 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
959 goto bad;
960 }
961 /*
962 * Check that a beacon buffer is available; the code below assumes it.
963 */
964 if (needbeacon && STAILQ_EMPTY(&sc->sc_bbuf)) {
965 device_printf(sc->sc_dev, "no beacon buffer available\n");
966 goto bad;
967 }
968
969 /* STA, AHDEMO? */
970 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
971 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
972 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
973 }
974
975 vap = &avp->av_vap;
976 /* XXX can't hold mutex across if_alloc */
977 ATH_UNLOCK(sc);
978 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
979 bssid, mac);
980 ATH_LOCK(sc);
981 if (error != 0) {
982 device_printf(sc->sc_dev, "%s: error %d creating vap\n",
983 __func__, error);
984 goto bad2;
985 }
986
987 /* h/w crypto support */
988 vap->iv_key_alloc = ath_key_alloc;
989 vap->iv_key_delete = ath_key_delete;
990 vap->iv_key_set = ath_key_set;
991 vap->iv_key_update_begin = ath_key_update_begin;
992 vap->iv_key_update_end = ath_key_update_end;
993
994 /* override various methods */
995 avp->av_recv_mgmt = vap->iv_recv_mgmt;
996 vap->iv_recv_mgmt = ath_recv_mgmt;
997 vap->iv_reset = ath_reset_vap;
998 vap->iv_update_beacon = ath_beacon_update;
999 avp->av_newstate = vap->iv_newstate;
1000 vap->iv_newstate = ath_newstate;
1001 avp->av_bmiss = vap->iv_bmiss;
1002 vap->iv_bmiss = ath_bmiss_vap;
1003
1004 avp->av_bslot = -1;
1005 if (needbeacon) {
1006 /*
1007 * Allocate beacon state and setup the q for buffered
1008 * multicast frames. We know a beacon buffer is
1009 * available because we checked above.
1010 */
1011 avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf);
1012 STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list);
1013 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
1014 /*
1015 * Assign the vap to a beacon xmit slot. As above
1016 * this cannot fail to find a free one.
1017 */
1018 avp->av_bslot = assign_bslot(sc);
1019 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1020 ("beacon slot %u not empty", avp->av_bslot));
1021 sc->sc_bslot[avp->av_bslot] = vap;
1022 sc->sc_nbcnvaps++;
1023 }
1024 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1025 /*
1026 * Multiple vaps are to transmit beacons and we
1027 * have h/w support for TSF adjusting; enable
1028 * use of staggered beacons.
1029 */
1030 sc->sc_stagbeacons = 1;
1031 }
1032 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1033 }
1034
1035 ic->ic_opmode = ic_opmode;
1036 if (opmode != IEEE80211_M_WDS) {
1037 sc->sc_nvaps++;
1038 if (opmode == IEEE80211_M_STA)
1039 sc->sc_nstavaps++;
1040 if (opmode == IEEE80211_M_MBSS)
1041 sc->sc_nmeshvaps++;
1042 }
1043 switch (ic_opmode) {
1044 case IEEE80211_M_IBSS:
1045 sc->sc_opmode = HAL_M_IBSS;
1046 break;
1047 case IEEE80211_M_STA:
1048 sc->sc_opmode = HAL_M_STA;
1049 break;
1050 case IEEE80211_M_AHDEMO:
1051#ifdef IEEE80211_SUPPORT_TDMA
1052 if (vap->iv_caps & IEEE80211_C_TDMA) {
1053 sc->sc_tdma = 1;
1054 /* NB: disable tsf adjust */
1055 sc->sc_stagbeacons = 0;
1056 }
1057 /*
1058 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1059 * just ap mode.
1060 */
1061 /* fall thru... */
1062#endif
1063 case IEEE80211_M_HOSTAP:
1064 case IEEE80211_M_MBSS:
1065 sc->sc_opmode = HAL_M_HOSTAP;
1066 break;
1067 case IEEE80211_M_MONITOR:
1068 sc->sc_opmode = HAL_M_MONITOR;
1069 break;
1070 default:
1071 /* XXX should not happen */
1072 break;
1073 }
1074 if (sc->sc_hastsfadd) {
1075 /*
1076 * Configure whether or not TSF adjust should be done.
1077 */
1078 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1079 }
1080 if (flags & IEEE80211_CLONE_NOBEACONS) {
1081 /*
1082 * Enable s/w beacon miss handling.
1083 */
1084 sc->sc_swbmiss = 1;
1085 }
1086 ATH_UNLOCK(sc);
1087
1088 /* complete setup */
1089 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1090 return vap;
1091bad2:
1092 reclaim_address(sc, mac);
1093 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1094bad:
1095 kfree(avp, M_80211_VAP);
1096 ATH_UNLOCK(sc);
1097 return NULL;
1098}
1099
1100static void
1101ath_vap_delete(struct ieee80211vap *vap)
1102{
1103 struct ieee80211com *ic = vap->iv_ic;
1104 struct ifnet *ifp = ic->ic_ifp;
1105 struct ath_softc *sc = ifp->if_softc;
1106 struct ath_hal *ah = sc->sc_ah;
1107 struct ath_vap *avp = ATH_VAP(vap);
1108
1109 if (ifp->if_flags & IFF_RUNNING) {
1110 /*
1111 * Quiesce the hardware while we remove the vap. In
1112 * particular we need to reclaim all references to
1113 * the vap state by any frames pending on the tx queues.
1114 */
1115 ath_hal_intrset(ah, 0); /* disable interrupts */
1116 ath_draintxq(sc); /* stop xmit side */
1117 ath_stoprecv(sc); /* stop recv side */
1118 }
1119
1120 ieee80211_vap_detach(vap);
1121 ATH_LOCK(sc);
1122 /*
1123 * Reclaim beacon state. Note this must be done before
1124 * the vap instance is reclaimed as we may have a reference
1125 * to it in the buffer for the beacon frame.
1126 */
1127 if (avp->av_bcbuf != NULL) {
1128 if (avp->av_bslot != -1) {
1129 sc->sc_bslot[avp->av_bslot] = NULL;
1130 sc->sc_nbcnvaps--;
1131 }
1132 ath_beacon_return(sc, avp->av_bcbuf);
1133 avp->av_bcbuf = NULL;
1134 if (sc->sc_nbcnvaps == 0) {
1135 sc->sc_stagbeacons = 0;
1136 if (sc->sc_hastsfadd)
1137 ath_hal_settsfadjust(sc->sc_ah, 0);
1138 }
1139 /*
1140 * Reclaim any pending mcast frames for the vap.
1141 */
1142 ath_tx_draintxq(sc, &avp->av_mcastq);
1143 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
1144 }
1145 /*
1146 * Update bookkeeping.
1147 */
1148 if (vap->iv_opmode == IEEE80211_M_STA) {
1149 sc->sc_nstavaps--;
1150 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
1151 sc->sc_swbmiss = 0;
1152 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1153 vap->iv_opmode == IEEE80211_M_MBSS) {
1154 reclaim_address(sc, vap->iv_myaddr);
1155 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
1156 if (vap->iv_opmode == IEEE80211_M_MBSS)
1157 sc->sc_nmeshvaps--;
1158 }
1159 if (vap->iv_opmode != IEEE80211_M_WDS)
1160 sc->sc_nvaps--;
1161#ifdef IEEE80211_SUPPORT_TDMA
1162 /* TDMA operation ceases when the last vap is destroyed */
1163 if (sc->sc_tdma && sc->sc_nvaps == 0) {
1164 sc->sc_tdma = 0;
1165 sc->sc_swbmiss = 0;
1166 }
1167#endif
1168 ATH_UNLOCK(sc);
1169 kfree(avp, M_80211_VAP);
1170
1171 if (ifp->if_flags & IFF_RUNNING) {
1172 /*
1173 * Restart rx+tx machines if still running (RUNNING will
1174 * be reset if we just destroyed the last vap).
1175 */
1176 if (ath_startrecv(sc) != 0)
1177 if_printf(ifp, "%s: unable to restart recv logic\n",
1178 __func__);
1179 if (sc->sc_beacons) { /* restart beacons */
1180#ifdef IEEE80211_SUPPORT_TDMA
1181 if (sc->sc_tdma)
1182 ath_tdma_config(sc, NULL);
1183 else
1184#endif
1185 ath_beacon_config(sc, NULL);
1186 }
1187 ath_hal_intrset(ah, sc->sc_imask);
1188 }
1189}
1190
1191void
1192ath_suspend(struct ath_softc *sc)
1193{
1194 struct ifnet *ifp = sc->sc_ifp;
1195 struct ieee80211com *ic = ifp->if_l2com;
1196
1197 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1198 __func__, ifp->if_flags);
1199
1200 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1201 if (ic->ic_opmode == IEEE80211_M_STA)
1202 ath_stop(ifp);
1203 else
1204 ieee80211_suspend_all(ic);
1205 /*
1206 * NB: don't worry about putting the chip in low power
1207 * mode; pci will power off our socket on suspend and
1208 * CardBus detaches the device.
1209 */
1210}
1211
1212/*
1213 * Reset the key cache since some parts do not reset the
1214 * contents on resume. First we clear all entries, then
1215 * re-load keys that the 802.11 layer assumes are setup
1216 * in h/w.
1217 */
1218static void
1219ath_reset_keycache(struct ath_softc *sc)
1220{
1221 struct ifnet *ifp = sc->sc_ifp;
1222 struct ieee80211com *ic = ifp->if_l2com;
1223 struct ath_hal *ah = sc->sc_ah;
1224 int i;
1225
1226 for (i = 0; i < sc->sc_keymax; i++)
1227 ath_hal_keyreset(ah, i);
1228 ieee80211_crypto_reload_keys(ic);
1229}
1230
1231void
1232ath_resume(struct ath_softc *sc)
1233{
1234 struct ifnet *ifp = sc->sc_ifp;
1235 struct ieee80211com *ic = ifp->if_l2com;
1236 struct ath_hal *ah = sc->sc_ah;
1237 HAL_STATUS status;
1238
1239 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1240 __func__, ifp->if_flags);
1241
1242 /*
1243 * Must reset the chip before we reload the
1244 * keycache as we were powered down on suspend.
1245 */
1246 ath_hal_reset(ah, sc->sc_opmode,
1247 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1248 AH_FALSE, &status);
1249 ath_reset_keycache(sc);
1250 if (sc->sc_resume_up) {
1251 if (ic->ic_opmode == IEEE80211_M_STA) {
1252 ath_init(sc);
1253 /*
1254 * Program the beacon registers using the last rx'd
1255 * beacon frame and enable sync on the next beacon
1256 * we see. This should handle the case where we
1257 * wakeup and find the same AP and also the case where
1258 * we wakeup and need to roam. For the latter we
1259 * should get bmiss events that trigger a roam.
1260 */
1261 ath_beacon_config(sc, NULL);
1262 sc->sc_syncbeacon = 1;
1263 } else
1264 ieee80211_resume_all(ic);
1265 }
1266 if (sc->sc_softled) {
1267 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
1268 HAL_GPIO_MUX_MAC_NETWORK_LED);
1269 ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
1270 }
1271}
1272
1273void
1274ath_shutdown(struct ath_softc *sc)
1275{
1276 struct ifnet *ifp = sc->sc_ifp;
1277
1278 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1279 __func__, ifp->if_flags);
1280
1281 ath_stop(ifp);
1282 /* NB: no point powering down chip as we're about to reboot */
1283}
1284
1285/*
1286 * Interrupt handler. Most of the actual processing is deferred.
1287 */
1288void
1289ath_intr(void *arg)
1290{
1291 struct ath_softc *sc = arg;
1292 struct ifnet *ifp = sc->sc_ifp;
1293 struct ath_hal *ah = sc->sc_ah;
1294 HAL_INT status;
1295
1296 if (sc->sc_invalid) {
1297 /*
1298 * The hardware is not ready/present, don't touch anything.
1299 * Note this can happen early on if the IRQ is shared.
1300 */
1301 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1302 return;
1303 }
1304 if (!ath_hal_intrpend(ah)) /* shared irq, not for us */
1305 return;
1306 if ((ifp->if_flags & IFF_UP) == 0 ||
1307 (ifp->if_flags & IFF_RUNNING) == 0) {
1308 HAL_INT status;
1309
1310 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1311 __func__, ifp->if_flags);
1312 ath_hal_getisr(ah, &status); /* clear ISR */
1313 ath_hal_intrset(ah, 0); /* disable further intr's */
1314 return;
1315 }
1316 /*
1317 * Figure out the reason(s) for the interrupt. Note
1318 * that the hal returns a pseudo-ISR that may include
1319 * bits we haven't explicitly enabled so we mask the
1320 * value to insure we only process bits we requested.
1321 */
1322 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
1323 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1324 status &= sc->sc_imask; /* discard unasked for bits */
1325 if (status & HAL_INT_FATAL) {
1326 sc->sc_stats.ast_hardware++;
1327 ath_hal_intrset(ah, 0); /* disable intr's until reset */
1328 ath_fatal_proc(sc, 0);
1329 } else {
1330 if (status & HAL_INT_SWBA) {
1331 /*
1332 * Software beacon alert--time to send a beacon.
1333 * Handle beacon transmission directly; deferring
1334 * this is too slow to meet timing constraints
1335 * under load.
1336 */
1337#ifdef IEEE80211_SUPPORT_TDMA
1338 if (sc->sc_tdma) {
1339 if (sc->sc_tdmaswba == 0) {
1340 struct ieee80211com *ic = ifp->if_l2com;
1341 struct ieee80211vap *vap =
1342 TAILQ_FIRST(&ic->ic_vaps);
1343 ath_tdma_beacon_send(sc, vap);
1344 sc->sc_tdmaswba =
1345 vap->iv_tdma->tdma_bintval;
1346 } else
1347 sc->sc_tdmaswba--;
1348 } else
1349#endif
1350 {
1351 ath_beacon_proc(sc, 0);
1352#ifdef IEEE80211_SUPPORT_SUPERG
1353 /*
1354 * Schedule the rx taskq in case there's no
1355 * traffic so any frames held on the staging
1356 * queue are aged and potentially flushed.
1357 */
1358 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1359#endif
1360 }
1361 }
1362 if (status & HAL_INT_RXEOL) {
1363 /*
1364 * NB: the hardware should re-read the link when
1365 * RXE bit is written, but it doesn't work at
1366 * least on older hardware revs.
1367 */
1368 sc->sc_stats.ast_rxeol++;
1369 sc->sc_rxlink = NULL;
1370 }
1371 if (status & HAL_INT_TXURN) {
1372 sc->sc_stats.ast_txurn++;
1373 /* bump tx trigger level */
1374 ath_hal_updatetxtriglevel(ah, AH_TRUE);
1375 }
1376 if (status & HAL_INT_RX)
1377 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1378 if (status & HAL_INT_TX)
1379 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1380 if (status & HAL_INT_BMISS) {
1381 sc->sc_stats.ast_bmiss++;
1382 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1383 }
1384 if (status & HAL_INT_MIB) {
1385 sc->sc_stats.ast_mib++;
1386 /*
1387 * Disable interrupts until we service the MIB
1388 * interrupt; otherwise it will continue to fire.
1389 */
1390 ath_hal_intrset(ah, 0);
1391 /*
1392 * Let the hal handle the event. We assume it will
1393 * clear whatever condition caused the interrupt.
1394 */
1395 ath_hal_mibevent(ah, &sc->sc_halstats);
1396 ath_hal_intrset(ah, sc->sc_imask);
1397 }
1398 if (status & HAL_INT_RXORN) {
1399 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1400 sc->sc_stats.ast_rxorn++;
1401 }
1402 }
1403}
1404
1405static void
1406ath_fatal_proc(void *arg, int pending)
1407{
1408 struct ath_softc *sc = arg;
1409 struct ifnet *ifp = sc->sc_ifp;
1410 u_int32_t *state;
1411 u_int32_t len;
1412 void *sp;
1413
1414 if_printf(ifp, "hardware error; resetting\n");
1415 /*
1416 * Fatal errors are unrecoverable. Typically these
1417 * are caused by DMA errors. Collect h/w state from
1418 * the hal so we can diagnose what's going on.
1419 */
1420 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1421 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1422 state = sp;
1423 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1424 state[0], state[1] , state[2], state[3],
1425 state[4], state[5]);
1426 }
1427 ath_reset(ifp);
1428}
1429
1430static void
1431ath_bmiss_vap(struct ieee80211vap *vap)
1432{
1433 /*
1434 * Workaround phantom bmiss interrupts by sanity-checking
1435 * the time of our last rx'd frame. If it is within the
1436 * beacon miss interval then ignore the interrupt. If it's
1437 * truly a bmiss we'll get another interrupt soon and that'll
1438 * be dispatched up for processing. Note this applies only
1439 * for h/w beacon miss events.
1440 */
1441 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
1442 struct ifnet *ifp = vap->iv_ic->ic_ifp;
1443 struct ath_softc *sc = ifp->if_softc;
1444 u_int64_t lastrx = sc->sc_lastrx;
1445 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
1446 u_int bmisstimeout =
1447 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
1448
1449 DPRINTF(sc, ATH_DEBUG_BEACON,
1450 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
1451 __func__, (unsigned long long) tsf,
1452 (unsigned long long)(tsf - lastrx),
1453 (unsigned long long) lastrx, bmisstimeout);
1454
1455 if (tsf - lastrx <= bmisstimeout) {
1456 sc->sc_stats.ast_bmiss_phantom++;
1457 return;
1458 }
1459 }
1460 ATH_VAP(vap)->av_bmiss(vap);
1461}
1462
1463static int
1464ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1465{
1466 uint32_t rsize;
1467 void *sp;
1468
1469 if (!ath_hal_getdiagstate(ah, 32, &mask, sizeof(mask), &sp, &rsize))
1470 return 0;
1471 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1472 *hangs = *(uint32_t *)sp;
1473 return 1;
1474}
1475
1476static void
1477ath_bmiss_proc(void *arg, int pending)
1478{
1479 struct ath_softc *sc = arg;
1480 struct ifnet *ifp = sc->sc_ifp;
1481 uint32_t hangs;
1482
1483 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1484
1485 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
1486 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
1487 ath_reset(ifp);
1488 } else
1489 ieee80211_beacon_miss(ifp->if_l2com);
1490}
1491
1492/*
1493 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
1494 * calcs together with WME. If necessary disable the crypto
1495 * hardware and mark the 802.11 state so keys will be setup
1496 * with the MIC work done in software.
1497 */
1498static void
1499ath_settkipmic(struct ath_softc *sc)
1500{
1501 struct ifnet *ifp = sc->sc_ifp;
1502 struct ieee80211com *ic = ifp->if_l2com;
1503
1504 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1505 if (ic->ic_flags & IEEE80211_F_WME) {
1506 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1507 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1508 } else {
1509 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1510 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1511 }
1512 }
1513}
1514
1515static void
1516ath_init(void *arg)
1517{
1518 struct ath_softc *sc = (struct ath_softc *) arg;
1519 struct ifnet *ifp = sc->sc_ifp;
1520 struct ieee80211com *ic = ifp->if_l2com;
1521 struct ath_hal *ah = sc->sc_ah;
1522 HAL_STATUS status;
1523
1524 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1525 __func__, ifp->if_flags);
1526
1527 ATH_LOCK(sc);
1528 /*
1529 * Stop anything previously setup. This is safe
1530 * whether this is the first time through or not.
1531 */
1532 ath_stop_locked(ifp);
1533
1534 /*
1535 * The basic interface to setting the hardware in a good
1536 * state is ``reset''. On return the hardware is known to
1537 * be powered up and with interrupts disabled. This must
1538 * be followed by initialization of the appropriate bits
1539 * and then setup of the interrupt mask.
1540 */
1541 ath_settkipmic(sc);
1542 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
1543 if_printf(ifp, "unable to reset hardware; hal status %u\n",
1544 status);
1545 ATH_UNLOCK(sc);
1546 return;
1547 }
1548 ath_chan_change(sc, ic->ic_curchan);
1549
1550 /*
1551 * Likewise this is set during reset so update
1552 * state cached in the driver.
1553 */
1554 sc->sc_diversity = ath_hal_getdiversity(ah);
1555 sc->sc_lastlongcal = 0;
1556 sc->sc_resetcal = 1;
1557 sc->sc_lastcalreset = 0;
1558
1559 /*
1560 * Setup the hardware after reset: the key cache
1561 * is filled as needed and the receive engine is
1562 * set going. Frame transmit is handled entirely
1563 * in the frame output path; there's nothing to do
1564 * here except setup the interrupt mask.
1565 */
1566 if (ath_startrecv(sc) != 0) {
1567 if_printf(ifp, "unable to start recv logic\n");
1568 ATH_UNLOCK(sc);
1569 return;
1570 }
1571
1572 /*
1573 * Enable interrupts.
1574 */
1575 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
1576 | HAL_INT_RXEOL | HAL_INT_RXORN
1577 | HAL_INT_FATAL | HAL_INT_GLOBAL;
1578 /*
1579 * Enable MIB interrupts when there are hardware phy counters.
1580 * Note we only do this (at the moment) for station mode.
1581 */
1582 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
1583 sc->sc_imask |= HAL_INT_MIB;
1584
1585 ifp->if_flags |= IFF_RUNNING;
1586 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
1587 ath_hal_intrset(ah, sc->sc_imask);
1588
1589 ATH_UNLOCK(sc);
1590
1591#ifdef ATH_TX99_DIAG
1592 if (sc->sc_tx99 != NULL)
1593 sc->sc_tx99->start(sc->sc_tx99);
1594 else
1595#endif
1596 ieee80211_start_all(ic); /* start all vap's */
1597}
1598
1599static void
1600ath_stop_locked(struct ifnet *ifp)
1601{
1602 struct ath_softc *sc = ifp->if_softc;
1603 struct ath_hal *ah = sc->sc_ah;
1604
1605 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1606 __func__, sc->sc_invalid, ifp->if_flags);
1607
1608 ATH_LOCK_ASSERT(sc);
1609 if (ifp->if_flags & IFF_RUNNING) {
1610 /*
1611 * Shutdown the hardware and driver:
1612 * reset 802.11 state machine
1613 * turn off timers
1614 * disable interrupts
1615 * turn off the radio
1616 * clear transmit machinery
1617 * clear receive machinery
1618 * drain and release tx queues
1619 * reclaim beacon resources
1620 * power down hardware
1621 *
1622 * Note that some of this work is not possible if the
1623 * hardware is gone (invalid).
1624 */
86877dfb
RP
1625#ifdef ATH_TX99_DIAG
1626 if (sc->sc_tx99 != NULL)
1627 sc->sc_tx99->stop(sc->sc_tx99);
1628#endif
1629 callout_stop(&sc->sc_wd_ch);
1630 sc->sc_wd_timer = 0;
193b341d 1631 ifp->if_flags &= ~IFF_RUNNING;
193b341d
SZ
1632 if (!sc->sc_invalid) {
1633 if (sc->sc_softled) {
1634 callout_stop(&sc->sc_ledtimer);
1635 ath_hal_gpioset(ah, sc->sc_ledpin,
1636 !sc->sc_ledon);
1637 sc->sc_blinking = 0;
1638 }
1639 ath_hal_intrset(ah, 0);
1640 }
1641 ath_draintxq(sc);
1642 if (!sc->sc_invalid) {
1643 ath_stoprecv(sc);
1644 ath_hal_phydisable(ah);
1645 } else
1646 sc->sc_rxlink = NULL;
86877dfb 1647 ath_beacon_free(sc); /* XXX not needed */
193b341d
SZ
1648 }
1649}
1650
1651static void
1652ath_stop(struct ifnet *ifp)
1653{
1654 struct ath_softc *sc = ifp->if_softc;
1655
86877dfb
RP
1656 ATH_LOCK(sc);
1657 ath_stop_locked(ifp);
1658 ATH_UNLOCK(sc);
193b341d
SZ
1659}
1660
1661/*
1662 * Reset the hardware w/o losing operational state. This is
1663 * basically a more efficient way of doing ath_stop, ath_init,
1664 * followed by state transitions to the current 802.11
1665 * operational state. Used to recover from various errors and
1666 * to reset or reload hardware state.
1667 */
1668static int
1669ath_reset(struct ifnet *ifp)
1670{
1671 struct ath_softc *sc = ifp->if_softc;
86877dfb 1672 struct ieee80211com *ic = ifp->if_l2com;
193b341d 1673 struct ath_hal *ah = sc->sc_ah;
193b341d
SZ
1674 HAL_STATUS status;
1675
193b341d
SZ
1676 ath_hal_intrset(ah, 0); /* disable interrupts */
1677 ath_draintxq(sc); /* stop xmit side */
1678 ath_stoprecv(sc); /* stop recv side */
86877dfb 1679 ath_settkipmic(sc); /* configure TKIP MIC handling */
193b341d 1680 /* NB: indicate channel change so we do a full reset */
86877dfb 1681 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
193b341d
SZ
1682 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
1683 __func__, status);
193b341d 1684 sc->sc_diversity = ath_hal_getdiversity(ah);
86877dfb
RP
1685 if (ath_startrecv(sc) != 0) /* restart recv */
1686 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
193b341d
SZ
1687 /*
1688 * We may be doing a reset in response to an ioctl
1689 * that changes the channel so update any state that
1690 * might change as a result.
1691 */
86877dfb
RP
1692 ath_chan_change(sc, ic->ic_curchan);
1693 if (sc->sc_beacons) { /* restart beacons */
1694#ifdef IEEE80211_SUPPORT_TDMA
1695 if (sc->sc_tdma)
1696 ath_tdma_config(sc, NULL);
1697 else
1698#endif
1699 ath_beacon_config(sc, NULL);
1700 }
193b341d
SZ
1701 ath_hal_intrset(ah, sc->sc_imask);
1702
86877dfb 1703 ath_start(ifp); /* restart xmit */
193b341d
SZ
1704 return 0;
1705}
1706
86877dfb
RP
1707static int
1708ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
193b341d 1709{
86877dfb
RP
1710 struct ieee80211com *ic = vap->iv_ic;
1711 struct ifnet *ifp = ic->ic_ifp;
193b341d
SZ
1712 struct ath_softc *sc = ifp->if_softc;
1713 struct ath_hal *ah = sc->sc_ah;
86877dfb
RP
1714
1715 switch (cmd) {
1716 case IEEE80211_IOC_TXPOWER:
1717 /*
1718 * If per-packet TPC is enabled, then we have nothing
1719 * to do; otherwise we need to force the global limit.
1720 * All this can happen directly; no need to reset.
1721 */
1722 if (!ath_hal_gettpc(ah))
1723 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
1724 return 0;
1725 }
1726 return ath_reset(ifp);
1727}
1728
1729static struct ath_buf *
1730_ath_getbuf_locked(struct ath_softc *sc)
1731{
1732 struct ath_buf *bf;
1733
1734 ATH_TXBUF_LOCK_ASSERT(sc);
1735
1736 bf = STAILQ_FIRST(&sc->sc_txbuf);
1737 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
1738 STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
1739 else
1740 bf = NULL;
1741 if (bf == NULL) {
1742 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
1743 STAILQ_FIRST(&sc->sc_txbuf) == NULL ?
1744 "out of xmit buffers" : "xmit buffer busy");
1745 }
1746 return bf;
1747}
1748
1749static struct ath_buf *
1750ath_getbuf(struct ath_softc *sc)
1751{
193b341d 1752 struct ath_buf *bf;
86877dfb
RP
1753
1754 ATH_TXBUF_LOCK(sc);
1755 bf = _ath_getbuf_locked(sc);
1756 if (bf == NULL) {
1757 struct ifnet *ifp = sc->sc_ifp;
1758
1759 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
1760 sc->sc_stats.ast_tx_qstop++;
1761 ifp->if_flags |= IFF_OACTIVE;
1762 }
1763 ATH_TXBUF_UNLOCK(sc);
1764 return bf;
1765}
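/*
 * Illustrative sketch (stand-alone, hypothetical names; not part of the
 * driver): ath_getbuf above only pops the head of the free list when its
 * ATH_BUF_BUSY flag is clear, since a busy head may still be referenced
 * by the hardware.  A minimal model of that policy:
 */
#if 0
#include <stddef.h>

struct ex_buf {
	struct ex_buf	*next;
	unsigned	 flags;
#define	EX_BUF_BUSY	0x01
};

/* Pop the head of a singly-linked free list unless it is marked busy. */
static struct ex_buf *
ex_getbuf(struct ex_buf **headp)
{
	struct ex_buf *bf = *headp;

	if (bf != NULL && (bf->flags & EX_BUF_BUSY) == 0) {
		*headp = bf->next;	/* detach from the free list */
		return bf;
	}
	return NULL;			/* empty list or busy head */
}
#endif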
1766
1767/*
1768 * Cleanup driver resources when we run out of buffers
1769 * while processing fragments; return the tx buffers
1770 * allocated and drop node references.
1771 */
1772static void
1773ath_txfrag_cleanup(struct ath_softc *sc,
1774 ath_bufhead *frags, struct ieee80211_node *ni)
1775{
1776 struct ath_buf *bf, *next;
1777
1778 ATH_TXBUF_LOCK_ASSERT(sc);
1779
1780 STAILQ_FOREACH_MUTABLE(bf, frags, bf_list, next) {
1781 /* NB: bf assumed clean */
1782 STAILQ_REMOVE_HEAD(frags, bf_list);
1783 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1784 ieee80211_node_decref(ni);
1785 }
1786}
1787
1788/*
1789 * Setup xmit of a fragmented frame. Allocate a buffer
1790 * for each frag and bump the node reference count to
1791 * reflect the held reference to be setup by ath_tx_start.
1792 */
1793static int
1794ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
1795 struct mbuf *m0, struct ieee80211_node *ni)
1796{
193b341d 1797 struct mbuf *m;
86877dfb 1798 struct ath_buf *bf;
193b341d 1799
86877dfb
RP
1800 ATH_TXBUF_LOCK(sc);
1801 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
1802 bf = _ath_getbuf_locked(sc);
1803 if (bf == NULL) { /* out of buffers, cleanup */
1804 ath_txfrag_cleanup(sc, frags, ni);
1805 break;
1806 }
1807 ieee80211_node_incref(ni);
1808 STAILQ_INSERT_TAIL(frags, bf, bf_list);
9db4b353 1809 }
86877dfb 1810 ATH_TXBUF_UNLOCK(sc);
9db4b353 1811
86877dfb
RP
1812 return !STAILQ_EMPTY(frags);
1813}
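/*
 * Illustrative sketch (stand-alone, hypothetical names; not driver code):
 * ath_txfrag_setup above is an all-or-nothing reservation -- one buffer is
 * claimed per fragment and everything is released on failure, so a
 * fragment burst never goes out partially.  A counter-based model of the
 * same policy:
 */
#if 0
/* Reserve n slots from a pool of free slots, or none at all. */
static int
ex_reserve_all_or_none(unsigned *pool, unsigned n)
{
	unsigned got;

	for (got = 0; got < n; got++) {
		if (*pool == 0) {
			*pool += got;	/* roll back the partial reservation */
			return 0;	/* failure: nothing is held */
		}
		(*pool)--;
	}
	return 1;			/* success: all n slots are held */
}
#endif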
1814
1815static void
1816ath_start(struct ifnet *ifp)
1817{
1818 struct ath_softc *sc = ifp->if_softc;
1819 struct ieee80211_node *ni;
1820 struct ath_buf *bf;
1821 struct mbuf *m, *next;
1822 ath_bufhead frags;
193b341d 1823
2508f206
RP
1824 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) {
1825 ifq_purge(&ifp->if_snd);
86877dfb 1826 return;
2508f206 1827 }
193b341d
SZ
1828 for (;;) {
1829 /*
1830 * Grab a TX buffer and associated resources.
1831 */
86877dfb
RP
1832 bf = ath_getbuf(sc);
1833 if (bf == NULL)
1834 break;
1835
1836 IF_DEQUEUE(&ifp->if_snd, m);
1837 if (m == NULL) {
1838 ATH_TXBUF_LOCK(sc);
1839 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1840 ATH_TXBUF_UNLOCK(sc);
193b341d
SZ
1841 break;
1842 }
86877dfb 1843 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
193b341d 1844 /*
86877dfb
RP
1845 * Check for fragmentation. If this frame
1846 * has been broken up verify we have enough
1847 * buffers to send all the fragments so all
1848 * go out or none...
193b341d 1849 */
86877dfb
RP
1850 STAILQ_INIT(&frags);
 1851 if ((m->m_flags & M_FRAG) &&
 1852 !ath_txfrag_setup(sc, &frags, m, ni)) {
1853 DPRINTF(sc, ATH_DEBUG_XMIT,
1854 "%s: out of txfrag buffers\n", __func__);
1855 sc->sc_stats.ast_tx_nofrag++;
1856 ifp->if_oerrors++;
1857 ath_freetx(m);
1858 goto bad;
193b341d 1859 }
86877dfb
RP
1860 ifp->if_opackets++;
1861 nextfrag:
1862 /*
1863 * Pass the frame to the h/w for transmission.
1864 * Fragmented frames have each frag chained together
1865 * with m_nextpkt. We know there are sufficient ath_buf's
1866 * to send all the frags because of work done by
1867 * ath_txfrag_setup. We leave m_nextpkt set while
1868 * calling ath_tx_start so it can use it to extend the
 1869 * tx duration to cover the subsequent frag and
1870 * so it can reclaim all the mbufs in case of an error;
1871 * ath_tx_start clears m_nextpkt once it commits to
1872 * handing the frame to the hardware.
1873 */
1874 next = m->m_nextpkt;
193b341d 1875 if (ath_tx_start(sc, ni, bf, m)) {
86877dfb 1876 bad:
193b341d 1877 ifp->if_oerrors++;
86877dfb
RP
1878 reclaim:
1879 bf->bf_m = NULL;
1880 bf->bf_node = NULL;
1881 ATH_TXBUF_LOCK(sc);
1882 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1883 ath_txfrag_cleanup(sc, &frags, ni);
1884 ATH_TXBUF_UNLOCK(sc);
193b341d
SZ
1885 if (ni != NULL)
1886 ieee80211_free_node(ni);
1887 continue;
1888 }
86877dfb
RP
1889 if (next != NULL) {
1890 /*
1891 * Beware of state changing between frags.
1892 * XXX check sta power-save state?
1893 */
1894 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
1895 DPRINTF(sc, ATH_DEBUG_XMIT,
1896 "%s: flush fragmented packet, state %s\n",
1897 __func__,
1898 ieee80211_state_name[ni->ni_vap->iv_state]);
1899 ath_freetx(next);
1900 goto reclaim;
1901 }
1902 m = next;
1903 bf = STAILQ_FIRST(&frags);
1904 KASSERT(bf != NULL, ("no buf for txfrag"));
1905 STAILQ_REMOVE_HEAD(&frags, bf_list);
1906 goto nextfrag;
1907 }
193b341d 1908
86877dfb 1909 sc->sc_wd_timer = 5;
193b341d
SZ
1910 }
1911}
1912
1913static int
1914ath_media_change(struct ifnet *ifp)
1915{
86877dfb
RP
1916 int error = ieee80211_media_change(ifp);
1917 /* NB: only the fixed rate can change and that doesn't need a reset */
1918 return (error == ENETRESET ? 0 : error);
193b341d
SZ
1919}
1920
1921#ifdef ATH_DEBUG
1922static void
8982d733 1923ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
86877dfb 1924 const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
193b341d
SZ
1925{
1926 static const char *ciphers[] = {
1927 "WEP",
1928 "AES-OCB",
1929 "AES-CCM",
1930 "CKIP",
1931 "TKIP",
1932 "CLR",
1933 };
1934 int i, n;
1935
2508f206 1936 kprintf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
193b341d 1937 for (i = 0, n = hk->kv_len; i < n; i++)
2508f206 1938 kprintf("%02x", hk->kv_val[i]);
638601ca 1939 kprintf(" mac %6D", mac, ":");
193b341d 1940 if (hk->kv_type == HAL_CIPHER_TKIP) {
2508f206 1941 kprintf(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
193b341d 1942 for (i = 0; i < sizeof(hk->kv_mic); i++)
2508f206 1943 kprintf("%02x", hk->kv_mic[i]);
8982d733 1944 if (!sc->sc_splitmic) {
2508f206 1945 kprintf(" txmic ");
8982d733 1946 for (i = 0; i < sizeof(hk->kv_txmic); i++)
2508f206 1947 kprintf("%02x", hk->kv_txmic[i]);
8982d733 1948 }
193b341d 1949 }
2508f206 1950 kprintf("\n");
193b341d
SZ
1951}
1952#endif
1953
1954/*
1955 * Set a TKIP key into the hardware. This handles the
1956 * potential distribution of key state to multiple key
1957 * cache slots for TKIP.
1958 */
1959static int
1960ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
86877dfb 1961 HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
193b341d
SZ
1962{
1963#define IEEE80211_KEY_XR (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
86877dfb 1964 static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
193b341d
SZ
1965 struct ath_hal *ah = sc->sc_ah;
1966
1967 KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
1968 ("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
193b341d 1969 if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
8982d733
SZ
1970 if (sc->sc_splitmic) {
1971 /*
1972 * TX key goes at first index, RX key at the rx index.
1973 * The hal handles the MIC keys at index+64.
1974 */
1975 memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
1976 KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
1977 if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
1978 return 0;
1979
1980 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
1981 KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
1982 /* XXX delete tx key on failure? */
1983 return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
1984 } else {
1985 /*
1986 * Room for both TX+RX MIC keys in one key cache
1987 * slot, just set key at the first index; the hal
86877dfb 1988 * will handle the rest.
8982d733
SZ
1989 */
1990 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
8982d733 1991 memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
8982d733
SZ
1992 KEYPRINTF(sc, k->wk_keyix, hk, mac);
1993 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
1994 }
86877dfb
RP
1995 } else if (k->wk_flags & IEEE80211_KEY_XMIT) {
1996 if (sc->sc_splitmic) {
1997 /*
1998 * NB: must pass MIC key in expected location when
1999 * the keycache only holds one MIC key per entry.
2000 */
2001 memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_txmic));
2002 } else
2003 memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
2004 KEYPRINTF(sc, k->wk_keyix, hk, mac);
2005 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
2006 } else if (k->wk_flags & IEEE80211_KEY_RECV) {
2007 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
193b341d
SZ
2008 KEYPRINTF(sc, k->wk_keyix, hk, mac);
2009 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
2010 }
2011 return 0;
2012#undef IEEE80211_KEY_XR
2013}
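/*
 * Illustrative sketch (stand-alone; not driver code): with split-MIC
 * hardware the routine above programs the TX key at the allocated index
 * and the RX key 32 slots higher, and per the comments the hal pairs each
 * entry with a MIC key 64 slots above it.  For a TKIP key at slot 8 that
 * works out to TX 8 / TX-MIC 72 and RX 40 / RX-MIC 104:
 */
#if 0
/* Derive the four key cache slots used for one split-MIC TKIP key. */
static void
ex_tkip_slots(unsigned keyix,
    unsigned *txkey, unsigned *txmic, unsigned *rxkey, unsigned *rxmic)
{
	*txkey = keyix;			/* TX cipher key */
	*txmic = keyix + 64;		/* MIC entry paired by the hal */
	*rxkey = keyix + 32;		/* RX cipher key */
	*rxmic = keyix + 32 + 64;	/* its MIC entry */
}
#endif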
2014
2015/*
2016 * Set a net80211 key into the hardware. This handles the
2017 * potential distribution of key state to multiple key
2018 * cache slots for TKIP with hardware MIC support.
2019 */
2020static int
2021ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
193b341d
SZ
2022 struct ieee80211_node *bss)
2023{
2024#define N(a) (sizeof(a)/sizeof(a[0]))
86877dfb 2025 static const u_int8_t ciphermap[] = {
193b341d
SZ
2026 HAL_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */
2027 HAL_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */
2028 HAL_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */
2029 HAL_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */
86877dfb 2030 (u_int8_t) -1, /* 4 is not allocated */
193b341d
SZ
2031 HAL_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */
2032 HAL_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */
2033 };
2034 struct ath_hal *ah = sc->sc_ah;
2035 const struct ieee80211_cipher *cip = k->wk_cipher;
86877dfb
RP
2036 u_int8_t gmac[IEEE80211_ADDR_LEN];
2037 const u_int8_t *mac;
193b341d
SZ
2038 HAL_KEYVAL hk;
2039
2040 memset(&hk, 0, sizeof(hk));
2041 /*
2042 * Software crypto uses a "clear key" so non-crypto
 2043 * state kept in the key cache is maintained and
2044 * so that rx frames have an entry to match.
2045 */
2046 if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2047 KASSERT(cip->ic_cipher < N(ciphermap),
2048 ("invalid cipher type %u", cip->ic_cipher));
2049 hk.kv_type = ciphermap[cip->ic_cipher];
2050 hk.kv_len = k->wk_keylen;
2051 memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
2052 } else
2053 hk.kv_type = HAL_CIPHER_CLR;
2054
2055 if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
2056 /*
2057 * Group keys on hardware that supports multicast frame
86877dfb 2058 * key search use a MAC that is the sender's address with
193b341d
SZ
2059 * the high bit set instead of the app-specified address.
2060 */
2061 IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
2062 gmac[0] |= 0x80;
2063 mac = gmac;
2064 } else
86877dfb 2065 mac = k->wk_macaddr;
193b341d
SZ
2066
2067 if (hk.kv_type == HAL_CIPHER_TKIP &&
8982d733 2068 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
193b341d
SZ
2069 return ath_keyset_tkip(sc, k, &hk, mac);
2070 } else {
2071 KEYPRINTF(sc, k->wk_keyix, &hk, mac);
2072 return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
2073 }
2074#undef N
2075}
2076
2077/*
2078 * Allocate tx/rx key slots for TKIP. We allocate two slots for
2079 * each key, one for decrypt/encrypt and the other for the MIC.
2080 */
86877dfb 2081static u_int16_t
193b341d
SZ
2082key_alloc_2pair(struct ath_softc *sc,
2083 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2084{
2085#define N(a) (sizeof(a)/sizeof(a[0]))
2086 u_int i, keyix;
2087
2088 KASSERT(sc->sc_splitmic, ("key cache !split"));
2089 /* XXX could optimize */
2090 for (i = 0; i < N(sc->sc_keymap)/4; i++) {
86877dfb 2091 u_int8_t b = sc->sc_keymap[i];
193b341d
SZ
2092 if (b != 0xff) {
2093 /*
2094 * One or more slots in this byte are free.
2095 */
2096 keyix = i*NBBY;
2097 while (b & 1) {
2098 again:
2099 keyix++;
2100 b >>= 1;
2101 }
2102 /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
2103 if (isset(sc->sc_keymap, keyix+32) ||
2104 isset(sc->sc_keymap, keyix+64) ||
2105 isset(sc->sc_keymap, keyix+32+64)) {
2106 /* full pair unavailable */
2107 /* XXX statistic */
2108 if (keyix == (i+1)*NBBY) {
2109 /* no slots were appropriate, advance */
2110 continue;
2111 }
2112 goto again;
2113 }
2114 setbit(sc->sc_keymap, keyix);
2115 setbit(sc->sc_keymap, keyix+64);
2116 setbit(sc->sc_keymap, keyix+32);
2117 setbit(sc->sc_keymap, keyix+32+64);
2118 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2119 "%s: key pair %u,%u %u,%u\n",
2120 __func__, keyix, keyix+64,
2121 keyix+32, keyix+32+64);
2122 *txkeyix = keyix;
2123 *rxkeyix = keyix+32;
2124 return 1;
2125 }
2126 }
2127 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2128 return 0;
2129#undef N
2130}
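/*
 * Illustrative sketch (stand-alone, hypothetical helpers; not driver
 * code): the pair allocator above only commits a slot when its three
 * companion slots (+32, +64, +32+64) are also free in the bitmap,
 * otherwise it keeps scanning.  A compact model of that test:
 */
#if 0
#define	EX_NKEYS	128
static unsigned char ex_keymap[EX_NKEYS / 8];	/* 1 bit per key slot */

static int
ex_isset(unsigned i)
{
	return (ex_keymap[i / 8] >> (i % 8)) & 1;
}

static void
ex_setbit(unsigned i)
{
	ex_keymap[i / 8] |= 1 << (i % 8);
}

/* Claim keyix and its three companions iff every one of them is free. */
static int
ex_claim_2pair(unsigned keyix)
{
	if (keyix >= 32 ||		/* keep the companions in range */
	    ex_isset(keyix) || ex_isset(keyix + 32) ||
	    ex_isset(keyix + 64) || ex_isset(keyix + 32 + 64))
		return 0;
	ex_setbit(keyix);
	ex_setbit(keyix + 32);
	ex_setbit(keyix + 64);
	ex_setbit(keyix + 32 + 64);
	return 1;
}
#endif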
2131
8982d733
SZ
2132/*
2133 * Allocate tx/rx key slots for TKIP. We allocate two slots for
2134 * each key, one for decrypt/encrypt and the other for the MIC.
2135 */
86877dfb 2136static u_int16_t
8982d733
SZ
2137key_alloc_pair(struct ath_softc *sc,
2138 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2139{
2140#define N(a) (sizeof(a)/sizeof(a[0]))
2141 u_int i, keyix;
2142
2143 KASSERT(!sc->sc_splitmic, ("key cache split"));
2144 /* XXX could optimize */
2145 for (i = 0; i < N(sc->sc_keymap)/4; i++) {
86877dfb 2146 u_int8_t b = sc->sc_keymap[i];
8982d733
SZ
2147 if (b != 0xff) {
2148 /*
2149 * One or more slots in this byte are free.
2150 */
2151 keyix = i*NBBY;
2152 while (b & 1) {
2153 again:
2154 keyix++;
2155 b >>= 1;
2156 }
2157 if (isset(sc->sc_keymap, keyix+64)) {
2158 /* full pair unavailable */
2159 /* XXX statistic */
2160 if (keyix == (i+1)*NBBY) {
2161 /* no slots were appropriate, advance */
2162 continue;
2163 }
2164 goto again;
2165 }
2166 setbit(sc->sc_keymap, keyix);
2167 setbit(sc->sc_keymap, keyix+64);
2168 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2169 "%s: key pair %u,%u\n",
2170 __func__, keyix, keyix+64);
2171 *txkeyix = *rxkeyix = keyix;
2172 return 1;
2173 }
2174 }
2175 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2176 return 0;
2177#undef N
2178}
2179
193b341d
SZ
2180/*
2181 * Allocate a single key cache slot.
2182 */
2183static int
2184key_alloc_single(struct ath_softc *sc,
2185 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2186{
2187#define N(a) (sizeof(a)/sizeof(a[0]))
2188 u_int i, keyix;
2189
2190 /* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
2191 for (i = 0; i < N(sc->sc_keymap); i++) {
86877dfb 2192 u_int8_t b = sc->sc_keymap[i];
193b341d
SZ
2193 if (b != 0xff) {
2194 /*
2195 * One or more slots are free.
2196 */
2197 keyix = i*NBBY;
2198 while (b & 1)
2199 keyix++, b >>= 1;
2200 setbit(sc->sc_keymap, keyix);
2201 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
2202 __func__, keyix);
2203 *txkeyix = *rxkeyix = keyix;
2204 return 1;
2205 }
2206 }
2207 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
2208 return 0;
2209#undef N
2210}
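/*
 * Illustrative sketch (stand-alone; not driver code): all three
 * allocators above find a free slot by looking for a bitmap byte that is
 * not 0xff and walking to its first clear bit, which is what the
 * "while (b & 1) keyix++, b >>= 1;" idiom does.
 */
#if 0
/* Return the index of the first clear bit in an 8-bit map, or -1. */
static int
ex_first_clear_bit(unsigned char b)
{
	int bit;

	if (b == 0xff)
		return -1;		/* every slot in this byte is taken */
	for (bit = 0; b & 1; bit++)
		b >>= 1;
	return bit;
}
#endif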
2211
2212/*
 2213 * Allocate one or more key cache slots for a unicast key. The
2214 * key itself is needed only to identify the cipher. For hardware
2215 * TKIP with split cipher+MIC keys we allocate two key cache slot
2216 * pairs so that we can setup separate TX and RX MIC keys. Note
2217 * that the MIC key for a TKIP key at slot i is assumed by the
2218 * hardware to be at slot i+64. This limits TKIP keys to the first
2219 * 64 entries.
2220 */
2221static int
86877dfb 2222ath_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
193b341d
SZ
2223 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
2224{
86877dfb 2225 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
193b341d
SZ
2226
2227 /*
2228 * Group key allocation must be handled specially for
2229 * parts that do not support multicast key cache search
2230 * functionality. For those parts the key id must match
2231 * the h/w key index so lookups find the right key. On
2232 * parts w/ the key search facility we install the sender's
2233 * mac address (with the high bit set) and let the hardware
2234 * find the key w/o using the key id. This is preferred as
2235 * it permits us to support multiple users for adhoc and/or
2236 * multi-station operation.
2237 */
86877dfb
RP
2238 if (k->wk_keyix != IEEE80211_KEYIX_NONE) {
2239 /*
2240 * Only global keys should have key index assigned.
2241 */
2242 if (!(&vap->iv_nw_keys[0] <= k &&
2243 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
193b341d
SZ
2244 /* should not happen */
2245 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2246 "%s: bogus group key\n", __func__);
2247 return 0;
2248 }
86877dfb
RP
2249 if (vap->iv_opmode != IEEE80211_M_HOSTAP ||
2250 !(k->wk_flags & IEEE80211_KEY_GROUP) ||
2251 !sc->sc_mcastkey) {
2252 /*
2253 * XXX we pre-allocate the global keys so
2254 * have no way to check if they've already
2255 * been allocated.
2256 */
2257 *keyix = *rxkeyix = k - vap->iv_nw_keys;
2258 return 1;
2259 }
193b341d 2260 /*
86877dfb 2261 * Group key and device supports multicast key search.
193b341d 2262 */
86877dfb 2263 k->wk_keyix = IEEE80211_KEYIX_NONE;
193b341d
SZ
2264 }
2265
2266 /*
 2267 * We allocate two pairs for TKIP when using the h/w to do
2268 * the MIC. For everything else, including software crypto,
2269 * we allocate a single entry. Note that s/w crypto requires
2270 * a pass-through slot on the 5211 and 5212. The 5210 does
2271 * not support pass-through cache entries and we map all
2272 * those requests to slot 0.
2273 */
2274 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
2275 return key_alloc_single(sc, keyix, rxkeyix);
2276 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
8982d733
SZ
2277 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2278 if (sc->sc_splitmic)
2279 return key_alloc_2pair(sc, keyix, rxkeyix);
2280 else
2281 return key_alloc_pair(sc, keyix, rxkeyix);
193b341d
SZ
2282 } else {
2283 return key_alloc_single(sc, keyix, rxkeyix);
2284 }
2285}
2286
2287/*
2288 * Delete an entry in the key cache allocated by ath_key_alloc.
2289 */
2290static int
86877dfb 2291ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
193b341d 2292{
86877dfb 2293 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
193b341d
SZ
2294 struct ath_hal *ah = sc->sc_ah;
2295 const struct ieee80211_cipher *cip = k->wk_cipher;
2296 u_int keyix = k->wk_keyix;
2297
2298 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);
2299
2300 ath_hal_keyreset(ah, keyix);
2301 /*
2302 * Handle split tx/rx keying required for TKIP with h/w MIC.
2303 */
2304 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2305 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
2306 ath_hal_keyreset(ah, keyix+32); /* RX key */
2307 if (keyix >= IEEE80211_WEP_NKID) {
2308 /*
2309 * Don't touch keymap entries for global keys so
2310 * they are never considered for dynamic allocation.
2311 */
2312 clrbit(sc->sc_keymap, keyix);
2313 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
8982d733 2314 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
193b341d 2315 clrbit(sc->sc_keymap, keyix+64); /* TX key MIC */
8982d733
SZ
2316 if (sc->sc_splitmic) {
2317 /* +32 for RX key, +32+64 for RX key MIC */
2318 clrbit(sc->sc_keymap, keyix+32);
2319 clrbit(sc->sc_keymap, keyix+32+64);
2320 }
193b341d
SZ
2321 }
2322 }
2323 return 1;
2324}
2325
2326/*
2327 * Set the key cache contents for the specified key. Key cache
2328 * slot(s) must already have been allocated by ath_key_alloc.
2329 */
2330static int
86877dfb
RP
2331ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
2332 const u_int8_t mac[IEEE80211_ADDR_LEN])
193b341d 2333{
86877dfb 2334 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
193b341d 2335
86877dfb 2336 return ath_keyset(sc, k, vap->iv_bss);
193b341d
SZ
2337}
2338
2339/*
2340 * Block/unblock tx+rx processing while a key change is done.
2341 * We assume the caller serializes key management operations
2342 * so we only need to worry about synchronization with other
2343 * uses that originate in the driver.
2344 */
2345static void
86877dfb 2346ath_key_update_begin(struct ieee80211vap *vap)
193b341d 2347{
86877dfb 2348 struct ifnet *ifp = vap->iv_ic->ic_ifp;
193b341d
SZ
2349 struct ath_softc *sc = ifp->if_softc;
2350
2351 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
86877dfb 2352 taskqueue_block(sc->sc_tq);
193b341d 2353 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
193b341d
SZ
2354}
2355
2356static void
86877dfb 2357ath_key_update_end(struct ieee80211vap *vap)
193b341d 2358{
86877dfb 2359 struct ifnet *ifp = vap->iv_ic->ic_ifp;
193b341d
SZ
2360 struct ath_softc *sc = ifp->if_softc;
2361
2362 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
193b341d 2363 IF_UNLOCK(&ifp->if_snd);
86877dfb 2364 taskqueue_unblock(sc->sc_tq);
193b341d
SZ
2365}
2366
2367/*
2368 * Calculate the receive filter according to the
2369 * operating mode and state:
2370 *
2371 * o always accept unicast, broadcast, and multicast traffic
86877dfb
RP
2372 * o accept PHY error frames when hardware doesn't have MIB support
2373 * to count and we need them for ANI (sta mode only until recently)
2374 * and we are not scanning (ANI is disabled)
2375 * NB: older hal's add rx filter bits out of sight and we need to
2376 * blindly preserve them
193b341d 2377 * o probe request frames are accepted only when operating in
86877dfb
RP
2378 * hostap, adhoc, mesh, or monitor modes
2379 * o enable promiscuous mode
2380 * - when in monitor mode
2381 * - if interface marked PROMISC (assumes bridge setting is filtered)
193b341d 2382 * o accept beacons:
193b341d
SZ
2383 * - when operating in station mode for collecting rssi data when
2384 * the station is otherwise quiet, or
86877dfb
RP
2385 * - when operating in adhoc mode so the 802.11 layer creates
2386 * node table entries for peers,
193b341d 2387 * - when scanning
86877dfb
RP
2388 * - when doing s/w beacon miss (e.g. for ap+sta)
2389 * - when operating in ap mode in 11g to detect overlapping bss that
2390 * require protection
2391 * - when operating in mesh mode to detect neighbors
ed33fa9f
SW
2392 * o accept control frames:
2393 * - when in monitor mode
86877dfb
RP
2394 * XXX BAR frames for 11n
2395 * XXX HT protection for 11n
193b341d 2396 */
86877dfb
RP
2397static u_int32_t
2398ath_calcrxfilter(struct ath_softc *sc)
193b341d 2399{
86877dfb
RP
2400 struct ifnet *ifp = sc->sc_ifp;
2401 struct ieee80211com *ic = ifp->if_l2com;
2402 u_int32_t rfilt;
193b341d 2403
86877dfb
RP
2404 rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
2405 if (!sc->sc_needmib && !sc->sc_scanning)
2406 rfilt |= HAL_RX_FILTER_PHYERR;
193b341d
SZ
2407 if (ic->ic_opmode != IEEE80211_M_STA)
2408 rfilt |= HAL_RX_FILTER_PROBEREQ;
86877dfb
RP
2409 /* XXX ic->ic_monvaps != 0? */
2410 if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
193b341d
SZ
2411 rfilt |= HAL_RX_FILTER_PROM;
2412 if (ic->ic_opmode == IEEE80211_M_STA ||
2413 ic->ic_opmode == IEEE80211_M_IBSS ||
86877dfb
RP
2414 sc->sc_swbmiss || sc->sc_scanning)
2415 rfilt |= HAL_RX_FILTER_BEACON;
2416 /*
2417 * NB: We don't recalculate the rx filter when
2418 * ic_protmode changes; otherwise we could do
2419 * this only when ic_protmode != NONE.
2420 */
2421 if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
2422 IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
2423 rfilt |= HAL_RX_FILTER_BEACON;
2424 if (sc->sc_nmeshvaps) {
193b341d 2425 rfilt |= HAL_RX_FILTER_BEACON;
86877dfb
RP
2426 if (sc->sc_hasbmatch)
2427 rfilt |= HAL_RX_FILTER_BSSID;
2428 else
2429 rfilt |= HAL_RX_FILTER_PROM;
2430 }
ed33fa9f
SW
2431 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2432 rfilt |= HAL_RX_FILTER_CONTROL;
86877dfb
RP
2433 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
2434 __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
193b341d 2435 return rfilt;
193b341d
SZ
2436}
2437
2438static void
86877dfb 2439ath_update_promisc(struct ifnet *ifp)
193b341d 2440{
86877dfb
RP
2441 struct ath_softc *sc = ifp->if_softc;
2442 u_int32_t rfilt;
193b341d
SZ
2443
2444 /* configure rx filter */
86877dfb
RP
2445 rfilt = ath_calcrxfilter(sc);
2446 ath_hal_setrxfilter(sc->sc_ah, rfilt);
193b341d 2447
86877dfb
RP
2448 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2449}
193b341d 2450
86877dfb
RP
2451static void
2452ath_update_mcast(struct ifnet *ifp)
2453{
2454 struct ath_softc *sc = ifp->if_softc;
2455 u_int32_t mfilt[2];
193b341d
SZ
2456
2457 /* calculate and install multicast filter */
2458 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
86877dfb
RP
2459 struct ifmultiaddr *ifma;
2460 /*
2461 * Merge multicast addresses to form the hardware filter.
2462 */
193b341d 2463 mfilt[0] = mfilt[1] = 0;
86877dfb
RP
2464#ifdef __FreeBSD__
2465 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */
2466#endif
441d34b2 2467 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
193b341d 2468 caddr_t dl;
86877dfb
RP
2469 u_int32_t val;
2470 u_int8_t pos;
193b341d
SZ
2471
2472 /* calculate XOR of eight 6bit values */
86877dfb 2473 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
193b341d
SZ
2474 val = LE_READ_4(dl + 0);
2475 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2476 val = LE_READ_4(dl + 3);
2477 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2478 pos &= 0x3f;
2479 mfilt[pos / 32] |= (1 << (pos % 32));
2480 }
86877dfb
RP
2481#ifdef __FreeBSD__
2482 if_maddr_runlock(ifp);
2483#endif
2484 } else
193b341d 2485 mfilt[0] = mfilt[1] = ~0;
86877dfb
RP
2486 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2487 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2488 __func__, mfilt[0], mfilt[1]);
2489}
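/*
 * Illustrative sketch (stand-alone; not driver code): the loop above
 * folds each 48-bit multicast address down to a 6-bit position by
 * xor-ing 6-bit chunks together and sets that bit in the 64-bit
 * hardware filter.  A simplified user-space model of the same idea,
 * hashing the six address bytes into a caller-zeroed mfilt[2]:
 */
#if 0
#include <stdint.h>

static void
ex_mcast_hash(const uint8_t mac[6], uint32_t mfilt[2])
{
	uint32_t val;
	uint8_t pos;

	val = (uint32_t)mac[0] | (uint32_t)mac[1] << 8 |
	    (uint32_t)mac[2] << 16 | (uint32_t)mac[3] << 24;
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = (uint32_t)mac[3] | (uint32_t)mac[4] << 8 |
	    (uint32_t)mac[5] << 16;
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;				/* 6-bit bit position */
	mfilt[pos / 32] |= 1u << (pos % 32);	/* set it in the filter */
}
#endif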
2490
2491static void
2492ath_mode_init(struct ath_softc *sc)
2493{
2494 struct ifnet *ifp = sc->sc_ifp;
2495 struct ath_hal *ah = sc->sc_ah;
2496 u_int32_t rfilt;
2497
2498 /* configure rx filter */
2499 rfilt = ath_calcrxfilter(sc);
2500 ath_hal_setrxfilter(ah, rfilt);
2501
2502 /* configure operational mode */
2503 ath_hal_setopmode(ah);
2504
2505 /* handle any link-level address change */
2506 ath_hal_setmac(ah, IF_LLADDR(ifp));
2507
2508 /* calculate and install multicast filter */
2509 ath_update_mcast(ifp);
193b341d
SZ
2510}
2511
2512/*
2513 * Set the slot time based on the current setting.
2514 */
2515static void
2516ath_setslottime(struct ath_softc *sc)
2517{
86877dfb 2518 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
193b341d 2519 struct ath_hal *ah = sc->sc_ah;
86877dfb
RP
2520 u_int usec;
2521
2522 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2523 usec = 13;
2524 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2525 usec = 21;
2526 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2527 /* honor short/long slot time only in 11g */
2528 /* XXX shouldn't honor on pure g or turbo g channel */
2529 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2530 usec = HAL_SLOT_TIME_9;
2531 else
2532 usec = HAL_SLOT_TIME_20;
2533 } else
2534 usec = HAL_SLOT_TIME_9;
193b341d 2535
86877dfb
RP
2536 DPRINTF(sc, ATH_DEBUG_RESET,
2537 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2538 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2539 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2540
2541 ath_hal_setslottime(ah, usec);
193b341d
SZ
2542 sc->sc_updateslot = OK;
2543}
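/*
 * Illustrative sketch (stand-alone, hypothetical enum; not driver code):
 * the selection above reduces to a small mapping from channel type and
 * the short-slot flag to a slot time in microseconds, assuming
 * HAL_SLOT_TIME_9/HAL_SLOT_TIME_20 are 9 and 20 usec.
 */
#if 0
enum ex_chan { EX_HALF, EX_QUARTER, EX_11G, EX_OTHER };

static unsigned
ex_slottime_usec(enum ex_chan c, int shortslot)
{
	switch (c) {
	case EX_HALF:		return 13;		/* half-rate channel */
	case EX_QUARTER:	return 21;		/* quarter-rate channel */
	case EX_11G:		return shortslot ? 9 : 20; /* honor F_SHSLOT */
	default:		return 9;		/* everything else */
	}
}
#endif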
2544
2545/*
2546 * Callback from the 802.11 layer to update the
2547 * slot time based on the current setting.
2548 */
2549static void
2550ath_updateslot(struct ifnet *ifp)
2551{
2552 struct ath_softc *sc = ifp->if_softc;
86877dfb 2553 struct ieee80211com *ic = ifp->if_l2com;
193b341d
SZ
2554
2555 /*
2556 * When not coordinating the BSS, change the hardware
2557 * immediately. For other operation we defer the change
2558 * until beacon updates have propagated to the stations.
2559 */
86877dfb
RP
2560 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2561 ic->ic_opmode == IEEE80211_M_MBSS)
193b341d
SZ
2562 sc->sc_updateslot = UPDATE;
2563 else
2564 ath_setslottime(sc);
2565}
2566
2567/*
2568 * Setup a h/w transmit queue for beacons.
2569 */
2570static int
2571ath_beaconq_setup(struct ath_hal *ah)
2572{
2573 HAL_TXQ_INFO qi;
2574
2575 memset(&qi, 0, sizeof(qi));
2576 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2577 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2578 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2579 /* NB: for dynamic turbo, don't enable any other interrupts */
2580 qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2581 return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2582}
2583
2584/*
2585 * Setup the transmit queue parameters for the beacon queue.
2586 */
2587static int
2588ath_beaconq_config(struct ath_softc *sc)
2589{
2590#define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1)
86877dfb 2591 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
193b341d
SZ
2592 struct ath_hal *ah = sc->sc_ah;
2593 HAL_TXQ_INFO qi;
2594
2595 ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
86877dfb
RP
2596 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2597 ic->ic_opmode == IEEE80211_M_MBSS) {
193b341d
SZ
2598 /*
2599 * Always burst out beacon and CAB traffic.
2600 */
2601 qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2602 qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2603 qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2604 } else {
2605 struct wmeParams *wmep =
2606 &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2607 /*
2608 * Adhoc mode; important thing is to use 2x cwmin.
2609 */
2610 qi.tqi_aifs = wmep->wmep_aifsn;
2611 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2612 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2613 }
2614
2615 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2616 device_printf(sc->sc_dev, "unable to update parameters for "
2617 "beacon hardware queue!\n");
2618 return 0;
2619 } else {
2620 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2621 return 1;
2622 }
2623#undef ATH_EXPONENT_TO_VALUE
2624}
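/*
 * Illustrative sketch (not driver code): ATH_EXPONENT_TO_VALUE above
 * turns a WME contention-window exponent into the window size 2^v - 1,
 * so a logcwmin of 4 yields 15 and the adhoc path then doubles it.
 */
#if 0
static unsigned
ex_cw_from_exponent(unsigned v)
{
	return (1u << v) - 1;	/* e.g. v = 4 -> cw = 15 */
}
#endif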
2625
2626/*
2627 * Allocate and setup an initial beacon frame.
2628 */
2629static int
2630ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
2631{
86877dfb
RP
2632 struct ieee80211vap *vap = ni->ni_vap;
2633 struct ath_vap *avp = ATH_VAP(vap);
193b341d
SZ
2634 struct ath_buf *bf;
2635 struct mbuf *m;
2636 int error;
2637
86877dfb
RP
2638 bf = avp->av_bcbuf;
2639 if (bf->bf_m != NULL) {
2640 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2641 m_freem(bf->bf_m);
2642 bf->bf_m = NULL;
2643 }
2644 if (bf->bf_node != NULL) {
2645 ieee80211_free_node(bf->bf_node);
2646 bf->bf_node = NULL;
193b341d 2647 }
86877dfb 2648
193b341d
SZ
2649 /*
2650 * NB: the beacon data buffer must be 32-bit aligned;
2651 * we assume the mbuf routines will return us something
2652 * with this alignment (perhaps should assert).
2653 */
86877dfb 2654 m = ieee80211_beacon_alloc(ni, &avp->av_boff);
193b341d 2655 if (m == NULL) {
86877dfb 2656 device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
193b341d
SZ
2657 sc->sc_stats.ast_be_nombuf++;
2658 return ENOMEM;
2659 }
04522223
RP
2660 error = bus_dmamap_load_mbuf_segment(sc->sc_dmat, bf->bf_dmamap, m,
2661 bf->bf_segs, 1, &bf->bf_nseg,
86877dfb
RP
2662 BUS_DMA_NOWAIT);
2663 if (error != 0) {
2664 device_printf(sc->sc_dev,
04522223 2665 "%s: cannot map mbuf, bus_dmamap_load_mbuf_segment returns %d\n",
86877dfb 2666 __func__, error);
193b341d 2667 m_freem(m);
86877dfb 2668 return error;
193b341d 2669 }
86877dfb
RP
2670
2671 /*
2672 * Calculate a TSF adjustment factor required for staggered
2673 * beacons. Note that we assume the format of the beacon
2674 * frame leaves the tstamp field immediately following the
2675 * header.
2676 */
2677 if (sc->sc_stagbeacons && avp->av_bslot > 0) {
2678 uint64_t tsfadjust;
2679 struct ieee80211_frame *wh;
2680
2681 /*
2682 * The beacon interval is in TU's; the TSF is in usecs.
2683 * We figure out how many TU's to add to align the timestamp
2684 * then convert to TSF units and handle byte swapping before
2685 * inserting it in the frame. The hardware will then add this
2686 * each time a beacon frame is sent. Note that we align vap's
2687 * 1..N and leave vap 0 untouched. This means vap 0 has a
2688 * timestamp in one beacon interval while the others get a
 2689 * timestamp aligned to the next interval.
2690 */
2691 tsfadjust = ni->ni_intval *
2692 (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
2693 tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */
2694
2695 DPRINTF(sc, ATH_DEBUG_BEACON,
2696 "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
2697 __func__, sc->sc_stagbeacons ? "stagger" : "burst",
2698 avp->av_bslot, ni->ni_intval,
2699 (long long unsigned) le64toh(tsfadjust));
2700
2701 wh = mtod(m, struct ieee80211_frame *);
2702 memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
2703 }
2704 bf->bf_m = m;
2705 bf->bf_node = ieee80211_ref_node(ni);
2706
2707 return 0;
193b341d
SZ
2708}
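/*
 * Illustrative sketch (stand-alone; not driver code): the staggered
 * beacon TSF adjustment above spreads vap timestamps evenly across one
 * beacon interval and converts TU to TSF microseconds with a shift by
 * 10 (1 TU = 1024 usec).  With a 100 TU interval, four beacon slots and
 * slot 1, the adjustment is 100 * 3 / 4 = 75 TU, i.e. 75 << 10 = 76800 usec.
 */
#if 0
#include <stdint.h>

static uint64_t
ex_tsfadjust_usec(unsigned intval_tu, unsigned nslots, unsigned slot)
{
	uint64_t tu = (uint64_t)intval_tu * (nslots - slot) / nslots;

	return tu << 10;	/* TU -> TSF microseconds */
}
#endif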
2709
2710/*
2711 * Setup the beacon frame for transmit.
2712 */
2713static void
2714ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
2715{
2716#define USE_SHPREAMBLE(_ic) \
2717 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
2718 == IEEE80211_F_SHPREAMBLE)
2719 struct ieee80211_node *ni = bf->bf_node;
2720 struct ieee80211com *ic = ni->ni_ic;
2721 struct mbuf *m = bf->bf_m;
2722 struct ath_hal *ah = sc->sc_ah;
2723 struct ath_desc *ds;
2724 int flags, antenna;
2725 const HAL_RATE_TABLE *rt;
86877dfb 2726 u_int8_t rix, rate;
193b341d 2727
ed33fa9f 2728 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
193b341d
SZ
2729 __func__, m, m->m_len);
2730
2731 /* setup descriptors */
2732 ds = bf->bf_desc;
2733
2734 flags = HAL_TXDESC_NOACK;
2735 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
2736 ds->ds_link = bf->bf_daddr; /* self-linked */
2737 flags |= HAL_TXDESC_VEOL;
2738 /*
2739 * Let hardware handle antenna switching.
2740 */
2741 antenna = sc->sc_txantenna;
2742 } else {
2743 ds->ds_link = 0;
2744 /*
2745 * Switch antenna every 4 beacons.
 2746 * XXX assumes two antennas
2747 */
86877dfb
RP
2748 if (sc->sc_txantenna != 0)
2749 antenna = sc->sc_txantenna;
2750 else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
2751 antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
2752 else
2753 antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
193b341d
SZ
2754 }
2755
2756 KASSERT(bf->bf_nseg == 1,
2757 ("multi-segment beacon frame; nseg %u", bf->bf_nseg));
2758 ds->ds_data = bf->bf_segs[0].ds_addr;
2759 /*
2760 * Calculate rate code.
2761 * XXX everything at min xmit rate
2762 */
86877dfb 2763 rix = 0;
193b341d
SZ
2764 rt = sc->sc_currates;
2765 rate = rt->info[rix].rateCode;
2766 if (USE_SHPREAMBLE(ic))
2767 rate |= rt->info[rix].shortPreamble;
2768 ath_hal_setuptxdesc(ah, ds
2769 , m->m_len + IEEE80211_CRC_LEN /* frame length */
2770 , sizeof(struct ieee80211_frame)/* header length */
2771 , HAL_PKT_TYPE_BEACON /* Atheros packet type */
2772 , ni->ni_txpower /* txpower XXX */
2773 , rate, 1 /* series 0 rate/tries */
2774 , HAL_TXKEYIX_INVALID /* no encryption */
2775 , antenna /* antenna mode */
2776 , flags /* no ack, veol for beacons */
2777 , 0 /* rts/cts rate */
2778 , 0 /* rts/cts duration */
2779 );
2780 /* NB: beacon's BufLen must be a multiple of 4 bytes */
2781 ath_hal_filltxdesc(ah, ds
2782 , roundup(m->m_len, 4) /* buffer length */
2783 , AH_TRUE /* first segment */
2784 , AH_TRUE /* last segment */
2785 , ds /* first descriptor */
2786 );
86877dfb
RP
2787#if 0
2788 ath_desc_swap(ds);
2789#endif
193b341d
SZ
2790#undef USE_SHPREAMBLE
2791}
2792
86877dfb
RP
2793static void
2794ath_beacon_update(struct ieee80211vap *vap, int item)
2795{
2796 struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
2797
2798 setbit(bo->bo_flags, item);
2799}
2800
ed33fa9f
SW
2801/*
2802 * Append the contents of src to dst; both queues
2803 * are assumed to be locked.
2804 */
2805static void
2806ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2807{
2808 STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
2809 dst->axq_link = src->axq_link;
2810 src->axq_link = NULL;
2811 dst->axq_depth += src->axq_depth;
2812 src->axq_depth = 0;
2813}
2814
193b341d
SZ
2815/*
2816 * Transmit a beacon frame at SWBA. Dynamic updates to the
2817 * frame contents are done as needed and the slot time is
2818 * also adjusted based on current state.
2819 */
2820static void
86877dfb 2821ath_beacon_proc(void *arg, int pending)
193b341d 2822{
86877dfb 2823 struct ath_softc *sc = arg;
193b341d 2824 struct ath_hal *ah = sc->sc_ah;
86877dfb
RP
2825 struct ieee80211vap *vap;
2826 struct ath_buf *bf;
2827 int slot, otherant;
2828 uint32_t bfaddr;
193b341d 2829
86877dfb
RP
2830 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
2831 __func__, pending);
193b341d
SZ
2832 /*
2833 * Check if the previous beacon has gone out. If
8982d733
SZ
 2834 * not, don't try to post another; skip this period
2835 * and wait for the next. Missed beacons indicate
2836 * a problem and should not occur. If we miss too
2837 * many consecutive beacons reset the device.
193b341d
SZ
2838 */
2839 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
2840 sc->sc_bmisscount++;
ed33fa9f 2841 DPRINTF(sc, ATH_DEBUG_BEACON,
193b341d
SZ
2842 "%s: missed %u consecutive beacons\n",
2843 __func__, sc->sc_bmisscount);
86877dfb
RP
2844 if (sc->sc_bmisscount >= ath_bstuck_threshold)
2845 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
193b341d
SZ
2846 return;
2847 }
2848 if (sc->sc_bmisscount != 0) {
2849 DPRINTF(sc, ATH_DEBUG_BEACON,
2850 "%s: resume beacon xmit after %u misses\n",
2851 __func__, sc->sc_bmisscount);
2852 sc->sc_bmisscount = 0;
2853 }
2854
86877dfb
RP
2855 if (sc->sc_stagbeacons) { /* staggered beacons */
2856 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2857 uint32_t tsftu;
193b341d 2858
86877dfb
RP
2859 tsftu = ath_hal_gettsf32(ah) >> 10;
2860 /* XXX lintval */
2861 slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
2862 vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
2863 bfaddr = 0;
2864 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
2865 bf = ath_beacon_generate(sc, vap);
2866 if (bf != NULL)
2867 bfaddr = bf->bf_daddr;
2868 }
2869 } else { /* burst'd beacons */
2870 uint32_t *bflink = &bfaddr;
2871
2872 for (slot = 0; slot < ATH_BCBUF; slot++) {
2873 vap = sc->sc_bslot[slot];
2874 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
2875 bf = ath_beacon_generate(sc, vap);
2876 if (bf != NULL) {
2877 *bflink = bf->bf_daddr;
2878 bflink = &bf->bf_desc->ds_link;
2879 }
2880 }
2881 }
2882 *bflink = 0; /* terminate list */
ed33fa9f
SW
2883 }
2884
193b341d
SZ
2885 /*
2886 * Handle slot time change when a non-ERP station joins/leaves
2887 * an 11g network. The 802.11 layer notifies us via callback,
2888 * we mark updateslot, then wait one beacon before effecting
2889 * the change. This gives associated stations at least one
2890 * beacon interval to note the state change.
2891 */
2892 /* XXX locking */
86877dfb 2893 if (sc->sc_updateslot == UPDATE) {
193b341d 2894 sc->sc_updateslot = COMMIT; /* commit next beacon */
86877dfb
RP
2895 sc->sc_slotupdate = slot;
2896 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
193b341d
SZ
2897 ath_setslottime(sc); /* commit change to h/w */
2898
2899 /*
2900 * Check recent per-antenna transmit statistics and flip
2901 * the default antenna if noticeably more frames went out
2902 * on the non-default antenna.
 2903 * XXX assumes 2 antennas
2904 */
86877dfb
RP
2905 if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
2906 otherant = sc->sc_defant & 1 ? 2 : 1;
2907 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
2908 ath_setdefantenna(sc, otherant);
2909 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
2910 }
193b341d 2911
86877dfb
RP
2912 if (bfaddr != 0) {
2913 /*
2914 * Stop any current dma and put the new frame on the queue.
2915 * This should never fail since we check above that no frames
2916 * are still pending on the queue.
2917 */
2918 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
2919 DPRINTF(sc, ATH_DEBUG_ANY,
2920 "%s: beacon queue %u did not stop?\n",
2921 __func__, sc->sc_bhalq);
2922 }
2923 /* NB: cabq traffic should already be queued and primed */
2924 ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
2925 ath_hal_txstart(ah, sc->sc_bhalq);
2926
2927 sc->sc_stats.ast_be_xmit++;
2928 }
2929}
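/*
 * Illustrative sketch (stand-alone; not driver code): for staggered
 * beacons the SWBA handler above maps the current TSF (in TU) to one of
 * ATH_BCBUF slots within the beacon interval and then prepares the vap
 * one slot ahead.  With a 100 TU interval and 4 slots, a TSF of 1060 TU
 * lands in slot (60 * 4) / 100 = 2, so the beacon for slot 3 is generated.
 */
#if 0
static unsigned
ex_next_bslot(unsigned tsftu, unsigned lintval, unsigned nslots)
{
	unsigned slot = ((tsftu % lintval) * nslots) / lintval;

	return (slot + 1) % nslots;	/* beacon prepared one slot ahead */
}
#endif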
2930
2931static struct ath_buf *
2932ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
2933{
2934 struct ath_vap *avp = ATH_VAP(vap);
2935 struct ath_txq *cabq = sc->sc_cabq;
2936 struct ath_buf *bf;
2937 struct mbuf *m;
2938 int nmcastq, error;
2939
2940 KASSERT(vap->iv_state >= IEEE80211_S_RUN,
2941 ("not running, state %d", vap->iv_state));
2942 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
193b341d
SZ
2943
2944 /*
86877dfb
RP
2945 * Update dynamic beacon contents. If this returns
2946 * non-zero then we need to remap the memory because
2947 * the beacon frame changed size (probably because
2948 * of the TIM bitmap).
193b341d 2949 */
86877dfb
RP
2950 bf = avp->av_bcbuf;
2951 m = bf->bf_m;
2952 nmcastq = avp->av_mcastq.axq_depth;
2953 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
2954 /* XXX too conservative? */
2955 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
04522223
RP
2956 error = bus_dmamap_load_mbuf_segment(sc->sc_dmat, bf->bf_dmamap, m,
2957 bf->bf_segs, 1, &bf->bf_nseg,
86877dfb
RP
2958 BUS_DMA_NOWAIT);
2959 if (error != 0) {
2960 if_printf(vap->iv_ifp,
04522223 2961 "%s: bus_dmamap_load_mbuf_segment failed, error %u\n",
86877dfb
RP
2962 __func__, error);
2963 return NULL;
2964 }
2965 }
2966 if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
2967 DPRINTF(sc, ATH_DEBUG_BEACON,
2968 "%s: cabq did not drain, mcastq %u cabq %u\n",
2969 __func__, nmcastq, cabq->axq_depth);
2970 sc->sc_stats.ast_cabq_busy++;
2971 if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
2972 /*
2973 * CABQ traffic from a previous vap is still pending.
2974 * We must drain the q before this beacon frame goes
2975 * out as otherwise this vap's stations will get cab
2976 * frames from a different vap.
2977 * XXX could be slow causing us to miss DBA
2978 */
2979 ath_tx_draintxq(sc, cabq);
2980 }
193b341d 2981 }
86877dfb 2982 ath_beacon_setup(sc, bf);
193b341d
SZ
2983 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
2984
2985 /*
2986 * Enable the CAB queue before the beacon queue to
 2987 * ensure cab frames are triggered by this beacon.
2988 */
86877dfb
RP
2989 if (avp->av_boff.bo_tim[4] & 1) {
2990 struct ath_hal *ah = sc->sc_ah;
2991
8982d733 2992 /* NB: only at DTIM */
86877dfb
RP
2993 ATH_TXQ_LOCK(cabq);
2994 ATH_TXQ_LOCK(&avp->av_mcastq);
ed33fa9f
SW
2995 if (nmcastq) {
2996 struct ath_buf *bfm;
2997
2998 /*
2999 * Move frames from the s/w mcast q to the h/w cab q.
86877dfb 3000 * XXX MORE_DATA bit
ed33fa9f 3001 */
86877dfb 3002 bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q);
ed33fa9f
SW
3003 if (cabq->axq_link != NULL) {
3004 *cabq->axq_link = bfm->bf_daddr;
86877dfb 3005 } else
ed33fa9f
SW
3006 ath_hal_puttxbuf(ah, cabq->axq_qnum,
3007 bfm->bf_daddr);
86877dfb 3008 ath_txqmove(cabq, &avp->av_mcastq);
ed33fa9f
SW
3009
3010 sc->sc_stats.ast_cabq_xmit += nmcastq;
3011 }
3012 /* NB: gated by beacon so safe to start here */
3013 ath_hal_txstart(ah, cabq->axq_qnum);
86877dfb
RP
3014 ATH_TXQ_UNLOCK(cabq);
3015 ATH_TXQ_UNLOCK(&avp->av_mcastq);
3016 }
3017 return bf;
3018}
3019
3020static void
3021ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
3022{
3023 struct ath_vap *avp = ATH_VAP(vap);
3024 struct ath_hal *ah = sc->sc_ah;
3025 struct ath_buf *bf;
3026 struct mbuf *m;
3027 int error;
3028
3029 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
3030
3031 /*
3032 * Update dynamic beacon contents. If this returns
3033 * non-zero then we need to remap the memory because
3034 * the beacon frame changed size (probably because
3035 * of the TIM bitmap).
3036 */
3037 bf = avp->av_bcbuf;
3038 m = bf->bf_m;
3039 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
3040 /* XXX too conservative? */
3041 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
04522223
RP
3042 error = bus_dmamap_load_mbuf_segment(sc->sc_dmat, bf->bf_dmamap, m,
3043 bf->bf_segs, 1, &bf->bf_nseg,
86877dfb
RP
3044 BUS_DMA_NOWAIT);
3045 if (error != 0) {
3046 if_printf(vap->iv_ifp,
04522223 3047 "%s: bus_dmamap_load_mbuf_segment failed, error %u\n",
86877dfb
RP
3048 __func__, error);
3049 return;
3050 }
ed33fa9f 3051 }
86877dfb
RP
3052 ath_beacon_setup(sc, bf);
3053 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3054
3055 /* NB: caller is known to have already stopped tx dma */
193b341d
SZ
3056 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
3057 ath_hal_txstart(ah, sc->sc_bhalq);
193b341d
SZ
3058}
3059
3060/*
3061 * Reset the hardware after detecting beacons have stopped.
3062 */
3063static void
86877dfb 3064ath_bstuck_proc(void *arg, int pending)
193b341d 3065{
86877dfb
RP
3066 struct ath_softc *sc = arg;
3067 struct ifnet *ifp = sc->sc_ifp;
193b341d
SZ
3068
3069 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
86877dfb
RP
3070 sc->sc_bmisscount);
3071 sc->sc_stats.ast_bstuck++;
193b341d
SZ
3072 ath_reset(ifp);
3073}
3074
86877dfb
RP
3075/*
3076 * Reclaim beacon resources and return buffer to the pool.
3077 */
3078static void
3079ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
3080{
3081
3082 if (bf->bf_m != NULL) {
3083 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3084 m_freem(bf->bf_m);
3085 bf->bf_m = NULL;
3086 }
3087 if (bf->bf_node != NULL) {
3088 ieee80211_free_node(bf->bf_node);
3089 bf->bf_node = NULL;
3090 }
3091 STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
3092}
3093
193b341d
SZ
3094/*
3095 * Reclaim beacon resources.
3096 */
3097static void
3098ath_beacon_free(struct ath_softc *sc)
3099{
3100 struct ath_buf *bf;
3101
193b341d
SZ
3102 STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
3103 if (bf->bf_m != NULL) {
3104 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3105 m_freem(bf->bf_m);
3106 bf->bf_m = NULL;
3107 }
3108 if (bf->bf_node != NULL) {
3109 ieee80211_free_node(bf->bf_node);
3110 bf->bf_node = NULL;
3111 }
3112 }
3113}
3114
3115/*
3116 * Configure the beacon and sleep timers.
3117 *
3118 * When operating as an AP this resets the TSF and sets
3119 * up the hardware to notify us when we need to issue beacons.
3120 *
3121 * When operating in station mode this sets up the beacon
3122 * timers according to the timestamp of the last received
3123 * beacon and the current TSF, configures PCF and DTIM
3124 * handling, programs the sleep registers so the hardware
 3125 * will wake up in time to receive beacons, and configures
3126 * the beacon miss handling so we'll receive a BMISS
3127 * interrupt when we stop seeing beacons from the AP
3128 * we've associated with.
3129 */
3130static void
86877dfb 3131ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
193b341d
SZ
3132{
3133#define TSF_TO_TU(_h,_l) \
86877dfb 3134 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
193b341d
SZ
3135#define FUDGE 2
3136 struct ath_hal *ah = sc->sc_ah;
86877dfb
RP
3137 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3138 struct ieee80211_node *ni;
3139 u_int32_t nexttbtt, intval, tsftu;
3140 u_int64_t tsf;
3141
3142 if (vap == NULL)
3143 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
3144 ni = vap->iv_bss;
193b341d
SZ
3145
3146 /* extract tstamp from last beacon and convert to TU */
3147 nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
3148 LE_READ_4(ni->ni_tstamp.data));
86877dfb
RP
3149 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3150 ic->ic_opmode == IEEE80211_M_MBSS) {
3151 /*
3152 * For multi-bss ap/mesh support beacons are either staggered
3153 * evenly over N slots or burst together. For the former
3154 * arrange for the SWBA to be delivered for each slot.
3155 * Slots that are not occupied will generate nothing.
3156 */
3157 /* NB: the beacon interval is kept internally in TU's */
3158 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3159 if (sc->sc_stagbeacons)
3160 intval /= ATH_BCBUF;
3161 } else {
3162 /* NB: the beacon interval is kept internally in TU's */
3163 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3164 }
193b341d
SZ
3165 if (nexttbtt == 0) /* e.g. for ap mode */
3166 nexttbtt = intval;
3167 else if (intval) /* NB: can be 0 for monitor mode */
3168 nexttbtt = roundup(nexttbtt, intval);
3169 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
3170 __func__, nexttbtt, intval, ni->ni_intval);
86877dfb 3171 if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
193b341d
SZ
3172 HAL_BEACON_STATE bs;
3173 int dtimperiod, dtimcount;
3174 int cfpperiod, cfpcount;
3175
3176 /*
3177 * Setup dtim and cfp parameters according to
3178 * last beacon we received (which may be none).
3179 */
3180 dtimperiod = ni->ni_dtim_period;
3181 if (dtimperiod <= 0) /* NB: 0 if not known */
3182 dtimperiod = 1;
3183 dtimcount = ni->ni_dtim_count;
3184 if (dtimcount >= dtimperiod) /* NB: sanity check */
3185 dtimcount = 0; /* XXX? */
3186 cfpperiod = 1; /* NB: no PCF support yet */
3187 cfpcount = 0;
3188 /*
3189 * Pull nexttbtt forward to reflect the current
3190 * TSF and calculate dtim+cfp state for the result.
3191 */
3192 tsf = ath_hal_gettsf64(ah);
3193 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3194 do {
3195 nexttbtt += intval;
3196 if (--dtimcount < 0) {
3197 dtimcount = dtimperiod - 1;
3198 if (--cfpcount < 0)
3199 cfpcount = cfpperiod - 1;
3200 }
3201 } while (nexttbtt < tsftu);
3202 memset(&bs, 0, sizeof(bs));
3203 bs.bs_intval = intval;
3204 bs.bs_nexttbtt = nexttbtt;
3205 bs.bs_dtimperiod = dtimperiod*intval;
3206 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
3207 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
3208 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
3209 bs.bs_cfpmaxduration = 0;
3210#if 0
3211 /*
3212 * The 802.11 layer records the offset to the DTIM
3213 * bitmap while receiving beacons; use it here to
3214 * enable h/w detection of our AID being marked in
3215 * the bitmap vector (to indicate frames for us are
3216 * pending at the AP).
3217 * XXX do DTIM handling in s/w to WAR old h/w bugs
3218 * XXX enable based on h/w rev for newer chips
3219 */
3220 bs.bs_timoffset = ni->ni_timoff;
3221#endif
3222 /*
3223 * Calculate the number of consecutive beacons to miss
86877dfb 3224 * before taking a BMISS interrupt.
193b341d
SZ
3225 * Note that we clamp the result to at most 10 beacons.
3226 */
86877dfb 3227 bs.bs_bmissthreshold = vap->iv_bmissthreshold;
193b341d
SZ
3228 if (bs.bs_bmissthreshold > 10)
3229 bs.bs_bmissthreshold = 10;
3230 else if (bs.bs_bmissthreshold <= 0)
3231 bs.bs_bmissthreshold = 1;
3232
3233 /*
3234 * Calculate sleep duration. The configuration is
 3235 * given in ms. We ensure a multiple of the beacon
 3236 * period is used. Also, if the sleep duration is
 3237 * greater than the DTIM period then it makes sense
3238 * to make it a multiple of that.
3239 *
3240 * XXX fixed at 100ms
3241 */
3242 bs.bs_sleepduration =
3243 roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
3244 if (bs.bs_sleepduration > bs.bs_dtimperiod)
3245 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
3246
86877dfb 3247 DPRINTF(sc, ATH_DEBUG_BEACON,
193b341d
SZ
3248 "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
3249 , __func__
3250 , tsf, tsftu
3251 , bs.bs_intval
3252 , bs.bs_nexttbtt
3253 , bs.bs_dtimperiod
3254 , bs.bs_nextdtim
3255 , bs.bs_bmissthreshold
3256 , bs.bs_sleepduration
3257 , bs.bs_cfpperiod
3258 , bs.bs_cfpmaxduration
3259 , bs.bs_cfpnext
3260 , bs.bs_timoffset
3261 );
3262 ath_hal_intrset(ah, 0);
3263 ath_hal_beacontimers(ah, &bs);
3264 sc->sc_imask |= HAL_INT_BMISS;
3265 ath_hal_intrset(ah, sc->sc_imask);
3266 } else {
3267 ath_hal_intrset(ah, 0);
3268 if (nexttbtt == intval)
3269 intval |= HAL_BEACON_RESET_TSF;
3270 if (ic->ic_opmode == IEEE80211_M_IBSS) {
3271 /*
3272 * In IBSS mode enable the beacon timers but only
3273 * enable SWBA interrupts if we need to manually
3274 * prepare beacon frames. Otherwise we use a
3275 * self-linked tx descriptor and let the hardware
3276 * deal with things.
3277 */
3278 intval |= HAL_BEACON_ENA;
3279 if (!sc->sc_hasveol)
3280 sc->sc_imask |= HAL_INT_SWBA;
3281 if ((intval & HAL_BEACON_RESET_TSF) == 0) {
3282 /*
3283 * Pull nexttbtt forward to reflect
3284 * the current TSF.
3285 */
3286 tsf = ath_hal_gettsf64(ah);
3287 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3288 do {
3289 nexttbtt += intval;
3290 } while (nexttbtt < tsftu);
3291 }
3292 ath_beaconq_config(sc);
86877dfb
RP
3293 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3294 ic->ic_opmode == IEEE80211_M_MBSS) {
193b341d 3295 /*
86877dfb
RP
3296 * In AP/mesh mode we enable the beacon timers
3297 * and SWBA interrupts to prepare beacon frames.
193b341d
SZ
3298 */
3299 intval |= HAL_BEACON_ENA;
3300 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */
3301 ath_beaconq_config(sc);
3302 }
3303 ath_hal_beaconinit(ah, nexttbtt, intval);
3304 sc->sc_bmisscount = 0;
3305 ath_hal_intrset(ah, sc->sc_imask);
3306 /*
3307 * When using a self-linked beacon descriptor in
3308 * ibss mode load it once here.
3309 */
3310 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
86877dfb 3311 ath_beacon_start_adhoc(sc, vap);
193b341d
SZ
3312 }
3313 sc->sc_syncbeacon = 0;
3314#undef FUDGE
3315#undef TSF_TO_TU
3316}
3317
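/*
 * bus_dmamap_load() callback for the descriptor area: the DMA tag is
 * created with a single segment, so just record that segment's bus
 * address for the caller.
 */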
3318static void
3319ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3320{
3321 bus_addr_t *paddr = (bus_addr_t*) arg;
3322 KASSERT(error == 0, ("error %u on bus_dma callback", error));
3323 *paddr = segs->ds_addr;
3324}
3325
3326static int
3327ath_descdma_setup(struct ath_softc *sc,
3328 struct ath_descdma *dd, ath_bufhead *head,
3329 const char *name, int nbuf, int ndesc)
3330{
3331#define DS2PHYS(_dd, _ds) \
3332 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
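	/*
	 * DS2PHYS() maps a descriptor's kernel virtual address to its
	 * bus address by adding its byte offset within dd_desc to
	 * dd_desc_paddr; e.g. ds = dd_desc + 2 maps to
	 * dd_desc_paddr + 2 * sizeof(struct ath_desc).
	 */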
3333 struct ifnet *ifp = sc->sc_ifp;
3334 struct ath_desc *ds;
3335 struct ath_buf *bf;
3336 int i, bsize, error;
3337
3338 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
3339 __func__, name, nbuf, ndesc);
3340
3341 dd->dd_name = name;
3342 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
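	/*
	 * The descriptor area is sized for nbuf * ndesc hardware
	 * descriptors; e.g. the tx setup in ath_desc_alloc() below
	 * passes nbuf = ath_txbuf and ndesc = ATH_TXDESC, so each tx
	 * buffer gets ATH_TXDESC contiguous descriptors.
	 */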
3343
3344 /*
3345 * Setup DMA descriptor area.
3346 */
3347 error = bus_dma_tag_create(dd->dd_dmat, /* parent */
3348 PAGE_SIZE, 0, /* alignment, bounds */
3349 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
3350 BUS_SPACE_MAXADDR, /* highaddr */
3351 NULL, NULL, /* filter, filterarg */
3352 dd->dd_desc_len, /* maxsize */
3353 1, /* nsegments */
3354 dd->dd_desc_len, /* maxsegsize */
3355 BUS_DMA_ALLOCNOW, /* flags */
3356 &dd->dd_dmat);
3357 if (error != 0) {
3358 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3359 return error;
3360 }
3361
3362 /* allocate descriptors */
3363 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
3364 if (error != 0) {
3365 if_printf(ifp, "unable to create dmamap for %s descriptors, "
3366 "error %u\n", dd->dd_name, error);
3367 goto fail0;
3368 }
3369
3370 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3371 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3372 &dd->dd_dmamap);
3373 if (error != 0) {
3374 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3375 "error %u\n", nbuf * ndesc, dd->dd_name, error);
3376 goto fail1;
3377 }
3378
3379 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3380 dd->dd_desc, dd->dd_desc_len,
3381 ath_load_cb, &dd->dd_desc_paddr,
3382 BUS_DMA_NOWAIT);
3383 if (error != 0) {
3384 if_printf(ifp, "unable to map %s descriptors, error %u\n",
3385 dd->dd_name, error);
3386 goto fail2;
3387 }
3388
3389 ds = dd->dd_desc;
3390 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3391 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
3392 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
3393
3394 /* allocate rx buffers */
3395 bsize = sizeof(struct ath_buf) * nbuf;
3396 bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT | M_ZERO);
3397 if (bf == NULL) {
3398 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3399 dd->dd_name, bsize);
3400 goto fail3;
3401 }
3402 dd->dd_bufptr = bf;
3403
3404 STAILQ_INIT(head);
3405 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
3406 bf->bf_desc = ds;
3407 bf->bf_daddr = DS2PHYS(dd, ds);
3408 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3409 &bf->bf_dmamap);
3410 if (error != 0) {
3411 if_printf(ifp, "unable to create dmamap for %s "
3412 "buffer %u, error %u\n", dd->dd_name, i, error);
3413 ath_descdma_cleanup(sc, dd, head);
3414 return error;
3415 }
3416 STAILQ_INSERT_TAIL(head, bf, bf_list);
3417 }
3418 return 0;
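	/*
	 * Error unwind: each label below releases the resources acquired
	 * before the failing step, in reverse order of acquisition.
	 */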
3419fail3:
3420 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3421fail2:
3422 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3423fail1:
3424 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3425fail0:
3426 bus_dma_tag_destroy(dd->dd_dmat);
3427 memset(dd, 0, sizeof(*dd));
3428 return error;
3429#undef DS2PHYS
3430}
3431
3432static void
3433ath_descdma_cleanup(struct ath_softc *sc,
3434 struct ath_descdma *dd, ath_bufhead *head)
3435{
3436 struct ath_buf *bf;
3437 struct ieee80211_node *ni;
3438
3439 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3440 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3441 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3442 bus_dma_tag_destroy(dd->dd_dmat);
3443
3444 STAILQ_FOREACH(bf, head, bf_list) {
3445 if (bf->bf_m) {
3446 m_freem(bf->bf_m);
3447 bf->bf_m = NULL;
3448 }
3449 if (bf->bf_dmamap != NULL) {
3450 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3451 bf->bf_dmamap = NULL;
3452 }
3453 ni = bf->bf_node;
3454 bf->bf_node = NULL;
3455 if (ni != NULL) {
3456 /*
3457 * Reclaim node reference.
3458 */
3459 ieee80211_free_node(ni);
3460 }
3461 }
3462
3463 STAILQ_INIT(head);
3464 kfree(dd->dd_bufptr, M_ATHDEV);
3465 memset(dd, 0, sizeof(*dd));
3466}
3467
3468static int
3469ath_desc_alloc(struct ath_softc *sc)
3470{
3471 int error;
3472
3473 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3474 "rx", ath_rxbuf, 1);
3475 if (error != 0)
3476 return error;
3477
3478 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3479 "tx", ath_txbuf, ATH_TXDESC);
3480 if (error != 0) {
3481 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3482 return error;
3483 }
3484
3485 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3486 "beacon", ATH_BCBUF, 1);
3487 if (error != 0) {
3488 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3489 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3490 return error;
3491 }
3492 return 0;
3493}
3494
3495static void
3496ath_desc_free(struct ath_softc *sc)
3497{
3498
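	/*
	 * A non-zero dd_desc_len marks a descriptor area that was set up
	 * successfully (ath_descdma_setup() and ath_descdma_cleanup()
	 * zero the structure otherwise), so only those areas are
	 * reclaimed here.
	 */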
3499 if (sc->sc_bdma.dd_desc_len != 0)
3500 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3501 if (sc->sc_txdma.dd_desc_len != 0)
3502 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3503 if (sc->sc_rxdma.dd_desc_len != 0)
3504 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3505}
3506
3507static struct ieee80211_node *
3508ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3509 {
3510 struct ieee80211com *ic = vap->iv_ic;
3511 struct ath_softc *sc = ic->ic_ifp->if_softc;
3512 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
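	/*
	 * One allocation holds both the driver's per-node state and the
	 * rate control module's private area (arc_space bytes), which
	 * ath_rate_node_init() below sets up.
	 */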
3513 struct ath_node *an;
3514
3515 an = kmalloc(space, M_80211_NODE, M_INTWAIT|M_ZERO);
3516 if (an == NULL) {
3517 /* XXX stat+msg */
3518 return NULL;
3519 }
3520 ath_rate_node_init(sc, an);
3521
3522 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3523 return &an->an_node;
3524}
3525
3526static void
3527ath_node_free(struct ieee80211_node *ni)
3528{
3529 struct ieee80211com *ic = ni->ni_ic;
3530 struct ath_softc *sc = ic->ic_ifp->if_softc;
3531
3532 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3533
3534 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3535 sc->sc_node_free(ni);
3536}
3537
3538static void
3539ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3540 {
3541 struct ieee80211com *ic = ni->ni_ic;
3542 struct ath_softc *sc = ic->ic_ifp->if_softc;
3543 struct ath_hal *ah = sc->sc_ah;
3544
3545 *rssi = ic->ic_node_getrssi(ni);
3546 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3547 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
3548 else
3549 *noise = -95; /* nominally correct */
3550}
3551
3552static int
3553ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
3554{
3555 struct ath_hal *ah = sc->sc_ah;
3556 int error;
3557 struct mbuf *m;
3558 struct ath_desc *ds;
3559
3560 m = bf->bf_m;
3561 if (m == NULL) {
3562 /*
3563 * NB: by assigning a page to the rx dma buffer we
3564 * implicitly satisfy the Atheros requirement that
3565 * this buffer be cache-line-aligned and sized to be a
3566 * multiple of the cache line size. Not doing this
3567 * causes weird stuff to happen (for the 5210 at least).
3568 */
3569 m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
3570 if (m == NULL) {
3571 DPRINTF(sc, ATH_DEBUG_ANY,
3572 "%s: no mbuf/cluster\n", __func__);
3573 sc->sc_stats.ast_rx_nombuf++;
3574 return ENOMEM;
3575 }
3576 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
3577
3578 error = bus_dmamap_load_mbuf_segment(sc->sc_dmat,
3579 bf->bf_dmamap, m,
3580 bf->bf_segs, 1, &bf->bf_nseg,
3581 BUS_DMA_NOWAIT);
3582 if (error != 0) {
3583 DPRINTF(sc, ATH_DEBUG_ANY,
3584 "%s: bus_dmamap_load_mbuf_segment failed; error %d\n",
3585 __func__, error);
3586 sc->sc_stats.ast_rx_busdma++;
3587 m_freem(m);
3588 return error;
3589 }
3590 KASSERT(bf->bf_nseg == 1,
3591 ("multi-segment packet; nseg %u", bf->bf_nseg));