if_ath - Properly remove sysctls on detach.
[dragonfly.git] / sys / dev / netif / ath / ath / if_ath.c
1/*-
2 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 *
 29 * $FreeBSD: head/sys/dev/ath/if_ath.c 203751 2010-02-10 11:12:39Z rpaulo $
30 * $DragonFly$
31 */
32
33/*
34 * Driver for the Atheros Wireless LAN controller.
35 *
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
38 */
39
86877dfb 40#include "opt_inet.h"
193b341d 41#include "opt_ath.h"
86877dfb 42#include "opt_wlan.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/sysctl.h>
47#include <sys/mbuf.h>
48#include <sys/malloc.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/kernel.h>
52#include <sys/socket.h>
53#include <sys/sockio.h>
54#include <sys/errno.h>
55#include <sys/callout.h>
56#include <sys/bus.h>
57#include <sys/endian.h>
58#include <sys/kthread.h>
59#include <sys/taskqueue.h>
60#include <sys/priv.h>
61
62#include <net/if.h>
63#include <net/if_dl.h>
64#include <net/if_media.h>
65#include <net/if_types.h>
66#include <net/if_arp.h>
67#include <net/ethernet.h>
68#include <net/if_llc.h>
69#include <net/ifq_var.h>
70
71#include <netproto/802_11/ieee80211_var.h>
72#include <netproto/802_11/ieee80211_regdomain.h>
73#ifdef IEEE80211_SUPPORT_SUPERG
74#include <netproto/802_11/ieee80211_superg.h>
75#endif
76#ifdef IEEE80211_SUPPORT_TDMA
77#include <netproto/802_11/ieee80211_tdma.h>
78#endif
79
80#include <net/bpf.h>
81
82#ifdef INET
83#include <netinet/in.h>
84#include <netinet/if_ether.h>
8982d733 85#endif
86
87#include <dev/netif/ath/ath/if_athvar.h>
3f720b20 88#include <dev/netif/ath/hal/ath_hal/ah_devid.h> /* XXX for softled */
193b341d 89
90#ifdef ATH_TX99_DIAG
91#include <dev/netif/ath_tx99/ath_tx99.h>
92#endif
93
94/*
95 * ATH_BCBUF determines the number of vap's that can transmit
96 * beacons and also (currently) the number of vap's that can
97 * have unique mac addresses/bssid. When staggering beacons
98 * 4 is probably a good max as otherwise the beacons become
99 * very closely spaced and there is limited time for cab q traffic
100 * to go out. You can burst beacons instead but that is not good
101 * for stations in power save and at some point you really want
102 * another radio (and channel).
103 *
104 * The limit on the number of mac addresses is tied to our use of
105 * the U/L bit and tracking addresses in a byte; it would be
106 * worthwhile to allow more for applications like proxy sta.
107 */
108CTASSERT(ATH_BCBUF <= 8);
109
110/* unaligned little endian access */
111#define LE_READ_2(p) \
112 ((u_int16_t) \
113 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8)))
193b341d 114#define LE_READ_4(p) \
115 ((u_int32_t) \
116 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \
117 (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
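/*
 * Editor's note (illustrative, not part of the original source): because
 * these macros assemble the value one byte at a time, they are safe for
 * unaligned pointers on any host byte order; e.g. for p = {0x34, 0x12},
 * LE_READ_2(p) yields 0x1234.
 */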
118
119static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
120 const char name[IFNAMSIZ], int unit, int opmode,
121 int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
122 const uint8_t mac[IEEE80211_ADDR_LEN]);
123static void ath_vap_delete(struct ieee80211vap *);
193b341d 124static void ath_init(void *);
86877dfb 125static void ath_stop_locked(struct ifnet *);
126static void ath_stop(struct ifnet *);
127static void ath_start(struct ifnet *);
128static int ath_reset(struct ifnet *);
86877dfb 129static int ath_reset_vap(struct ieee80211vap *, u_long);
193b341d 130static int ath_media_change(struct ifnet *);
86877dfb 131static void ath_watchdog(void *);
193b341d 132static int ath_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
133static void ath_fatal_proc(void *, int);
134static void ath_bmiss_vap(struct ieee80211vap *);
135static void ath_bmiss_proc(void *, int);
136static int ath_keyset(struct ath_softc *, const struct ieee80211_key *,
137 struct ieee80211_node *);
138static int ath_key_alloc(struct ieee80211vap *,
139 struct ieee80211_key *,
193b341d 140 ieee80211_keyix *, ieee80211_keyix *);
86877dfb 141static int ath_key_delete(struct ieee80211vap *,
193b341d 142 const struct ieee80211_key *);
143static int ath_key_set(struct ieee80211vap *, const struct ieee80211_key *,
144 const u_int8_t mac[IEEE80211_ADDR_LEN]);
145static void ath_key_update_begin(struct ieee80211vap *);
146static void ath_key_update_end(struct ieee80211vap *);
147static void ath_update_mcast(struct ifnet *);
148static void ath_update_promisc(struct ifnet *);
149static void ath_mode_init(struct ath_softc *);
150static void ath_setslottime(struct ath_softc *);
151static void ath_updateslot(struct ifnet *);
152static int ath_beaconq_setup(struct ath_hal *);
153static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
86877dfb 154static void ath_beacon_update(struct ieee80211vap *, int item);
193b341d 155static void ath_beacon_setup(struct ath_softc *, struct ath_buf *);
156static void ath_beacon_proc(void *, int);
157static struct ath_buf *ath_beacon_generate(struct ath_softc *,
158 struct ieee80211vap *);
159static void ath_bstuck_proc(void *, int);
160static void ath_beacon_return(struct ath_softc *, struct ath_buf *);
193b341d 161static void ath_beacon_free(struct ath_softc *);
86877dfb 162static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
163static void ath_descdma_cleanup(struct ath_softc *sc,
164 struct ath_descdma *, ath_bufhead *);
165static int ath_desc_alloc(struct ath_softc *);
166static void ath_desc_free(struct ath_softc *);
167static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
168 const uint8_t [IEEE80211_ADDR_LEN]);
193b341d 169static void ath_node_free(struct ieee80211_node *);
170static void ath_node_getsignal(const struct ieee80211_node *,
171 int8_t *, int8_t *);
193b341d 172static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
173static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
174 int subtype, int rssi, int nf);
193b341d 175static void ath_setdefantenna(struct ath_softc *, u_int);
86877dfb 176static void ath_rx_proc(void *, int);
ed33fa9f 177static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
178static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
179static int ath_tx_setup(struct ath_softc *, int, int);
180static int ath_wme_update(struct ieee80211com *);
181static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
182static void ath_tx_cleanup(struct ath_softc *);
86877dfb 183static void ath_freetx(struct mbuf *);
184static int ath_tx_start(struct ath_softc *, struct ieee80211_node *,
185 struct ath_buf *, struct mbuf *);
186static void ath_tx_proc_q0(void *, int);
187static void ath_tx_proc_q0123(void *, int);
188static void ath_tx_proc(void *, int);
189static void ath_tx_draintxq(struct ath_softc *, struct ath_txq *);
190static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
191static void ath_draintxq(struct ath_softc *);
192static void ath_stoprecv(struct ath_softc *);
193static int ath_startrecv(struct ath_softc *);
194static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
195static void ath_scan_start(struct ieee80211com *);
196static void ath_scan_end(struct ieee80211com *);
197static void ath_set_channel(struct ieee80211com *);
193b341d 198static void ath_calibrate(void *);
86877dfb 199static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
200static void ath_setup_stationkey(struct ieee80211_node *);
201static void ath_newassoc(struct ieee80211_node *, int);
202static int ath_setregdomain(struct ieee80211com *,
203 struct ieee80211_regdomain *, int,
204 struct ieee80211_channel []);
205static void ath_getradiocaps(struct ieee80211com *, int, int *,
206 struct ieee80211_channel []);
207static int ath_getchannels(struct ath_softc *);
193b341d 208static void ath_led_event(struct ath_softc *, int);
209
210static int ath_rate_setup(struct ath_softc *, u_int mode);
211static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
212
213static void ath_sysctlattach(struct ath_softc *);
214static int ath_raw_xmit(struct ieee80211_node *,
215 struct mbuf *, const struct ieee80211_bpf_params *);
216static void ath_announce(struct ath_softc *);
217
218#ifdef IEEE80211_SUPPORT_TDMA
219static void ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
220 u_int32_t bintval);
221static void ath_tdma_bintvalsetup(struct ath_softc *sc,
222 const struct ieee80211_tdma_state *tdma);
223static void ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
224static void ath_tdma_update(struct ieee80211_node *ni,
225 const struct ieee80211_tdma_param *tdma, int);
226static void ath_tdma_beacon_send(struct ath_softc *sc,
227 struct ieee80211vap *vap);
228
229static __inline void
230ath_hal_setcca(struct ath_hal *ah, int ena)
231{
232 /*
233 * NB: fill me in; this is not provided by default because disabling
234 * CCA in most locales violates regulatory.
235 */
236}
237
238static __inline int
239ath_hal_getcca(struct ath_hal *ah)
240{
241 u_int32_t diag;
242 if (ath_hal_getcapability(ah, HAL_CAP_DIAG, 0, &diag) != HAL_OK)
243 return 1;
244 return ((diag & 0x500000) == 0);
245}
246
247#define TDMA_EP_MULTIPLIER (1<<10) /* pow2 to optimize out * and / */
248#define TDMA_LPF_LEN 6
249#define TDMA_DUMMY_MARKER 0x127
250#define TDMA_EP_MUL(x, mul) ((x) * (mul))
251#define TDMA_IN(x) (TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
252#define TDMA_LPF(x, y, len) \
253 ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
254#define TDMA_SAMPLE(x, y) do { \
255 x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN); \
256} while (0)
257#define TDMA_EP_RND(x,mul) \
258 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
259#define TDMA_AVG(x) TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
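/*
 * Editor's note (illustrative, not part of the original source): the macros
 * above form a small fixed-point low-pass filter.  TDMA_IN() scales a sample
 * by TDMA_EP_MULTIPLIER (1024), TDMA_LPF() folds it into the running value
 * as x = (x*(TDMA_LPF_LEN-1) + y) / TDMA_LPF_LEN, and TDMA_AVG()/TDMA_EP_RND()
 * scale the result back down with rounding when it is read out.
 */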
260#endif /* IEEE80211_SUPPORT_TDMA */
261
262SYSCTL_DECL(_hw_ath);
263
264/* XXX validate sysctl values */
265static int ath_longcalinterval = 30; /* long cals every 30 secs */
266SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
267 0, "long chip calibration interval (secs)");
268static int ath_shortcalinterval = 100; /* short cals every 100 ms */
269SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
270 0, "short chip calibration interval (msecs)");
271static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */
272SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
273 0, "reset chip calibration results (secs)");
274
275static int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
86877dfb 276SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
277 0, "rx buffers allocated");
278TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
279static int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
86877dfb 280SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
281 0, "tx buffers allocated");
282TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
283
284static int ath_bstuck_threshold = 4; /* max missed beacons */
285SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
286 0, "max missed beacon xmits before chip reset");
287
193b341d 288#ifdef ATH_DEBUG
289enum {
290 ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
291 ATH_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
292 ATH_DEBUG_RECV = 0x00000004, /* basic recv operation */
293 ATH_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
294 ATH_DEBUG_RATE = 0x00000010, /* rate control */
295 ATH_DEBUG_RESET = 0x00000020, /* reset processing */
296 ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */
297 ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */
298 ATH_DEBUG_WATCHDOG = 0x00000100, /* watchdog timeout */
299 ATH_DEBUG_INTR = 0x00001000, /* ISR */
300 ATH_DEBUG_TX_PROC = 0x00002000, /* tx ISR proc */
301 ATH_DEBUG_RX_PROC = 0x00004000, /* rx ISR proc */
302 ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */
303 ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */
304 ATH_DEBUG_KEYCACHE = 0x00020000, /* key cache management */
305 ATH_DEBUG_STATE = 0x00040000, /* 802.11 state transitions */
306 ATH_DEBUG_NODE = 0x00080000, /* node management */
307 ATH_DEBUG_LED = 0x00100000, /* led management */
308 ATH_DEBUG_FF = 0x00200000, /* fast frames */
309 ATH_DEBUG_DFS = 0x00400000, /* DFS processing */
310 ATH_DEBUG_TDMA = 0x00800000, /* TDMA processing */
311 ATH_DEBUG_TDMA_TIMER = 0x01000000, /* TDMA timer processing */
312 ATH_DEBUG_REGDOMAIN = 0x02000000, /* regulatory processing */
313 ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */
314 ATH_DEBUG_ANY = 0xffffffff
315};
316static int ath_debug = 0;
317SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug,
318 0, "control debugging printfs");
319TUNABLE_INT("hw.ath.debug", &ath_debug);
320
321#define IFF_DUMPPKTS(sc, m) \
322 ((sc->sc_debug & (m)) || \
86877dfb 323 (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
324#define DPRINTF(sc, m, fmt, ...) do { \
325 if (sc->sc_debug & (m)) \
2508f206 326 kprintf(fmt, __VA_ARGS__); \
193b341d 327} while (0)
2508f206 328#define ether_sprintf(x) "<dummy>"
329#define KEYPRINTF(sc, ix, hk, mac) do { \
330 if (sc->sc_debug & ATH_DEBUG_KEYCACHE) \
8982d733 331 ath_keyprint(sc, __func__, ix, hk, mac); \
193b341d 332} while (0)
333static void ath_printrxbuf(struct ath_softc *, const struct ath_buf *bf,
334 u_int ix, int);
335static void ath_printtxbuf(struct ath_softc *, const struct ath_buf *bf,
336 u_int qnum, u_int ix, int done);
337#else
338#define IFF_DUMPPKTS(sc, m) \
86877dfb 339 ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
340#define DPRINTF(sc, m, fmt, ...) do { \
341 (void) sc; \
342} while (0)
343#define KEYPRINTF(sc, k, ix, mac) do { \
344 (void) sc; \
345} while (0)
346#endif
347
348MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
349
350int
86877dfb 351ath_attach(u_int16_t devid, struct ath_softc *sc)
193b341d 352{
353 struct ifnet *ifp;
354 struct ieee80211com *ic;
355 struct ath_hal *ah = NULL;
356 HAL_STATUS status;
357 int error = 0, i;
358 u_int wmodes;
359 uint8_t macaddr[IEEE80211_ADDR_LEN];
360
361 DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);
362
363 ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
364 if (ifp == NULL) {
365 device_printf(sc->sc_dev, "can not if_alloc()\n");
366 error = ENOSPC;
367 goto bad;
368 }
369 ic = ifp->if_l2com;
370
371 /* set these up early for if_printf use */
372 if_initname(ifp, device_get_name(sc->sc_dev),
86877dfb 373 device_get_unit(sc->sc_dev));
193b341d 374
375 /* prepare sysctl tree for use in sub modules */
376 sysctl_ctx_init(&sc->sc_sysctl_ctx);
377 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
378 SYSCTL_STATIC_CHILDREN(_hw),
379 OID_AUTO,
380 device_get_nameunit(sc->sc_dev),
381 CTLFLAG_RD, 0, "");
382
383 ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status);
384 if (ah == NULL) {
385 if_printf(ifp, "unable to attach hardware; HAL status %u\n",
386 status);
387 error = ENXIO;
86877dfb 388 goto bad;
389 }
390 sc->sc_ah = ah;
193b341d 391 sc->sc_invalid = 0; /* ready to go, enable interrupt handling */
392#ifdef ATH_DEBUG
393 sc->sc_debug = ath_debug;
394#endif
395
396 /*
397 * Check if the MAC has multi-rate retry support.
398 * We do this by trying to setup a fake extended
399 * descriptor. MAC's that don't have support will
400 * return false w/o doing anything. MAC's that do
401 * support it will return true w/o doing anything.
402 */
403 sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);
404
405 /*
406 * Check if the device has hardware counters for PHY
407 * errors. If so we need to enable the MIB interrupt
408 * so we can act on stat triggers.
409 */
410 if (ath_hal_hwphycounters(ah))
411 sc->sc_needmib = 1;
412
413 /*
414 * Get the hardware key cache size.
415 */
416 sc->sc_keymax = ath_hal_keycachesize(ah);
417 if (sc->sc_keymax > ATH_KEYMAX) {
418 if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
419 ATH_KEYMAX, sc->sc_keymax);
420 sc->sc_keymax = ATH_KEYMAX;
421 }
422 /*
423 * Reset the key cache since some parts do not
424 * reset the contents on initial power up.
425 */
426 for (i = 0; i < sc->sc_keymax; i++)
427 ath_hal_keyreset(ah, i);
428
429 /*
86877dfb 430 * Collect the default channel list.
193b341d 431 */
432 error = ath_getchannels(sc);
433 if (error != 0)
434 goto bad;
435
436 /*
437 * Setup rate tables for all potential media types.
438 */
439 ath_rate_setup(sc, IEEE80211_MODE_11A);
440 ath_rate_setup(sc, IEEE80211_MODE_11B);
441 ath_rate_setup(sc, IEEE80211_MODE_11G);
442 ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
443 ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
444 ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
445 ath_rate_setup(sc, IEEE80211_MODE_11NA);
446 ath_rate_setup(sc, IEEE80211_MODE_11NG);
447 ath_rate_setup(sc, IEEE80211_MODE_HALF);
448 ath_rate_setup(sc, IEEE80211_MODE_QUARTER);
449
450 /* NB: setup here so ath_rate_update is happy */
451 ath_setcurmode(sc, IEEE80211_MODE_11A);
452
453 /*
454 * Allocate tx+rx descriptors and populate the lists.
455 */
456 error = ath_desc_alloc(sc);
86877dfb 457 if (error != 0) {
193b341d 458 if_printf(ifp, "failed to allocate descriptors: %d\n", error);
86877dfb 459 goto bad;
193b341d 460 }
193b341d 461 callout_init(&sc->sc_cal_ch);
462 callout_init(&sc->sc_wd_ch);
463
464 ATH_TXBUF_LOCK_INIT(sc);
465
a3062ee4 466 sc->sc_tq = taskqueue_create("ath_taskq", M_INTWAIT,
467 taskqueue_thread_enqueue, &sc->sc_tq);
468 taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1,
469 "%s taskq", ifp->if_xname);
470
471 TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
472 TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
473 TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);
474
475 /*
476 * Allocate hardware transmit queues: one queue for
477 * beacon frames and one data queue for each QoS
 478 * priority. Note that the hal handles resetting
479 * these queues at the needed time.
480 *
481 * XXX PS-Poll
482 */
483 sc->sc_bhalq = ath_beaconq_setup(ah);
86877dfb 484 if (sc->sc_bhalq == (u_int) -1) {
485 if_printf(ifp, "unable to setup a beacon xmit queue!\n");
486 error = EIO;
86877dfb 487 goto bad2;
193b341d 488 }
489 sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
490 if (sc->sc_cabq == NULL) {
491 if_printf(ifp, "unable to setup CAB xmit queue!\n");
492 error = EIO;
86877dfb 493 goto bad2;
193b341d 494 }
495 /* NB: insure BK queue is the lowest priority h/w queue */
496 if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
497 if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
498 ieee80211_wme_acnames[WME_AC_BK]);
499 error = EIO;
86877dfb 500 goto bad2;
501 }
502 if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
503 !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
504 !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
86877dfb 505 /*
506 * Not enough hardware tx queues to properly do WME;
507 * just punt and assign them all to the same h/w queue.
508 * We could do a better job of this if, for example,
509 * we allocate queues when we switch from station to
510 * AP mode.
511 */
512 if (sc->sc_ac2q[WME_AC_VI] != NULL)
513 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
514 if (sc->sc_ac2q[WME_AC_BE] != NULL)
515 ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
516 sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
517 sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
518 sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
519 }
520
86877dfb 521 /*
522 * Special case certain configurations. Note the
523 * CAB queue is handled by these specially so don't
524 * include them when checking the txq setup mask.
525 */
526 switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
527 case 0x01:
86877dfb 528 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
529 break;
530 case 0x0f:
86877dfb 531 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
532 break;
533 default:
86877dfb 534 TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
535 break;
536 }
537
538 /*
539 * Setup rate control. Some rate control modules
 540 * call back to change the antenna state so expose
541 * the necessary entry points.
542 * XXX maybe belongs in struct ath_ratectrl?
543 */
544 sc->sc_setdefantenna = ath_setdefantenna;
545 sc->sc_rc = ath_rate_attach(sc);
546 if (sc->sc_rc == NULL) {
547 error = EIO;
86877dfb 548 goto bad2;
549 }
550
551 sc->sc_blinking = 0;
552 sc->sc_ledstate = 1;
553 sc->sc_ledon = 0; /* low true */
554 sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */
555 callout_init_mp(&sc->sc_ledtimer);
556 /*
557 * Auto-enable soft led processing for IBM cards and for
558 * 5211 minipci cards. Users can also manually enable/disable
559 * support with a sysctl.
560 */
561 sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
562 if (sc->sc_softled) {
563 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
564 HAL_GPIO_MUX_MAC_NETWORK_LED);
565 ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
566 }
567
568 ifp->if_softc = sc;
569 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
570 ifp->if_start = ath_start;
571 ifp->if_ioctl = ath_ioctl;
572 ifp->if_init = ath_init;
573 ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
574 ifq_set_ready(&ifp->if_snd);
575
86877dfb 576 ic->ic_ifp = ifp;
577 /* XXX not right but it's not used anywhere important */
578 ic->ic_phytype = IEEE80211_T_OFDM;
579 ic->ic_opmode = IEEE80211_M_STA;
580 ic->ic_caps =
581 IEEE80211_C_STA /* station mode */
582 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */
583 | IEEE80211_C_HOSTAP /* hostap mode */
584 | IEEE80211_C_MONITOR /* monitor mode */
585 | IEEE80211_C_AHDEMO /* adhoc demo mode */
586 | IEEE80211_C_WDS /* 4-address traffic works */
587 | IEEE80211_C_MBSS /* mesh point link mode */
588 | IEEE80211_C_SHPREAMBLE /* short preamble supported */
589 | IEEE80211_C_SHSLOT /* short slot time supported */
590 | IEEE80211_C_WPA /* capable of WPA1+WPA2 */
591 | IEEE80211_C_BGSCAN /* capable of bg scanning */
592 | IEEE80211_C_TXFRAG /* handle tx frags */
593 ;
594 /*
595 * Query the hal to figure out h/w crypto support.
596 */
597 if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
86877dfb 598 ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
193b341d 599 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
86877dfb 600 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
193b341d 601 if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
86877dfb 602 ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
193b341d 603 if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
86877dfb 604 ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
193b341d 605 if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
86877dfb 606 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
607 /*
608 * Check if h/w does the MIC and/or whether the
609 * separate key cache entries are required to
610 * handle both tx+rx MIC keys.
611 */
612 if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
86877dfb 613 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
614 /*
615 * If the h/w supports storing tx+rx MIC keys
616 * in one cache slot automatically enable use.
617 */
618 if (ath_hal_hastkipsplit(ah) ||
619 !ath_hal_settkipsplit(ah, AH_FALSE))
193b341d 620 sc->sc_splitmic = 1;
621 /*
622 * If the h/w can do TKIP MIC together with WME then
623 * we use it; otherwise we force the MIC to be done
624 * in software by the net80211 layer.
625 */
626 if (ath_hal_haswmetkipmic(ah))
627 sc->sc_wmetkipmic = 1;
628 }
629 sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
630 /*
631 * Check for multicast key search support.
632 */
633 if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
634 !ath_hal_getmcastkeysearch(sc->sc_ah)) {
635 ath_hal_setmcastkeysearch(sc->sc_ah, 1);
636 }
193b341d 637 sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
86877dfb 638 /*
639 * Mark key cache slots associated with global keys
640 * as in use. If we knew TKIP was not to be used we
641 * could leave the +32, +64, and +32+64 slots free.
642 */
643 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
644 setbit(sc->sc_keymap, i);
645 setbit(sc->sc_keymap, i+64);
646 if (sc->sc_splitmic) {
647 setbit(sc->sc_keymap, i+32);
648 setbit(sc->sc_keymap, i+32+64);
649 }
650 }
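	/*
	 * Editor's note (illustrative, not part of the original source): with
	 * the layout above, global key 0 occupies cache slots 0 and 64 (plus
	 * 32 and 96 when split TKIP MIC is in use), which is why those slots
	 * are marked busy before any per-station keys are allocated.
	 */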
651 /*
652 * TPC support can be done either with a global cap or
653 * per-packet support. The latter is not available on
654 * all parts. We're a bit pedantic here as all parts
655 * support a global cap.
656 */
657 if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
658 ic->ic_caps |= IEEE80211_C_TXPMGT;
659
660 /*
661 * Mark WME capability only if we have sufficient
662 * hardware queues to do proper priority scheduling.
663 */
664 if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
665 ic->ic_caps |= IEEE80211_C_WME;
666 /*
667 * Check for misc other capabilities.
668 */
669 if (ath_hal_hasbursting(ah))
670 ic->ic_caps |= IEEE80211_C_BURST;
671 sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
672 sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
673 sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
674 if (ath_hal_hasfastframes(ah))
675 ic->ic_caps |= IEEE80211_C_FF;
676 wmodes = ath_hal_getwirelessmodes(ah);
677 if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
678 ic->ic_caps |= IEEE80211_C_TURBOP;
679#ifdef IEEE80211_SUPPORT_TDMA
680 if (ath_hal_macversion(ah) > 0x78) {
681 ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
682 ic->ic_tdma_update = ath_tdma_update;
683 }
684#endif
685 /*
686 * Indicate we need the 802.11 header padded to a
687 * 32-bit boundary for 4-address and QoS frames.
688 */
689 ic->ic_flags |= IEEE80211_F_DATAPAD;
690
691 /*
692 * Query the hal about antenna support.
693 */
694 sc->sc_defant = ath_hal_getdefantenna(ah);
695
696 /*
697 * Not all chips have the VEOL support we want to
698 * use with IBSS beacons; check here for it.
699 */
700 sc->sc_hasveol = ath_hal_hasveol(ah);
701
702 /* get mac address from hardware */
703 ath_hal_getmac(ah, macaddr);
704 if (sc->sc_hasbmask)
705 ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);
193b341d 706
707 /* NB: used to size node table key mapping array */
708 ic->ic_max_keyix = sc->sc_keymax;
193b341d 709 /* call MI attach routine. */
710 ieee80211_ifattach(ic, macaddr);
711 ic->ic_setregdomain = ath_setregdomain;
712 ic->ic_getradiocaps = ath_getradiocaps;
713 sc->sc_opmode = HAL_M_STA;
714
193b341d 715 /* override default methods */
716 ic->ic_newassoc = ath_newassoc;
717 ic->ic_updateslot = ath_updateslot;
718 ic->ic_wme.wme_update = ath_wme_update;
719 ic->ic_vap_create = ath_vap_create;
720 ic->ic_vap_delete = ath_vap_delete;
721 ic->ic_raw_xmit = ath_raw_xmit;
722 ic->ic_update_mcast = ath_update_mcast;
723 ic->ic_update_promisc = ath_update_promisc;
724 ic->ic_node_alloc = ath_node_alloc;
725 sc->sc_node_free = ic->ic_node_free;
726 ic->ic_node_free = ath_node_free;
727 ic->ic_node_getsignal = ath_node_getsignal;
728 ic->ic_scan_start = ath_scan_start;
729 ic->ic_scan_end = ath_scan_end;
730 ic->ic_set_channel = ath_set_channel;
731
732 ieee80211_radiotap_attach(ic,
733 &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
734 ATH_TX_RADIOTAP_PRESENT,
735 &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
736 ATH_RX_RADIOTAP_PRESENT);
737
738 /*
739 * Setup dynamic sysctl's now that country code and
740 * regdomain are available from the hal.
741 */
742 ath_sysctlattach(sc);
743
744 if (bootverbose)
745 ieee80211_announce(ic);
746 ath_announce(sc);
193b341d 747 return 0;
748bad2:
749 ath_tx_cleanup(sc);
750 ath_desc_free(sc);
751bad:
752 if (ah)
753 ath_hal_detach(ah);
754 if (ifp != NULL)
755 if_free(ifp);
756 sc->sc_invalid = 1;
757 return error;
758}
759
760int
761ath_detach(struct ath_softc *sc)
762{
86877dfb 763 struct ifnet *ifp = sc->sc_ifp;
764
765 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
766 __func__, ifp->if_flags);
767
768 /*
769 * NB: the order of these is important:
86877dfb 770 * o stop the chip so no more interrupts will fire
771 * o call the 802.11 layer before detaching the hal to
772 * insure callbacks into the driver to delete global
773 * key cache entries can be handled
86877dfb 774 * o free the taskqueue which drains any pending tasks
775 * o reclaim the tx queue data structures after calling
776 * the 802.11 layer as we'll get called back to reclaim
777 * node state and potentially want to use them
778 * o to cleanup the tx queues the hal is called, so detach
779 * it last
780 * Other than that, it's straightforward...
781 */
782 ath_stop(ifp);
783 ieee80211_ifdetach(ifp->if_l2com);
784 taskqueue_free(sc->sc_tq);
785#ifdef ATH_TX99_DIAG
786 if (sc->sc_tx99 != NULL)
787 sc->sc_tx99->detach(sc->sc_tx99);
788#endif
789 ath_rate_detach(sc->sc_rc);
790 ath_desc_free(sc);
791 ath_tx_cleanup(sc);
792 ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
793 if (sc->sc_sysctl_tree) {
794 sysctl_ctx_free(&sc->sc_sysctl_ctx);
795 sc->sc_sysctl_tree = NULL;
796 }
86877dfb 797 if_free(ifp);
193b341d 798
799 return 0;
800}
193b341d 801
802/*
803 * MAC address handling for multiple BSS on the same radio.
804 * The first vap uses the MAC address from the EEPROM. For
805 * subsequent vap's we set the U/L bit (bit 1) in the MAC
806 * address and use the next six bits as an index.
807 */
808static void
809assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
810{
811 int i;
193b341d 812
813 if (clone && sc->sc_hasbmask) {
814 /* NB: we only do this if h/w supports multiple bssid */
815 for (i = 0; i < 8; i++)
816 if ((sc->sc_bssidmask & (1<<i)) == 0)
817 break;
818 if (i != 0)
819 mac[0] |= (i << 2)|0x2;
820 } else
821 i = 0;
822 sc->sc_bssidmask |= 1<<i;
823 sc->sc_hwbssidmask[0] &= ~mac[0];
824 if (i == 0)
825 sc->sc_nbssid0++;
826}
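/*
 * Editor's note (illustrative, not part of the original source): if index 3
 * is the first free bit in sc_bssidmask, assign_address() sets
 * mac[0] |= (3 << 2) | 0x2, i.e. the address becomes locally administered
 * (U/L bit) with the index encoded above it, and sc_hwbssidmask[0] is
 * narrowed so the hardware still matches all addresses handed out.
 */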
827
828static void
829reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
830{
831 int i = mac[0] >> 2;
832 uint8_t mask;
833
834 if (i != 0 || --sc->sc_nbssid0 == 0) {
835 sc->sc_bssidmask &= ~(1<<i);
836 /* recalculate bssid mask from remaining addresses */
837 mask = 0xff;
838 for (i = 1; i < 8; i++)
839 if (sc->sc_bssidmask & (1<<i))
840 mask &= ~((i<<2)|0x2);
841 sc->sc_hwbssidmask[0] |= mask;
842 }
843}
193b341d 844
845/*
846 * Assign a beacon xmit slot. We try to space out
847 * assignments so when beacons are staggered the
848 * traffic coming out of the cab q has maximal time
849 * to go out before the next beacon is scheduled.
850 */
851static int
852assign_bslot(struct ath_softc *sc)
853{
854 u_int slot, free;
855
856 free = 0;
857 for (slot = 0; slot < ATH_BCBUF; slot++)
858 if (sc->sc_bslot[slot] == NULL) {
859 if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
860 sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
861 return slot;
862 free = slot;
863 /* NB: keep looking for a double slot */
864 }
865 return free;
866}
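/*
 * Editor's note (illustrative, not part of the original source): preferring
 * a slot whose neighbours (modulo ATH_BCBUF) are also free maximizes the gap
 * between staggered beacons; otherwise any free slot is returned.
 */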
193b341d 867
868static struct ieee80211vap *
869ath_vap_create(struct ieee80211com *ic,
870 const char name[IFNAMSIZ], int unit, int opmode, int flags,
871 const uint8_t bssid[IEEE80211_ADDR_LEN],
872 const uint8_t mac0[IEEE80211_ADDR_LEN])
873{
874 struct ath_softc *sc = ic->ic_ifp->if_softc;
875 struct ath_vap *avp;
876 struct ieee80211vap *vap;
877 uint8_t mac[IEEE80211_ADDR_LEN];
878 int ic_opmode, needbeacon, error;
879
880 avp = (struct ath_vap *) kmalloc(sizeof(struct ath_vap),
881 M_80211_VAP, M_WAITOK | M_ZERO);
882 needbeacon = 0;
883 IEEE80211_ADDR_COPY(mac, mac0);
884
885 ATH_LOCK(sc);
886 ic_opmode = opmode; /* default to opmode of new vap */
887 switch (opmode) {
888 case IEEE80211_M_STA:
889 if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */
890 device_printf(sc->sc_dev, "only 1 sta vap supported\n");
891 goto bad;
892 }
893 if (sc->sc_nvaps) {
894 /*
895 * With multiple vaps we must fall back
896 * to s/w beacon miss handling.
897 */
898 flags |= IEEE80211_CLONE_NOBEACONS;
899 }
900 if (flags & IEEE80211_CLONE_NOBEACONS) {
901 /*
902 * Station mode w/o beacons are implemented w/ AP mode.
903 */
904 ic_opmode = IEEE80211_M_HOSTAP;
905 }
906 break;
907 case IEEE80211_M_IBSS:
908 if (sc->sc_nvaps != 0) { /* XXX only 1 for now */
909 device_printf(sc->sc_dev,
910 "only 1 ibss vap supported\n");
911 goto bad;
912 }
913 needbeacon = 1;
914 break;
915 case IEEE80211_M_AHDEMO:
916#ifdef IEEE80211_SUPPORT_TDMA
917 if (flags & IEEE80211_CLONE_TDMA) {
918 if (sc->sc_nvaps != 0) {
919 device_printf(sc->sc_dev,
920 "only 1 tdma vap supported\n");
921 goto bad;
922 }
923 needbeacon = 1;
924 flags |= IEEE80211_CLONE_NOBEACONS;
925 }
926 /* fall thru... */
927#endif
928 case IEEE80211_M_MONITOR:
929 if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
930 /*
931 * Adopt existing mode. Adding a monitor or ahdemo
932 * vap to an existing configuration is of dubious
933 * value but should be ok.
934 */
935 /* XXX not right for monitor mode */
936 ic_opmode = ic->ic_opmode;
937 }
938 break;
939 case IEEE80211_M_HOSTAP:
940 case IEEE80211_M_MBSS:
941 needbeacon = 1;
942 break;
943 case IEEE80211_M_WDS:
944 if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
945 device_printf(sc->sc_dev,
946 "wds not supported in sta mode\n");
947 goto bad;
948 }
949 /*
950 * Silently remove any request for a unique
951 * bssid; WDS vap's always share the local
952 * mac address.
953 */
954 flags &= ~IEEE80211_CLONE_BSSID;
955 if (sc->sc_nvaps == 0)
956 ic_opmode = IEEE80211_M_HOSTAP;
957 else
958 ic_opmode = ic->ic_opmode;
959 break;
960 default:
961 device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
962 goto bad;
963 }
964 /*
965 * Check that a beacon buffer is available; the code below assumes it.
966 */
 967 if (needbeacon && STAILQ_EMPTY(&sc->sc_bbuf)) {
968 device_printf(sc->sc_dev, "no beacon buffer available\n");
969 goto bad;
970 }
971
972 /* STA, AHDEMO? */
973 if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
974 assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
975 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
976 }
977
978 vap = &avp->av_vap;
979 /* XXX can't hold mutex across if_alloc */
980 ATH_UNLOCK(sc);
981 error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
982 bssid, mac);
983 ATH_LOCK(sc);
984 if (error != 0) {
985 device_printf(sc->sc_dev, "%s: error %d creating vap\n",
986 __func__, error);
987 goto bad2;
988 }
989
990 /* h/w crypto support */
991 vap->iv_key_alloc = ath_key_alloc;
992 vap->iv_key_delete = ath_key_delete;
993 vap->iv_key_set = ath_key_set;
994 vap->iv_key_update_begin = ath_key_update_begin;
995 vap->iv_key_update_end = ath_key_update_end;
996
997 /* override various methods */
998 avp->av_recv_mgmt = vap->iv_recv_mgmt;
999 vap->iv_recv_mgmt = ath_recv_mgmt;
1000 vap->iv_reset = ath_reset_vap;
1001 vap->iv_update_beacon = ath_beacon_update;
1002 avp->av_newstate = vap->iv_newstate;
1003 vap->iv_newstate = ath_newstate;
1004 avp->av_bmiss = vap->iv_bmiss;
1005 vap->iv_bmiss = ath_bmiss_vap;
1006
1007 avp->av_bslot = -1;
1008 if (needbeacon) {
1009 /*
1010 * Allocate beacon state and setup the q for buffered
1011 * multicast frames. We know a beacon buffer is
1012 * available because we checked above.
1013 */
1014 avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf);
1015 STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list);
1016 if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
1017 /*
1018 * Assign the vap to a beacon xmit slot. As above
1019 * this cannot fail to find a free one.
1020 */
1021 avp->av_bslot = assign_bslot(sc);
1022 KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1023 ("beacon slot %u not empty", avp->av_bslot));
1024 sc->sc_bslot[avp->av_bslot] = vap;
1025 sc->sc_nbcnvaps++;
1026 }
1027 if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1028 /*
 1029 * Multiple vaps are to transmit beacons and we
1030 * have h/w support for TSF adjusting; enable
1031 * use of staggered beacons.
1032 */
1033 sc->sc_stagbeacons = 1;
1034 }
1035 ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1036 }
193b341d 1037
1038 ic->ic_opmode = ic_opmode;
1039 if (opmode != IEEE80211_M_WDS) {
1040 sc->sc_nvaps++;
1041 if (opmode == IEEE80211_M_STA)
1042 sc->sc_nstavaps++;
1043 if (opmode == IEEE80211_M_MBSS)
1044 sc->sc_nmeshvaps++;
1045 }
1046 switch (ic_opmode) {
1047 case IEEE80211_M_IBSS:
1048 sc->sc_opmode = HAL_M_IBSS;
1049 break;
1050 case IEEE80211_M_STA:
1051 sc->sc_opmode = HAL_M_STA;
1052 break;
1053 case IEEE80211_M_AHDEMO:
1054#ifdef IEEE80211_SUPPORT_TDMA
1055 if (vap->iv_caps & IEEE80211_C_TDMA) {
1056 sc->sc_tdma = 1;
1057 /* NB: disable tsf adjust */
1058 sc->sc_stagbeacons = 0;
1059 }
1060 /*
1061 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1062 * just ap mode.
1063 */
1064 /* fall thru... */
1065#endif
1066 case IEEE80211_M_HOSTAP:
1067 case IEEE80211_M_MBSS:
1068 sc->sc_opmode = HAL_M_HOSTAP;
1069 break;
1070 case IEEE80211_M_MONITOR:
1071 sc->sc_opmode = HAL_M_MONITOR;
1072 break;
1073 default:
1074 /* XXX should not happen */
1075 break;
1076 }
1077 if (sc->sc_hastsfadd) {
1078 /*
1079 * Configure whether or not TSF adjust should be done.
1080 */
1081 ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1082 }
1083 if (flags & IEEE80211_CLONE_NOBEACONS) {
1084 /*
1085 * Enable s/w beacon miss handling.
1086 */
1087 sc->sc_swbmiss = 1;
1088 }
1089 ATH_UNLOCK(sc);
193b341d 1090
1091 /* complete setup */
1092 ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1093 return vap;
1094bad2:
1095 reclaim_address(sc, mac);
1096 ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1097bad:
1098 kfree(avp, M_80211_VAP);
1099 ATH_UNLOCK(sc);
1100 return NULL;
1101}
193b341d 1102
1103static void
1104ath_vap_delete(struct ieee80211vap *vap)
1105{
1106 struct ieee80211com *ic = vap->iv_ic;
1107 struct ifnet *ifp = ic->ic_ifp;
1108 struct ath_softc *sc = ifp->if_softc;
1109 struct ath_hal *ah = sc->sc_ah;
1110 struct ath_vap *avp = ATH_VAP(vap);
193b341d 1111
1112 if (ifp->if_flags & IFF_RUNNING) {
1113 /*
1114 * Quiesce the hardware while we remove the vap. In
1115 * particular we need to reclaim all references to
1116 * the vap state by any frames pending on the tx queues.
1117 */
1118 ath_hal_intrset(ah, 0); /* disable interrupts */
1119 ath_draintxq(sc); /* stop xmit side */
1120 ath_stoprecv(sc); /* stop recv side */
1121 }
1122
1123 ieee80211_vap_detach(vap);
1124 ATH_LOCK(sc);
1125 /*
1126 * Reclaim beacon state. Note this must be done before
1127 * the vap instance is reclaimed as we may have a reference
1128 * to it in the buffer for the beacon frame.
1129 */
1130 if (avp->av_bcbuf != NULL) {
1131 if (avp->av_bslot != -1) {
1132 sc->sc_bslot[avp->av_bslot] = NULL;
1133 sc->sc_nbcnvaps--;
1134 }
1135 ath_beacon_return(sc, avp->av_bcbuf);
1136 avp->av_bcbuf = NULL;
1137 if (sc->sc_nbcnvaps == 0) {
1138 sc->sc_stagbeacons = 0;
1139 if (sc->sc_hastsfadd)
1140 ath_hal_settsfadjust(sc->sc_ah, 0);
1141 }
1142 /*
1143 * Reclaim any pending mcast frames for the vap.
1144 */
1145 ath_tx_draintxq(sc, &avp->av_mcastq);
1146 ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
1147 }
1148 /*
1149 * Update bookkeeping.
1150 */
1151 if (vap->iv_opmode == IEEE80211_M_STA) {
1152 sc->sc_nstavaps--;
1153 if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
1154 sc->sc_swbmiss = 0;
1155 } else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
1156 vap->iv_opmode == IEEE80211_M_MBSS) {
1157 reclaim_address(sc, vap->iv_myaddr);
1158 ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
1159 if (vap->iv_opmode == IEEE80211_M_MBSS)
1160 sc->sc_nmeshvaps--;
1161 }
1162 if (vap->iv_opmode != IEEE80211_M_WDS)
1163 sc->sc_nvaps--;
1164#ifdef IEEE80211_SUPPORT_TDMA
1165 /* TDMA operation ceases when the last vap is destroyed */
1166 if (sc->sc_tdma && sc->sc_nvaps == 0) {
1167 sc->sc_tdma = 0;
1168 sc->sc_swbmiss = 0;
1169 }
1170#endif
1171 ATH_UNLOCK(sc);
1172 kfree(avp, M_80211_VAP);
193b341d 1173
1174 if (ifp->if_flags & IFF_RUNNING) {
1175 /*
1176 * Restart rx+tx machines if still running (RUNNING will
1177 * be reset if we just destroyed the last vap).
1178 */
1179 if (ath_startrecv(sc) != 0)
1180 if_printf(ifp, "%s: unable to restart recv logic\n",
1181 __func__);
1182 if (sc->sc_beacons) { /* restart beacons */
1183#ifdef IEEE80211_SUPPORT_TDMA
1184 if (sc->sc_tdma)
1185 ath_tdma_config(sc, NULL);
1186 else
1187#endif
1188 ath_beacon_config(sc, NULL);
1189 }
1190 ath_hal_intrset(ah, sc->sc_imask);
1191 }
1192}
1193
1194void
1195ath_suspend(struct ath_softc *sc)
1196{
1197 struct ifnet *ifp = sc->sc_ifp;
1198 struct ieee80211com *ic = ifp->if_l2com;
1199
1200 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1201 __func__, ifp->if_flags);
193b341d 1202
1203 sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1204 if (ic->ic_opmode == IEEE80211_M_STA)
1205 ath_stop(ifp);
1206 else
1207 ieee80211_suspend_all(ic);
1208 /*
1209 * NB: don't worry about putting the chip in low power
1210 * mode; pci will power off our socket on suspend and
1211 * CardBus detaches the device.
1212 */
1213}
1214
1215/*
1216 * Reset the key cache since some parts do not reset the
1217 * contents on resume. First we clear all entries, then
1218 * re-load keys that the 802.11 layer assumes are setup
1219 * in h/w.
1220 */
1221static void
1222ath_reset_keycache(struct ath_softc *sc)
1223{
1224 struct ifnet *ifp = sc->sc_ifp;
1225 struct ieee80211com *ic = ifp->if_l2com;
1226 struct ath_hal *ah = sc->sc_ah;
1227 int i;
1228
1229 for (i = 0; i < sc->sc_keymax; i++)
1230 ath_hal_keyreset(ah, i);
1231 ieee80211_crypto_reload_keys(ic);
1232}
1233
1234void
1235ath_resume(struct ath_softc *sc)
1236{
1237 struct ifnet *ifp = sc->sc_ifp;
1238 struct ieee80211com *ic = ifp->if_l2com;
1239 struct ath_hal *ah = sc->sc_ah;
1240 HAL_STATUS status;
1241
1242 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1243 __func__, ifp->if_flags);
1244
1245 /*
1246 * Must reset the chip before we reload the
1247 * keycache as we were powered down on suspend.
1248 */
1249 ath_hal_reset(ah, sc->sc_opmode,
1250 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1251 AH_FALSE, &status);
1252 ath_reset_keycache(sc);
1253 if (sc->sc_resume_up) {
1254 if (ic->ic_opmode == IEEE80211_M_STA) {
1255 ath_init(sc);
1256 /*
1257 * Program the beacon registers using the last rx'd
1258 * beacon frame and enable sync on the next beacon
1259 * we see. This should handle the case where we
1260 * wakeup and find the same AP and also the case where
1261 * we wakeup and need to roam. For the latter we
1262 * should get bmiss events that trigger a roam.
1263 */
1264 ath_beacon_config(sc, NULL);
1265 sc->sc_syncbeacon = 1;
1266 } else
1267 ieee80211_resume_all(ic);
1268 }
1269 if (sc->sc_softled) {
1270 ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
1271 HAL_GPIO_MUX_MAC_NETWORK_LED);
1272 ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
193b341d 1273 }
1274}
1275
1276void
1277ath_shutdown(struct ath_softc *sc)
1278{
86877dfb 1279 struct ifnet *ifp = sc->sc_ifp;
1280
1281 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1282 __func__, ifp->if_flags);
193b341d 1283
1284 ath_stop(ifp);
1285 /* NB: no point powering down chip as we're about to reboot */
1286}
1287
1288/*
1289 * Interrupt handler. Most of the actual processing is deferred.
1290 */
1291void
1292ath_intr(void *arg)
1293{
1294 struct ath_softc *sc = arg;
86877dfb 1295 struct ifnet *ifp = sc->sc_ifp;
1296 struct ath_hal *ah = sc->sc_ah;
1297 HAL_INT status;
1298
1299 if (sc->sc_invalid) {
1300 /*
1301 * The hardware is not ready/present, don't touch anything.
1302 * Note this can happen early on if the IRQ is shared.
1303 */
1304 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
1305 return;
1306 }
1307 if (!ath_hal_intrpend(ah)) /* shared irq, not for us */
1308 return;
1309 if ((ifp->if_flags & IFF_UP) == 0 ||
1310 (ifp->if_flags & IFF_RUNNING) == 0) {
1311 HAL_INT status;
1312
1313 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1314 __func__, ifp->if_flags);
1315 ath_hal_getisr(ah, &status); /* clear ISR */
1316 ath_hal_intrset(ah, 0); /* disable further intr's */
1317 return;
1318 }
1319 /*
1320 * Figure out the reason(s) for the interrupt. Note
1321 * that the hal returns a pseudo-ISR that may include
1322 * bits we haven't explicitly enabled so we mask the
1323 * value to insure we only process bits we requested.
1324 */
1325 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
1326 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
1327 status &= sc->sc_imask; /* discard unasked for bits */
1328 if (status & HAL_INT_FATAL) {
1329 sc->sc_stats.ast_hardware++;
1330 ath_hal_intrset(ah, 0); /* disable intr's until reset */
86877dfb 1331 ath_fatal_proc(sc, 0);
1332 } else {
1333 if (status & HAL_INT_SWBA) {
1334 /*
1335 * Software beacon alert--time to send a beacon.
1336 * Handle beacon transmission directly; deferring
1337 * this is too slow to meet timing constraints
1338 * under load.
1339 */
1340#ifdef IEEE80211_SUPPORT_TDMA
1341 if (sc->sc_tdma) {
1342 if (sc->sc_tdmaswba == 0) {
1343 struct ieee80211com *ic = ifp->if_l2com;
1344 struct ieee80211vap *vap =
1345 TAILQ_FIRST(&ic->ic_vaps);
1346 ath_tdma_beacon_send(sc, vap);
1347 sc->sc_tdmaswba =
1348 vap->iv_tdma->tdma_bintval;
1349 } else
1350 sc->sc_tdmaswba--;
1351 } else
1352#endif
1353 {
1354 ath_beacon_proc(sc, 0);
1355#ifdef IEEE80211_SUPPORT_SUPERG
1356 /*
1357 * Schedule the rx taskq in case there's no
1358 * traffic so any frames held on the staging
1359 * queue are aged and potentially flushed.
1360 */
1361 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
1362#endif
1363 }
1364 }
1365 if (status & HAL_INT_RXEOL) {
1366 /*
1367 * NB: the hardware should re-read the link when
1368 * RXE bit is written, but it doesn't work at
1369 * least on older hardware revs.
1370 */
1371 sc->sc_stats.ast_rxeol++;
1372 sc->sc_rxlink = NULL;
1373 }
1374 if (status & HAL_INT_TXURN) {
1375 sc->sc_stats.ast_txurn++;
1376 /* bump tx trigger level */
1377 ath_hal_updatetxtriglevel(ah, AH_TRUE);
1378 }
1379 if (status & HAL_INT_RX)
86877dfb 1380 taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
193b341d 1381 if (status & HAL_INT_TX)
86877dfb 1382 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
1383 if (status & HAL_INT_BMISS) {
1384 sc->sc_stats.ast_bmiss++;
86877dfb 1385 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
1386 }
1387 if (status & HAL_INT_MIB) {
1388 sc->sc_stats.ast_mib++;
1389 /*
1390 * Disable interrupts until we service the MIB
1391 * interrupt; otherwise it will continue to fire.
1392 */
1393 ath_hal_intrset(ah, 0);
1394 /*
1395 * Let the hal handle the event. We assume it will
1396 * clear whatever condition caused the interrupt.
1397 */
1398 ath_hal_mibevent(ah, &sc->sc_halstats);
1399 ath_hal_intrset(ah, sc->sc_imask);
1400 }
1401 if (status & HAL_INT_RXORN) {
1402 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
1403 sc->sc_stats.ast_rxorn++;
1404 }
1405 }
1406}
1407
1408static void
86877dfb 1409ath_fatal_proc(void *arg, int pending)
193b341d 1410{
1411 struct ath_softc *sc = arg;
1412 struct ifnet *ifp = sc->sc_ifp;
1413 u_int32_t *state;
1414 u_int32_t len;
1415 void *sp;
1416
1417 if_printf(ifp, "hardware error; resetting\n");
1418 /*
1419 * Fatal errors are unrecoverable. Typically these
1420 * are caused by DMA errors. Collect h/w state from
1421 * the hal so we can diagnose what's going on.
1422 */
1423 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1424 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1425 state = sp;
1426 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1427 state[0], state[1] , state[2], state[3],
1428 state[4], state[5]);
1429 }
1430 ath_reset(ifp);
1431}
1432
1433static void
86877dfb 1434ath_bmiss_vap(struct ieee80211vap *vap)
193b341d 1435{
86877dfb
RP
1436 /*
1437 * Workaround phantom bmiss interrupts by sanity-checking
1438 * the time of our last rx'd frame. If it is within the
1439 * beacon miss interval then ignore the interrupt. If it's
1440 * truly a bmiss we'll get another interrupt soon and that'll
1441 * be dispatched up for processing. Note this applies only
1442 * for h/w beacon miss events.
1443 */
1444 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
1445 struct ifnet *ifp = vap->iv_ic->ic_ifp;
1446 struct ath_softc *sc = ifp->if_softc;
1447 u_int64_t lastrx = sc->sc_lastrx;
1448 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
193b341d 1449 u_int bmisstimeout =
86877dfb 1450 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
1451
1452 DPRINTF(sc, ATH_DEBUG_BEACON,
1453 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
1454 __func__, (unsigned long long) tsf,
1455 (unsigned long long)(tsf - lastrx),
1456 (unsigned long long) lastrx, bmisstimeout);
1457
1458 if (tsf - lastrx <= bmisstimeout) {
193b341d 1459 sc->sc_stats.ast_bmiss_phantom++;
1460 return;
1461 }
193b341d 1462 }
1463 ATH_VAP(vap)->av_bmiss(vap);
1464}
1465
1466static int
1467ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1468{
1469 uint32_t rsize;
1470 void *sp;
1471
1472 if (!ath_hal_getdiagstate(ah, 32, &mask, sizeof(mask), &sp, &rsize))
1473 return 0;
1474 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1475 *hangs = *(uint32_t *)sp;
1476 return 1;
1477}
1478
1479static void
1480ath_bmiss_proc(void *arg, int pending)
193b341d 1481{
1482 struct ath_softc *sc = arg;
1483 struct ifnet *ifp = sc->sc_ifp;
1484 uint32_t hangs;
193b341d 1485
1486 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1487
1488 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
 1489 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
1490 ath_reset(ifp);
1491 } else
1492 ieee80211_beacon_miss(ifp->if_l2com);
1493}
1494
1495/*
 1496 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
1497 * calcs together with WME. If necessary disable the crypto
1498 * hardware and mark the 802.11 state so keys will be setup
1499 * with the MIC work done in software.
1500 */
1501static void
1502ath_settkipmic(struct ath_softc *sc)
1503{
1504 struct ifnet *ifp = sc->sc_ifp;
1505 struct ieee80211com *ic = ifp->if_l2com;
1506
1507 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1508 if (ic->ic_flags & IEEE80211_F_WME) {
1509 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1510 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1511 } else {
1512 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1513 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1514 }
1515 }
1516}
1517
1518static void
1519ath_init(void *arg)
1520{
1521 struct ath_softc *sc = (struct ath_softc *) arg;
1522 struct ifnet *ifp = sc->sc_ifp;
1523 struct ieee80211com *ic = ifp->if_l2com;
1524 struct ath_hal *ah = sc->sc_ah;
1525 HAL_STATUS status;
1526
1527 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
1528 __func__, ifp->if_flags);
1529
86877dfb 1530 ATH_LOCK(sc);
1531 /*
1532 * Stop anything previously setup. This is safe
1533 * whether this is the first time through or not.
1534 */
86877dfb 1535 ath_stop_locked(ifp);
1536
1537 /*
1538 * The basic interface to setting the hardware in a good
1539 * state is ``reset''. On return the hardware is known to
1540 * be powered up and with interrupts disabled. This must
1541 * be followed by initialization of the appropriate bits
1542 * and then setup of the interrupt mask.
1543 */
1544 ath_settkipmic(sc);
1545 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
1546 if_printf(ifp, "unable to reset hardware; hal status %u\n",
1547 status);
86877dfb 1548 ATH_UNLOCK(sc);
1549 return;
1550 }
86877dfb 1551 ath_chan_change(sc, ic->ic_curchan);
1552
1553 /*
1554 * Likewise this is set during reset so update
1555 * state cached in the driver.
1556 */
1557 sc->sc_diversity = ath_hal_getdiversity(ah);
1558 sc->sc_lastlongcal = 0;
1559 sc->sc_resetcal = 1;
1560 sc->sc_lastcalreset = 0;
1561
1562 /*
1563 * Setup the hardware after reset: the key cache
1564 * is filled as needed and the receive engine is
1565 * set going. Frame transmit is handled entirely
1566 * in the frame output path; there's nothing to do
1567 * here except setup the interrupt mask.
1568 */
1569 if (ath_startrecv(sc) != 0) {
1570 if_printf(ifp, "unable to start recv logic\n");
86877dfb 1571 ATH_UNLOCK(sc);
1572 return;
1573 }
1574
1575 /*
1576 * Enable interrupts.
1577 */
1578 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
1579 | HAL_INT_RXEOL | HAL_INT_RXORN
1580 | HAL_INT_FATAL | HAL_INT_GLOBAL;
1581 /*
1582 * Enable MIB interrupts when there are hardware phy counters.
1583 * Note we only do this (at the moment) for station mode.
1584 */
1585 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
1586 sc->sc_imask |= HAL_INT_MIB;
1587
1588 ifp->if_flags |= IFF_RUNNING;
1589 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
1590 ath_hal_intrset(ah, sc->sc_imask);
193b341d 1591
1592 ATH_UNLOCK(sc);
1593
1594#ifdef ATH_TX99_DIAG
1595 if (sc->sc_tx99 != NULL)
1596 sc->sc_tx99->start(sc->sc_tx99);
1597 else
1598#endif
1599 ieee80211_start_all(ic); /* start all vap's */
1600}
1601
1602static void
86877dfb 1603ath_stop_locked(struct ifnet *ifp)
1604{
1605 struct ath_softc *sc = ifp->if_softc;
1606 struct ath_hal *ah = sc->sc_ah;
1607
1608 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
1609 __func__, sc->sc_invalid, ifp->if_flags);
1610
86877dfb 1611 ATH_LOCK_ASSERT(sc);
1612 if (ifp->if_flags & IFF_RUNNING) {
1613 /*
1614 * Shutdown the hardware and driver:
1615 * reset 802.11 state machine
1616 * turn off timers
1617 * disable interrupts
1618 * turn off the radio
1619 * clear transmit machinery
1620 * clear receive machinery
1621 * drain and release tx queues
1622 * reclaim beacon resources
1623 * power down hardware
1624 *
1625 * Note that some of this work is not possible if the
1626 * hardware is gone (invalid).
1627 */
1628#ifdef ATH_TX99_DIAG
1629 if (sc->sc_tx99 != NULL)
1630 sc->sc_tx99->stop(sc->sc_tx99);
1631#endif
1632 callout_stop(&sc->sc_wd_ch);
1633 sc->sc_wd_timer = 0;
193b341d 1634 ifp->if_flags &= ~IFF_RUNNING;
1635 if (!sc->sc_invalid) {
1636 if (sc->sc_softled) {
1637 callout_stop(&sc->sc_ledtimer);
1638 ath_hal_gpioset(ah, sc->sc_ledpin,
1639 !sc->sc_ledon);
1640 sc->sc_blinking = 0;
1641 }
1642 ath_hal_intrset(ah, 0);
1643 }
1644 ath_draintxq(sc);
1645 if (!sc->sc_invalid) {
1646 ath_stoprecv(sc);
1647 ath_hal_phydisable(ah);
1648 } else
1649 sc->sc_rxlink = NULL;
86877dfb 1650 ath_beacon_free(sc); /* XXX not needed */
1651 }
1652}
1653
1654static void
1655ath_stop(struct ifnet *ifp)
1656{
1657 struct ath_softc *sc = ifp->if_softc;
1658
1659 ATH_LOCK(sc);
1660 ath_stop_locked(ifp);
1661 ATH_UNLOCK(sc);
1662}
1663
1664/*
1665 * Reset the hardware w/o losing operational state. This is
1666 * basically a more efficient way of doing ath_stop, ath_init,
1667 * followed by state transitions to the current 802.11
1668 * operational state. Used to recover from various errors and
1669 * to reset or reload hardware state.
1670 */
1671static int
1672ath_reset(struct ifnet *ifp)
1673{
1674 struct ath_softc *sc = ifp->if_softc;
86877dfb 1675 struct ieee80211com *ic = ifp->if_l2com;
193b341d 1676 struct ath_hal *ah = sc->sc_ah;
1677 HAL_STATUS status;
1678
1679 ath_hal_intrset(ah, 0); /* disable interrupts */
1680 ath_draintxq(sc); /* stop xmit side */
1681 ath_stoprecv(sc); /* stop recv side */
86877dfb 1682 ath_settkipmic(sc); /* configure TKIP MIC handling */
193b341d 1683 /* NB: indicate channel change so we do a full reset */
86877dfb 1684 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
1685 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
1686 __func__, status);
193b341d 1687 sc->sc_diversity = ath_hal_getdiversity(ah);
1688 if (ath_startrecv(sc) != 0) /* restart recv */
1689 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
1690 /*
1691 * We may be doing a reset in response to an ioctl
1692 * that changes the channel so update any state that
1693 * might change as a result.
1694 */
1695 ath_chan_change(sc, ic->ic_curchan);
1696 if (sc->sc_beacons) { /* restart beacons */
1697#ifdef IEEE80211_SUPPORT_TDMA
1698 if (sc->sc_tdma)
1699 ath_tdma_config(sc, NULL);
1700 else
1701#endif
1702 ath_beacon_config(sc, NULL);
1703 }
1704 ath_hal_intrset(ah, sc->sc_imask);
1705
86877dfb 1706 ath_start(ifp); /* restart xmit */
1707 return 0;
1708}
1709
1710static int
1711ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
193b341d 1712{
1713 struct ieee80211com *ic = vap->iv_ic;
1714 struct ifnet *ifp = ic->ic_ifp;
1715 struct ath_softc *sc = ifp->if_softc;
1716 struct ath_hal *ah = sc->sc_ah;
1717
1718 switch (cmd) {
1719 case IEEE80211_IOC_TXPOWER:
1720 /*
1721 * If per-packet TPC is enabled, then we have nothing
1722 * to do; otherwise we need to force the global limit.
1723 * All this can happen directly; no need to reset.
1724 */
1725 if (!ath_hal_gettpc(ah))
1726 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
1727 return 0;
1728 }
1729 return ath_reset(ifp);
1730}
1731
1732static struct ath_buf *
1733_ath_getbuf_locked(struct ath_softc *sc)
1734{
1735 struct ath_buf *bf;
1736
1737 ATH_TXBUF_LOCK_ASSERT(sc);
1738
1739 bf = STAILQ_FIRST(&sc->sc_txbuf);
1740 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
1741 STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
1742 else
1743 bf = NULL;
1744 if (bf == NULL) {
1745 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
1746 STAILQ_FIRST(&sc->sc_txbuf) == NULL ?
1747 "out of xmit buffers" : "xmit buffer busy");
1748 }
1749 return bf;
1750}
1751
1752static struct ath_buf *
1753ath_getbuf(struct ath_softc *sc)
1754{
193b341d 1755 struct ath_buf *bf;
1756
1757 ATH_TXBUF_LOCK(sc);
1758 bf = _ath_getbuf_locked(sc);
1759 if (bf == NULL) {
1760 struct ifnet *ifp = sc->sc_ifp;
1761
1762 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
1763 sc->sc_stats.ast_tx_qstop++;
1764 ifp->if_flags |= IFF_OACTIVE;
1765 }
1766 ATH_TXBUF_UNLOCK(sc);
1767 return bf;
1768}
1769
1770/*
1771 * Cleanup driver resources when we run out of buffers
1772 * while processing fragments; return the tx buffers
1773 * allocated and drop node references.
1774 */
1775static void
1776ath_txfrag_cleanup(struct ath_softc *sc,
1777 ath_bufhead *frags, struct ieee80211_node *ni)
1778{
1779 struct ath_buf *bf, *next;
1780
1781 ATH_TXBUF_LOCK_ASSERT(sc);
1782
1783 STAILQ_FOREACH_MUTABLE(bf, frags, bf_list, next) {
1784 /* NB: bf assumed clean */
1785 STAILQ_REMOVE_HEAD(frags, bf_list);
1786 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1787 ieee80211_node_decref(ni);
1788 }
1789}
1790
1791/*
1792 * Setup xmit of a fragmented frame. Allocate a buffer
1793 * for each frag and bump the node reference count to
1794 * reflect the held reference to be setup by ath_tx_start.
1795 */
1796static int
1797ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
1798 struct mbuf *m0, struct ieee80211_node *ni)
1799{
193b341d 1800 struct mbuf *m;
86877dfb 1801 struct ath_buf *bf;
193b341d 1802
1803 ATH_TXBUF_LOCK(sc);
1804 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
1805 bf = _ath_getbuf_locked(sc);
1806 if (bf == NULL) { /* out of buffers, cleanup */
1807 ath_txfrag_cleanup(sc, frags, ni);
1808 break;
1809 }
1810 ieee80211_node_incref(ni);
1811 STAILQ_INSERT_TAIL(frags, bf, bf_list);
9db4b353 1812 }
86877dfb 1813 ATH_TXBUF_UNLOCK(sc);
9db4b353 1814
1815 return !STAILQ_EMPTY(frags);
1816}
1817
1818static void
1819ath_start(struct ifnet *ifp)
1820{
1821 struct ath_softc *sc = ifp->if_softc;
1822 struct ieee80211_node *ni;
1823 struct ath_buf *bf;
1824 struct mbuf *m, *next;
1825 ath_bufhead frags;
193b341d 1826
1827 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) {
1828 ifq_purge(&ifp->if_snd);
86877dfb 1829 return;
2508f206 1830 }
1831 for (;;) {
1832 /*
1833 * Grab a TX buffer and associated resources.
1834 */
1835 bf = ath_getbuf(sc);
1836 if (bf == NULL)
1837 break;
1838
1839 IF_DEQUEUE(&ifp->if_snd, m);
1840 if (m == NULL) {
1841 ATH_TXBUF_LOCK(sc);
1842 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1843 ATH_TXBUF_UNLOCK(sc);
1844 break;
1845 }
86877dfb 1846 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
193b341d 1847 /*
1848 * Check for fragmentation. If this frame
1849 * has been broken up verify we have enough
1850 * buffers to send all the fragments so all
1851 * go out or none...
193b341d 1852 */
1853 STAILQ_INIT(&frags);
1854 if ((m->m_flags & M_FRAG) &&
1855 !ath_txfrag_setup(sc, &frags, m, ni)) {
1856 DPRINTF(sc, ATH_DEBUG_XMIT,
1857 "%s: out of txfrag buffers\n", __func__);
1858 sc->sc_stats.ast_tx_nofrag++;
1859 ifp->if_oerrors++;
1860 ath_freetx(m);
1861 goto bad;
193b341d 1862 }
1863 ifp->if_opackets++;
1864 nextfrag:
1865 /*
1866 * Pass the frame to the h/w for transmission.
1867 * Fragmented frames have each frag chained together
1868 * with m_nextpkt. We know there are sufficient ath_buf's
1869 * to send all the frags because of work done by
1870 * ath_txfrag_setup. We leave m_nextpkt set while
1871	 * calling ath_tx_start so it can use it to extend
1872	 * the tx duration to cover the subsequent frag and
1873 * so it can reclaim all the mbufs in case of an error;
1874 * ath_tx_start clears m_nextpkt once it commits to
1875 * handing the frame to the hardware.
1876 */
1877 next = m->m_nextpkt;
193b341d 1878 if (ath_tx_start(sc, ni, bf, m)) {
86877dfb 1879 bad:
193b341d 1880 ifp->if_oerrors++;
1881 reclaim:
1882 bf->bf_m = NULL;
1883 bf->bf_node = NULL;
1884 ATH_TXBUF_LOCK(sc);
1885 STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1886 ath_txfrag_cleanup(sc, &frags, ni);
1887 ATH_TXBUF_UNLOCK(sc);
1888 if (ni != NULL)
1889 ieee80211_free_node(ni);
1890 continue;
1891 }
1892 if (next != NULL) {
1893 /*
1894 * Beware of state changing between frags.
1895 * XXX check sta power-save state?
1896 */
1897 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
1898 DPRINTF(sc, ATH_DEBUG_XMIT,
1899 "%s: flush fragmented packet, state %s\n",
1900 __func__,
1901 ieee80211_state_name[ni->ni_vap->iv_state]);
1902 ath_freetx(next);
1903 goto reclaim;
1904 }
1905 m = next;
1906 bf = STAILQ_FIRST(&frags);
1907 KASSERT(bf != NULL, ("no buf for txfrag"));
1908 STAILQ_REMOVE_HEAD(&frags, bf_list);
1909 goto nextfrag;
1910 }
193b341d 1911
86877dfb 1912 sc->sc_wd_timer = 5;
1913 }
1914}
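/*
 * Illustrative sketch (not from the driver): the fragment path above
 * reserves one tx buffer per fragment in the m_nextpkt chain up front so
 * that a fragment burst either goes out whole or not at all.  The struct
 * names and the simple counter pool below are assumptions made for the
 * sketch, not the real ath_buf/mbuf machinery.
 */
#if 0	/* illustrative sketch only -- not part of if_ath.c */
#include <stdbool.h>
#include <stddef.h>

struct ex_frag { struct ex_frag *next; };	/* stands in for the m_nextpkt chain */
struct ex_pool { size_t nfree; };		/* stands in for the sc_txbuf free list */

static bool
ex_reserve_frag_bufs(struct ex_pool *p, const struct ex_frag *f0)
{
	size_t need = 0;
	const struct ex_frag *f;

	for (f = f0; f != NULL; f = f->next)	/* count the fragments */
		need++;
	if (p->nfree < need)
		return false;			/* leave the pool untouched */
	p->nfree -= need;			/* commit only when all frags fit */
	return true;
}
#endif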
1915
1916static int
1917ath_media_change(struct ifnet *ifp)
1918{
1919 int error = ieee80211_media_change(ifp);
1920 /* NB: only the fixed rate can change and that doesn't need a reset */
1921 return (error == ENETRESET ? 0 : error);
1922}
1923
1924#ifdef ATH_DEBUG
1925static void
8982d733 1926ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
86877dfb 1927 const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
1928{
1929 static const char *ciphers[] = {
1930 "WEP",
1931 "AES-OCB",
1932 "AES-CCM",
1933 "CKIP",
1934 "TKIP",
1935 "CLR",
1936 };
1937 int i, n;
1938
2508f206 1939 kprintf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
193b341d 1940 for (i = 0, n = hk->kv_len; i < n; i++)
1941 kprintf("%02x", hk->kv_val[i]);
1942 kprintf(" mac %s", ether_sprintf(mac));
193b341d 1943 if (hk->kv_type == HAL_CIPHER_TKIP) {
2508f206 1944 kprintf(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
193b341d 1945 for (i = 0; i < sizeof(hk->kv_mic); i++)
2508f206 1946 kprintf("%02x", hk->kv_mic[i]);
8982d733 1947 if (!sc->sc_splitmic) {
2508f206 1948 kprintf(" txmic ");
8982d733 1949 for (i = 0; i < sizeof(hk->kv_txmic); i++)
2508f206 1950 kprintf("%02x", hk->kv_txmic[i]);
8982d733 1951 }
193b341d 1952 }
2508f206 1953 kprintf("\n");
1954}
1955#endif
1956
1957/*
1958 * Set a TKIP key into the hardware. This handles the
1959 * potential distribution of key state to multiple key
1960 * cache slots for TKIP.
1961 */
1962static int
1963ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
86877dfb 1964 HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
1965{
1966#define IEEE80211_KEY_XR (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
86877dfb 1967 static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
1968 struct ath_hal *ah = sc->sc_ah;
1969
1970 KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
1971 ("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
193b341d 1972 if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
1973 if (sc->sc_splitmic) {
1974 /*
1975 * TX key goes at first index, RX key at the rx index.
1976 * The hal handles the MIC keys at index+64.
1977 */
1978 memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
1979 KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
1980 if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
1981 return 0;
1982
1983 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
1984 KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
1985 /* XXX delete tx key on failure? */
1986 return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
1987 } else {
1988 /*
1989 * Room for both TX+RX MIC keys in one key cache
1990 * slot, just set key at the first index; the hal
86877dfb 1991 * will handle the rest.
1992 */
1993 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
8982d733 1994 memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
1995 KEYPRINTF(sc, k->wk_keyix, hk, mac);
1996 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
1997 }
1998 } else if (k->wk_flags & IEEE80211_KEY_XMIT) {
1999 if (sc->sc_splitmic) {
2000 /*
2001 * NB: must pass MIC key in expected location when
2002 * the keycache only holds one MIC key per entry.
2003 */
2004 memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_txmic));
2005 } else
2006 memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
2007 KEYPRINTF(sc, k->wk_keyix, hk, mac);
2008 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
2009 } else if (k->wk_flags & IEEE80211_KEY_RECV) {
2010 memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
2011 KEYPRINTF(sc, k->wk_keyix, hk, mac);
2012 return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
2013 }
2014 return 0;
2015#undef IEEE80211_KEY_XR
2016}
2017
2018/*
2019 * Set a net80211 key into the hardware. This handles the
2020 * potential distribution of key state to multiple key
2021 * cache slots for TKIP with hardware MIC support.
2022 */
2023static int
2024ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
2025 struct ieee80211_node *bss)
2026{
2027#define N(a) (sizeof(a)/sizeof(a[0]))
86877dfb 2028 static const u_int8_t ciphermap[] = {
2029 HAL_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */
2030 HAL_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */
2031 HAL_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */
2032 HAL_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */
86877dfb 2033 (u_int8_t) -1, /* 4 is not allocated */
2034 HAL_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */
2035 HAL_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */
2036 };
2037 struct ath_hal *ah = sc->sc_ah;
2038 const struct ieee80211_cipher *cip = k->wk_cipher;
2039 u_int8_t gmac[IEEE80211_ADDR_LEN];
2040 const u_int8_t *mac;
2041 HAL_KEYVAL hk;
2042
2043 memset(&hk, 0, sizeof(hk));
2044 /*
2045 * Software crypto uses a "clear key" so non-crypto
2046	 * state kept in the key cache is maintained and
2047 * so that rx frames have an entry to match.
2048 */
2049 if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2050 KASSERT(cip->ic_cipher < N(ciphermap),
2051 ("invalid cipher type %u", cip->ic_cipher));
2052 hk.kv_type = ciphermap[cip->ic_cipher];
2053 hk.kv_len = k->wk_keylen;
2054 memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
2055 } else
2056 hk.kv_type = HAL_CIPHER_CLR;
2057
2058 if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
2059 /*
2060 * Group keys on hardware that supports multicast frame
86877dfb 2061 * key search use a MAC that is the sender's address with
2062 * the high bit set instead of the app-specified address.
2063 */
2064 IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
2065 gmac[0] |= 0x80;
2066 mac = gmac;
2067 } else
86877dfb 2068 mac = k->wk_macaddr;
2069
2070 if (hk.kv_type == HAL_CIPHER_TKIP &&
8982d733 2071 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2072 return ath_keyset_tkip(sc, k, &hk, mac);
2073 } else {
2074 KEYPRINTF(sc, k->wk_keyix, &hk, mac);
2075 return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
2076 }
2077#undef N
2078}
2079
2080/*
2081 * Allocate tx/rx key slots for TKIP. We allocate two slots for
2082 * each key, one for decrypt/encrypt and the other for the MIC.
2083 */
86877dfb 2084static u_int16_t
2085key_alloc_2pair(struct ath_softc *sc,
2086 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2087{
2088#define N(a) (sizeof(a)/sizeof(a[0]))
2089 u_int i, keyix;
2090
2091 KASSERT(sc->sc_splitmic, ("key cache !split"));
2092 /* XXX could optimize */
2093 for (i = 0; i < N(sc->sc_keymap)/4; i++) {
86877dfb 2094 u_int8_t b = sc->sc_keymap[i];
2095 if (b != 0xff) {
2096 /*
2097 * One or more slots in this byte are free.
2098 */
2099 keyix = i*NBBY;
2100 while (b & 1) {
2101 again:
2102 keyix++;
2103 b >>= 1;
2104 }
2105 /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
2106 if (isset(sc->sc_keymap, keyix+32) ||
2107 isset(sc->sc_keymap, keyix+64) ||
2108 isset(sc->sc_keymap, keyix+32+64)) {
2109 /* full pair unavailable */
2110 /* XXX statistic */
2111 if (keyix == (i+1)*NBBY) {
2112 /* no slots were appropriate, advance */
2113 continue;
2114 }
2115 goto again;
2116 }
2117 setbit(sc->sc_keymap, keyix);
2118 setbit(sc->sc_keymap, keyix+64);
2119 setbit(sc->sc_keymap, keyix+32);
2120 setbit(sc->sc_keymap, keyix+32+64);
2121 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2122 "%s: key pair %u,%u %u,%u\n",
2123 __func__, keyix, keyix+64,
2124 keyix+32, keyix+32+64);
2125 *txkeyix = keyix;
2126 *rxkeyix = keyix+32;
2127 return 1;
2128 }
2129 }
2130 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2131 return 0;
2132#undef N
2133}
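/*
 * Illustrative sketch (not from the driver): key_alloc_2pair() above
 * relies on the split-MIC key cache layout, where a TKIP key at slot i
 * also occupies i+64 (TX MIC), i+32 (RX key) and i+32+64 (RX MIC).  The
 * sketch below claims those four related slots in an assumed 128-entry
 * bitmap; the helper names and the table size are assumptions for the
 * example only.
 */
#if 0	/* illustrative sketch only -- not part of if_ath.c */
#include <stdbool.h>
#include <stdint.h>

#define EX_NKEYS	128

static bool ex_isset(const uint8_t *map, unsigned b) { return (map[b / 8] >> (b % 8)) & 1; }
static void ex_setbit(uint8_t *map, unsigned b) { map[b / 8] |= (uint8_t)(1u << (b % 8)); }

static bool
ex_claim_tkip_pair(uint8_t map[EX_NKEYS / 8], unsigned keyix)
{
	/* TX key, TX MIC, RX key, RX MIC -- same offsets as the code above. */
	const unsigned slot[4] = { keyix, keyix + 64, keyix + 32, keyix + 32 + 64 };
	unsigned i;

	if (keyix + 32 + 64 >= EX_NKEYS)
		return false;
	for (i = 0; i < 4; i++)
		if (ex_isset(map, slot[i]))
			return false;		/* full pair unavailable */
	for (i = 0; i < 4; i++)
		ex_setbit(map, slot[i]);	/* claim all four slots */
	return true;
}
#endif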
2134
2135/*
2136 * Allocate tx/rx key slots for TKIP. We allocate two slots for
2137 * each key, one for decrypt/encrypt and the other for the MIC.
2138 */
86877dfb 2139static u_int16_t
2140key_alloc_pair(struct ath_softc *sc,
2141 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2142{
2143#define N(a) (sizeof(a)/sizeof(a[0]))
2144 u_int i, keyix;
2145
2146 KASSERT(!sc->sc_splitmic, ("key cache split"));
2147 /* XXX could optimize */
2148 for (i = 0; i < N(sc->sc_keymap)/4; i++) {
86877dfb 2149 u_int8_t b = sc->sc_keymap[i];
2150 if (b != 0xff) {
2151 /*
2152 * One or more slots in this byte are free.
2153 */
2154 keyix = i*NBBY;
2155 while (b & 1) {
2156 again:
2157 keyix++;
2158 b >>= 1;
2159 }
2160 if (isset(sc->sc_keymap, keyix+64)) {
2161 /* full pair unavailable */
2162 /* XXX statistic */
2163 if (keyix == (i+1)*NBBY) {
2164 /* no slots were appropriate, advance */
2165 continue;
2166 }
2167 goto again;
2168 }
2169 setbit(sc->sc_keymap, keyix);
2170 setbit(sc->sc_keymap, keyix+64);
2171 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2172 "%s: key pair %u,%u\n",
2173 __func__, keyix, keyix+64);
2174 *txkeyix = *rxkeyix = keyix;
2175 return 1;
2176 }
2177 }
2178 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2179 return 0;
2180#undef N
2181}
2182
2183/*
2184 * Allocate a single key cache slot.
2185 */
2186static int
2187key_alloc_single(struct ath_softc *sc,
2188 ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2189{
2190#define N(a) (sizeof(a)/sizeof(a[0]))
2191 u_int i, keyix;
2192
2193 /* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
2194 for (i = 0; i < N(sc->sc_keymap); i++) {
86877dfb 2195 u_int8_t b = sc->sc_keymap[i];
2196 if (b != 0xff) {
2197 /*
2198 * One or more slots are free.
2199 */
2200 keyix = i*NBBY;
2201 while (b & 1)
2202 keyix++, b >>= 1;
2203 setbit(sc->sc_keymap, keyix);
2204 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
2205 __func__, keyix);
2206 *txkeyix = *rxkeyix = keyix;
2207 return 1;
2208 }
2209 }
2210 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
2211 return 0;
2212#undef N
2213}
2214
2215/*
2216 * Allocate one or more key cache slots for a unicast key. The
2217 * key itself is needed only to identify the cipher. For hardware
2218 * TKIP with split cipher+MIC keys we allocate two key cache slot
2219 * pairs so that we can setup separate TX and RX MIC keys. Note
2220 * that the MIC key for a TKIP key at slot i is assumed by the
2221 * hardware to be at slot i+64. This limits TKIP keys to the first
2222 * 64 entries.
2223 */
2224static int
86877dfb 2225ath_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
2226 ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
2227{
86877dfb 2228 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2229
2230 /*
2231 * Group key allocation must be handled specially for
2232 * parts that do not support multicast key cache search
2233 * functionality. For those parts the key id must match
2234 * the h/w key index so lookups find the right key. On
2235 * parts w/ the key search facility we install the sender's
2236 * mac address (with the high bit set) and let the hardware
2237 * find the key w/o using the key id. This is preferred as
2238 * it permits us to support multiple users for adhoc and/or
2239 * multi-station operation.
2240 */
2241 if (k->wk_keyix != IEEE80211_KEYIX_NONE) {
2242 /*
2243 * Only global keys should have key index assigned.
2244 */
2245 if (!(&vap->iv_nw_keys[0] <= k &&
2246 k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
193b341d
SZ
2247 /* should not happen */
2248 DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2249 "%s: bogus group key\n", __func__);
2250 return 0;
2251 }
2252 if (vap->iv_opmode != IEEE80211_M_HOSTAP ||
2253 !(k->wk_flags & IEEE80211_KEY_GROUP) ||
2254 !sc->sc_mcastkey) {
2255 /*
2256 * XXX we pre-allocate the global keys so
2257 * have no way to check if they've already
2258 * been allocated.
2259 */
2260 *keyix = *rxkeyix = k - vap->iv_nw_keys;
2261 return 1;
2262 }
193b341d 2263 /*
86877dfb 2264 * Group key and device supports multicast key search.
193b341d 2265 */
86877dfb 2266 k->wk_keyix = IEEE80211_KEYIX_NONE;
2267 }
2268
2269 /*
2270 * We allocate two pair for TKIP when using the h/w to do
2271 * the MIC. For everything else, including software crypto,
2272 * we allocate a single entry. Note that s/w crypto requires
2273 * a pass-through slot on the 5211 and 5212. The 5210 does
2274 * not support pass-through cache entries and we map all
2275 * those requests to slot 0.
2276 */
2277 if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
2278 return key_alloc_single(sc, keyix, rxkeyix);
2279 } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
2280 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2281 if (sc->sc_splitmic)
2282 return key_alloc_2pair(sc, keyix, rxkeyix);
2283 else
2284 return key_alloc_pair(sc, keyix, rxkeyix);
2285 } else {
2286 return key_alloc_single(sc, keyix, rxkeyix);
2287 }
2288}
2289
2290/*
2291 * Delete an entry in the key cache allocated by ath_key_alloc.
2292 */
2293static int
86877dfb 2294ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
193b341d 2295{
86877dfb 2296 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2297 struct ath_hal *ah = sc->sc_ah;
2298 const struct ieee80211_cipher *cip = k->wk_cipher;
2299 u_int keyix = k->wk_keyix;
2300
2301 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);
2302
2303 ath_hal_keyreset(ah, keyix);
2304 /*
2305 * Handle split tx/rx keying required for TKIP with h/w MIC.
2306 */
2307 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2308 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
2309 ath_hal_keyreset(ah, keyix+32); /* RX key */
2310 if (keyix >= IEEE80211_WEP_NKID) {
2311 /*
2312 * Don't touch keymap entries for global keys so
2313 * they are never considered for dynamic allocation.
2314 */
2315 clrbit(sc->sc_keymap, keyix);
2316 if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
8982d733 2317 (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
193b341d 2318 clrbit(sc->sc_keymap, keyix+64); /* TX key MIC */
2319 if (sc->sc_splitmic) {
2320 /* +32 for RX key, +32+64 for RX key MIC */
2321 clrbit(sc->sc_keymap, keyix+32);
2322 clrbit(sc->sc_keymap, keyix+32+64);
2323 }
2324 }
2325 }
2326 return 1;
2327}
2328
2329/*
2330 * Set the key cache contents for the specified key. Key cache
2331 * slot(s) must already have been allocated by ath_key_alloc.
2332 */
2333static int
2334ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
2335 const u_int8_t mac[IEEE80211_ADDR_LEN])
193b341d 2336{
86877dfb 2337 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
193b341d 2338
86877dfb 2339 return ath_keyset(sc, k, vap->iv_bss);
2340}
2341
2342/*
2343 * Block/unblock tx+rx processing while a key change is done.
2344 * We assume the caller serializes key management operations
2345 * so we only need to worry about synchronization with other
2346 * uses that originate in the driver.
2347 */
2348static void
86877dfb 2349ath_key_update_begin(struct ieee80211vap *vap)
193b341d 2350{
86877dfb 2351 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2352 struct ath_softc *sc = ifp->if_softc;
2353
2354 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
86877dfb 2355 taskqueue_block(sc->sc_tq);
193b341d 2356 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
2357}
2358
2359static void
86877dfb 2360ath_key_update_end(struct ieee80211vap *vap)
193b341d 2361{
86877dfb 2362 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2363 struct ath_softc *sc = ifp->if_softc;
2364
2365 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
193b341d 2366 IF_UNLOCK(&ifp->if_snd);
86877dfb 2367 taskqueue_unblock(sc->sc_tq);
2368}
2369
2370/*
2371 * Calculate the receive filter according to the
2372 * operating mode and state:
2373 *
2374 * o always accept unicast, broadcast, and multicast traffic
2375 * o accept PHY error frames when hardware doesn't have MIB support
2376 * to count and we need them for ANI (sta mode only until recently)
2377 * and we are not scanning (ANI is disabled)
2378 * NB: older hal's add rx filter bits out of sight and we need to
2379 * blindly preserve them
193b341d 2380 * o probe request frames are accepted only when operating in
2381 * hostap, adhoc, mesh, or monitor modes
2382 * o enable promiscuous mode
2383 * - when in monitor mode
2384 * - if interface marked PROMISC (assumes bridge setting is filtered)
193b341d 2385 * o accept beacons:
2386 * - when operating in station mode for collecting rssi data when
2387 * the station is otherwise quiet, or
2388 * - when operating in adhoc mode so the 802.11 layer creates
2389 * node table entries for peers,
193b341d 2390 * - when scanning
2391 * - when doing s/w beacon miss (e.g. for ap+sta)
2392 * - when operating in ap mode in 11g to detect overlapping bss that
2393 * require protection
2394 * - when operating in mesh mode to detect neighbors
2395 * o accept control frames:
2396 * - when in monitor mode
2397 * XXX BAR frames for 11n
2398 * XXX HT protection for 11n
193b341d 2399 */
2400static u_int32_t
2401ath_calcrxfilter(struct ath_softc *sc)
193b341d 2402{
2403 struct ifnet *ifp = sc->sc_ifp;
2404 struct ieee80211com *ic = ifp->if_l2com;
2405 u_int32_t rfilt;
193b341d 2406
2407 rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
2408 if (!sc->sc_needmib && !sc->sc_scanning)
2409 rfilt |= HAL_RX_FILTER_PHYERR;
2410 if (ic->ic_opmode != IEEE80211_M_STA)
2411 rfilt |= HAL_RX_FILTER_PROBEREQ;
2412 /* XXX ic->ic_monvaps != 0? */
2413 if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
2414 rfilt |= HAL_RX_FILTER_PROM;
2415 if (ic->ic_opmode == IEEE80211_M_STA ||
2416 ic->ic_opmode == IEEE80211_M_IBSS ||
2417 sc->sc_swbmiss || sc->sc_scanning)
2418 rfilt |= HAL_RX_FILTER_BEACON;
2419 /*
2420 * NB: We don't recalculate the rx filter when
2421 * ic_protmode changes; otherwise we could do
2422 * this only when ic_protmode != NONE.
2423 */
2424 if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
2425 IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
2426 rfilt |= HAL_RX_FILTER_BEACON;
2427 if (sc->sc_nmeshvaps) {
193b341d 2428 rfilt |= HAL_RX_FILTER_BEACON;
2429 if (sc->sc_hasbmatch)
2430 rfilt |= HAL_RX_FILTER_BSSID;
2431 else
2432 rfilt |= HAL_RX_FILTER_PROM;
2433 }
2434 if (ic->ic_opmode == IEEE80211_M_MONITOR)
2435 rfilt |= HAL_RX_FILTER_CONTROL;
2436 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
2437 __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
193b341d 2438 return rfilt;
2439}
2440
2441static void
86877dfb 2442ath_update_promisc(struct ifnet *ifp)
193b341d 2443{
2444 struct ath_softc *sc = ifp->if_softc;
2445 u_int32_t rfilt;
2446
2447 /* configure rx filter */
2448 rfilt = ath_calcrxfilter(sc);
2449 ath_hal_setrxfilter(sc->sc_ah, rfilt);
193b341d 2450
2451 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2452}
193b341d 2453
2454static void
2455ath_update_mcast(struct ifnet *ifp)
2456{
2457 struct ath_softc *sc = ifp->if_softc;
2458 u_int32_t mfilt[2];
2459
2460 /* calculate and install multicast filter */
2461 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
2462 struct ifmultiaddr *ifma;
2463 /*
2464 * Merge multicast addresses to form the hardware filter.
2465 */
193b341d 2466 mfilt[0] = mfilt[1] = 0;
2467#ifdef __FreeBSD__
2468 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */
2469#endif
2470 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2471 caddr_t dl;
2472 u_int32_t val;
2473 u_int8_t pos;
2474
2475 /* calculate XOR of eight 6bit values */
86877dfb 2476 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
2477 val = LE_READ_4(dl + 0);
2478 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2479 val = LE_READ_4(dl + 3);
2480 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2481 pos &= 0x3f;
2482 mfilt[pos / 32] |= (1 << (pos % 32));
2483 }
2484#ifdef __FreeBSD__
2485 if_maddr_runlock(ifp);
2486#endif
2487 } else
193b341d 2488 mfilt[0] = mfilt[1] = ~0;
2489 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
2490 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
2491 __func__, mfilt[0], mfilt[1]);
2492}
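/*
 * Illustrative sketch (not from the driver): the loop above folds each
 * 48-bit multicast address into a 6-bit position by XOR-ing 6-bit slices
 * of two little-endian 32-bit loads, then sets that bit in the 64-bit
 * filter.  Note that, like the code above, the second load starts at byte
 * 3 and therefore reads one byte past the 6-byte address, so the sketch
 * assumes a padded 8-byte buffer.
 */
#if 0	/* illustrative sketch only -- not part of if_ath.c */
#include <stdint.h>

static uint32_t
ex_le_read_4(const uint8_t *p)		/* stands in for LE_READ_4() */
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	    ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void
ex_mcast_hash(const uint8_t addr[8], uint32_t mfilt[2])
{
	uint32_t val;
	uint8_t pos;

	val = ex_le_read_4(addr + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = ex_le_read_4(addr + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;				/* 64 filter bits */
	mfilt[pos / 32] |= 1u << (pos % 32);
}
#endif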
2493
2494static void
2495ath_mode_init(struct ath_softc *sc)
2496{
2497 struct ifnet *ifp = sc->sc_ifp;
2498 struct ath_hal *ah = sc->sc_ah;
2499 u_int32_t rfilt;
2500
2501 /* configure rx filter */
2502 rfilt = ath_calcrxfilter(sc);
2503 ath_hal_setrxfilter(ah, rfilt);
2504
2505 /* configure operational mode */
2506 ath_hal_setopmode(ah);
2507
2508 /* handle any link-level address change */
2509 ath_hal_setmac(ah, IF_LLADDR(ifp));
2510
2511 /* calculate and install multicast filter */
2512 ath_update_mcast(ifp);
2513}
2514
2515/*
2516 * Set the slot time based on the current setting.
2517 */
2518static void
2519ath_setslottime(struct ath_softc *sc)
2520{
86877dfb 2521 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
193b341d 2522 struct ath_hal *ah = sc->sc_ah;
2523 u_int usec;
2524
2525 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2526 usec = 13;
2527 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2528 usec = 21;
2529 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2530 /* honor short/long slot time only in 11g */
2531 /* XXX shouldn't honor on pure g or turbo g channel */
2532 if (ic->ic_flags & IEEE80211_F_SHSLOT)
2533 usec = HAL_SLOT_TIME_9;
2534 else
2535 usec = HAL_SLOT_TIME_20;
2536 } else
2537 usec = HAL_SLOT_TIME_9;
193b341d 2538
2539 DPRINTF(sc, ATH_DEBUG_RESET,
2540 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2541 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2542 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2543
2544 ath_hal_setslottime(ah, usec);
2545 sc->sc_updateslot = OK;
2546}
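/*
 * Illustrative sketch (not from the driver): a compact restatement of the
 * slot time selection above.  Half- and quarter-rate channels get 13 and
 * 21 usec, 11g honors the short-slot flag (9 vs. 20 usec), and everything
 * else uses the short 9 usec slot.  The enum and plain numbers stand in
 * for the channel flag tests and HAL_SLOT_TIME_* constants and are
 * assumptions for the example only.
 */
#if 0	/* illustrative sketch only -- not part of if_ath.c */
enum ex_chan_kind { EX_CHAN_HALF, EX_CHAN_QUARTER, EX_CHAN_11G, EX_CHAN_OTHER };

static unsigned
ex_slottime_usec(enum ex_chan_kind kind, int short_slot)
{
	switch (kind) {
	case EX_CHAN_HALF:	return 13;
	case EX_CHAN_QUARTER:	return 21;
	case EX_CHAN_11G:	return short_slot ? 9 : 20;
	default:		return 9;
	}
}
#endif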
2547
2548/*
2549 * Callback from the 802.11 layer to update the
2550 * slot time based on the current setting.
2551 */
2552static void
2553ath_updateslot(struct ifnet *ifp)
2554{
2555 struct ath_softc *sc = ifp->if_softc;
86877dfb 2556 struct ieee80211com *ic = ifp->if_l2com;
193b341d
SZ
2557
2558 /*
2559 * When not coordinating the BSS, change the hardware
2560 * immediately. For other operation we defer the change
2561 * until beacon updates have propagated to the stations.
2562 */
2563 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2564 ic->ic_opmode == IEEE80211_M_MBSS)
2565 sc->sc_updateslot = UPDATE;
2566 else
2567 ath_setslottime(sc);
2568}
2569
2570/*
2571 * Setup a h/w transmit queue for beacons.
2572 */
2573static int
2574ath_beaconq_setup(struct ath_hal *ah)
2575{
2576 HAL_TXQ_INFO qi;
2577
2578 memset(&qi, 0, sizeof(qi));
2579 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2580 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2581 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2582 /* NB: for dynamic turbo, don't enable any other interrupts */
2583 qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2584 return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2585}
2586
2587/*
2588 * Setup the transmit queue parameters for the beacon queue.
2589 */
2590static int
2591ath_beaconq_config(struct ath_softc *sc)
2592{
2593#define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1)
86877dfb 2594 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2595 struct ath_hal *ah = sc->sc_ah;
2596 HAL_TXQ_INFO qi;
2597
2598 ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2599 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
2600 ic->ic_opmode == IEEE80211_M_MBSS) {
2601 /*
2602 * Always burst out beacon and CAB traffic.
2603 */
2604 qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2605 qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2606 qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2607 } else {
2608 struct wmeParams *wmep =
2609 &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2610 /*
2611 * Adhoc mode; important thing is to use 2x cwmin.
2612 */
2613 qi.tqi_aifs = wmep->wmep_aifsn;
2614 qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2615 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2616 }
2617
2618 if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2619 device_printf(sc->sc_dev, "unable to update parameters for "
2620 "beacon hardware queue!\n");
2621 return 0;
2622 } else {
2623 ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2624 return 1;
2625 }
2626#undef ATH_EXPONENT_TO_VALUE
2627}
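/*
 * Illustrative sketch (not from the driver): ATH_EXPONENT_TO_VALUE() above
 * turns the WME exponent form of cwmin/cwmax into window sizes via
 * (1 << v) - 1, and adhoc beacons use twice cwmin.  The exponents 4 and 10
 * below are typical best-effort values assumed purely for the example.
 */
#if 0	/* illustrative sketch only -- not part of if_ath.c */
#include <assert.h>

#define EX_EXPONENT_TO_VALUE(v)	((1 << (v)) - 1)

static void
ex_cw_example(void)
{
	int logcwmin = 4, logcwmax = 10;		/* assumed WME exponents */

	assert(EX_EXPONENT_TO_VALUE(logcwmin) == 15);	 /* 2^4 - 1 */
	assert(EX_EXPONENT_TO_VALUE(logcwmax) == 1023);	 /* 2^10 - 1 */
	assert(2 * EX_EXPONENT_TO_VALUE(logcwmin) == 30);/* adhoc beacon cwmin */
}
#endif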
2628
2629/*
2630 * Allocate and setup an initial beacon frame.
2631 */
2632static int
2633ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
2634{
2635 struct ieee80211vap *vap = ni->ni_vap;
2636 struct ath_vap *avp = ATH_VAP(vap);
2637 struct ath_buf *bf;
2638 struct mbuf *m;
2639 int error;
2640
2641 bf = avp->av_bcbuf;
2642 if (bf->bf_m != NULL) {
2643 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2644 m_freem(bf->bf_m);
2645 bf->bf_m = NULL;
2646 }
2647 if (bf->bf_node != NULL) {
2648 ieee80211_free_node(bf->bf_node);
2649 bf->bf_node = NULL;
193b341d 2650 }
86877dfb 2651
2652 /*
2653 * NB: the beacon data buffer must be 32-bit aligned;
2654 * we assume the mbuf routines will return us something
2655 * with this alignment (perhaps should assert).
2656 */
86877dfb 2657 m = ieee80211_beacon_alloc(ni, &avp->av_boff);
193b341d 2658 if (m == NULL) {
86877dfb 2659 device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
2660 sc->sc_stats.ast_be_nombuf++;
2661 return ENOMEM;
2662 }
2663 error = bus_dmamap_load_mbuf_segment(sc->sc_dmat, bf->bf_dmamap, m,
2664 bf->bf_segs, 1, &bf->bf_nseg,
2665 BUS_DMA_NOWAIT);
2666 if (error != 0) {
2667 device_printf(sc->sc_dev,
04522223 2668 "%s: cannot map mbuf, bus_dmamap_load_mbuf_segment returns %d\n",
86877dfb 2669 __func__, error);
193b341d 2670 m_freem(m);
86877dfb 2671 return error;
193b341d 2672 }
2673
2674 /*
2675 * Calculate a TSF adjustment factor required for staggered
2676 * beacons. Note that we assume the format of the beacon
2677 * frame leaves the tstamp field immediately following the
2678 * header.
2679 */
2680 if (sc->sc_stagbeacons && avp->av_bslot > 0) {
2681 uint64_t tsfadjust;
2682 struct ieee80211_frame *wh;
2683
2684 /*
2685 * The beacon interval is in TU's; the TSF is in usecs.
2686 * We figure out how many TU's to add to align the timestamp
2687 * then convert to TSF units and handle byte swapping before
2688 * inserting it in the frame. The hardware will then add this
2689 * each time a beacon frame is sent. Note that we align vap's
2690 * 1..N and leave vap 0 untouched. This means vap 0 has a
2691 * timestamp in one beacon interval while the others get a
2692	 * timestamp aligned to the next interval.
2693 */
2694 tsfadjust = ni->ni_intval *
2695 (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
2696 tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */
2697
2698 DPRINTF(sc, ATH_DEBUG_BEACON,
2699 "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
2700 __func__, sc->sc_stagbeacons ? "stagger" : "burst",
2701 avp->av_bslot, ni->ni_intval,
2702 (long long unsigned) le64toh(tsfadjust));
2703
2704 wh = mtod(m, struct ieee80211_frame *);
2705 memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
2706 }
2707 bf->bf_m = m;
2708 bf->bf_node = ieee80211_ref_node(ni);
2709
2710 return 0;
2711}
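/*
 * Illustrative sketch (not from the driver): the staggered-beacon TSF
 * adjustment above works in TU (1 TU = 1024 usec) -- each vap's share of
 * the interval is computed in TU and shifted left by 10 to get
 * microseconds before being byte-swapped into the frame.  The interval of
 * 100 TU and the 4 beacon slots in the worked example are assumed values;
 * nbcbuf stands in for ATH_BCBUF.
 */
#if 0	/* illustrative sketch only -- not part of if_ath.c */
#include <stdint.h>

static uint64_t
ex_tsfadjust_usec(unsigned intval_tu, unsigned nbcbuf, unsigned bslot)
{
	uint64_t tu = (uint64_t)intval_tu * (nbcbuf - bslot) / nbcbuf;

	return tu << 10;	/* TU -> TSF microseconds */
}

/*
 * Example: intval = 100 TU, 4 slots, slot 1 -> 100 * 3 / 4 = 75 TU,
 * 75 << 10 = 76800 usec of adjustment; the driver additionally applies
 * htole64() before copying the value into the beacon timestamp field.
 */
#endif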
2712
2713/*
2714 * Setup the beacon frame for transmit.
2715 */
2716static void
2717ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
2718{
2719#define USE_SHPREAMBLE(_ic) \
2720 (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
2721 == IEEE80211_F_SHPREAMBLE)
2722 struct ieee80211_node *ni = bf->bf_node;
2723 struct ieee80211com *ic = ni->ni_ic;
2724 struct mbuf *m = bf->bf_m;
2725 struct ath_hal *ah = sc->sc_ah;
2726 struct ath_desc *ds;
2727 int flags, antenna;
2728 const HAL_RATE_TABLE *rt;
86877dfb 2729 u_int8_t rix, rate;
193b341d 2730
ed33fa9f 2731 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
2732 __func__, m, m->m_len);
2733
2734 /* setup descriptors */
2735 ds = bf->bf_desc;
2736
2737 flags = HAL_TXDESC_NOACK;
2738 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
2739 ds->ds_link = bf->bf_daddr; /* self-linked */
2740 flags |= HAL_TXDESC_VEOL;
2741 /*
2742 * Let hardware handle antenna switching.
2743 */
2744 antenna = sc->sc_txantenna;
2745 } else {
2746 ds->ds_link = 0;
2747 /*
2748 * Switch antenna every 4 beacons.
2749	 * XXX assumes two antennae
2750 */
2751 if (sc->sc_txantenna != 0)
2752 antenna = sc->sc_txantenna;
2753 else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
2754 antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
2755 else
2756 antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
2757 }
2758
2759 KASSERT(bf->bf_nseg == 1,
2760 ("multi-segment beacon frame; nseg %u", bf->bf_nseg));
2761 ds->ds_data = bf->bf_segs[0].ds_addr;
2762 /*
2763 * Calculate rate code.
2764 * XXX everything at min xmit rate
2765 */
86877dfb 2766 rix = 0;
2767 rt = sc->sc_currates;
2768 rate = rt->info[rix].rateCode;
2769 if (USE_SHPREAMBLE(ic))
2770 rate |= rt->info[rix].shortPreamble;
2771 ath_hal_setuptxdesc(ah, ds
2772 , m->m_len + IEEE80211_CRC_LEN /* frame length */
2773 , sizeof(struct ieee80211_frame)/* header length */
2774 , HAL_PKT_TYPE_BEACON /* Atheros packet type */
2775 , ni->ni_txpower /* txpower XXX */
2776 , rate, 1 /* series 0 rate/tries */
2777 , HAL_TXKEYIX_INVALID /* no encryption */
2778 , antenna /* antenna mode */
2779 , flags /* no ack, veol for beacons */
2780 , 0 /* rts/cts rate */
2781 , 0 /* rts/cts duration */
2782 );
2783 /* NB: beacon's BufLen must be a multiple of 4 bytes */
2784 ath_hal_filltxdesc(ah, ds
2785 , roundup(m->m_len, 4) /* buffer length */
2786 , AH_TRUE /* first segment */
2787 , AH_TRUE /* last segment */
2788 , ds /* first descriptor */
2789 );
2790#if 0
2791 ath_desc_swap(ds);
2792#endif
2793#undef USE_SHPREAMBLE
2794}
2795
2796static void
2797ath_beacon_update(struct ieee80211vap *vap, int item)
2798{
2799 struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
2800
2801 setbit(bo->bo_flags, item);
2802}
2803
193b341d 2804/*
2805 * Append the contents of src to dst; both queues
2806 * are assumed to be locked.
2807 */
2808static void
2809ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2810{
2811 STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
2812 dst->axq_link = src->axq_link;
2813 src->axq_link = NULL;
2814 dst->axq_depth += src->axq_depth;
2815 src->axq_depth = 0;
2816}
2817
2818/*
2819 * Transmit a beacon frame at SWBA. Dynamic updates to the
2820 * frame contents are done as needed and the slot time is
2821 * also adjusted based on current state.
2822 */
2823static void
86877dfb 2824ath_beacon_proc(void *arg, int pending)
193b341d 2825{
86877dfb 2826 struct ath_softc *sc = arg;
193b341d 2827 struct ath_hal *ah = sc->sc_ah;
2828 struct ieee80211vap *vap;
2829 struct ath_buf *bf;
2830 int slot, otherant;
2831 uint32_t bfaddr;
193b341d 2832
2833 DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
2834 __func__, pending);
2835 /*
2836 * Check if the previous beacon has gone out. If
2837 * not don't try to post another, skip this period
2838 * and wait for the next. Missed beacons indicate
2839 * a problem and should not occur. If we miss too
2840 * many consecutive beacons reset the device.
2841 */
2842 if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
2843 sc->sc_bmisscount++;
ed33fa9f 2844 DPRINTF(sc, ATH_DEBUG_BEACON,
2845 "%s: missed %u consecutive beacons\n",
2846 __func__, sc->sc_bmisscount);
2847 if (sc->sc_bmisscount >= ath_bstuck_threshold)
2848 taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
2849 return;
2850 }
2851 if (sc->sc_bmisscount != 0) {
2852 DPRINTF(sc, ATH_DEBUG_BEACON,
2853 "%s: resume beacon xmit after %u misses\n",
2854 __func__, sc->sc_bmisscount);
2855 sc->sc_bmisscount = 0;
2856 }
2857
2858 if (sc->sc_stagbeacons) { /* staggered beacons */
2859 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2860 uint32_t tsftu;
193b341d 2861
2862 tsftu = ath_hal_gettsf32(ah) >> 10;
2863 /* XXX lintval */
2864 slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
2865 vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
2866 bfaddr = 0;
2867 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
2868 bf = ath_beacon_generate(sc, vap);
2869 if (bf != NULL)
2870 bfaddr = bf->bf_daddr;
2871 }
2872 } else { /* burst'd beacons */
2873 uint32_t *bflink = &bfaddr;
2874
2875 for (slot = 0; slot < ATH_BCBUF; slot++) {
2876 vap = sc->sc_bslot[slot];
2877 if (vap != NULL && vap->iv_state >= IEEE80211_S_RUN) {
2878 bf = ath_beacon_generate(sc, vap);
2879 if (bf != NULL) {
2880 *bflink = bf->bf_daddr;
2881 bflink = &bf->bf_desc->ds_link;
2882 }
2883 }
2884 }
2885 *bflink = 0; /* terminate list */
2886 }
2887
2888 /*
2889 * Handle slot time change when a non-ERP station joins/leaves
2890 * an 11g network. The 802.11 layer notifies us via callback,
2891 * we mark updateslot, then wait one beacon before effecting
2892 * the change. This gives associated stations at least one
2893 * beacon interval to note the state change.
2894 */
2895 /* XXX locking */
86877dfb 2896 if (sc->sc_updateslot == UPDATE) {
193b341d 2897 sc->sc_updateslot = COMMIT; /* commit next beacon */
2898 sc->sc_slotupdate = slot;
2899 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
2900 ath_setslottime(sc); /* commit change to h/w */
2901
2902 /*
2903 * Check recent per-antenna transmit statistics and flip
2904 * the default antenna if noticeably more frames went out
2905 * on the non-default antenna.
2906	 * XXX assumes 2 antennae
2907 */
2908 if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
2909 otherant = sc->sc_defant & 1 ? 2 : 1;
2910 if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
2911 ath_setdefantenna(sc, otherant);
2912 sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
2913 }
193b341d 2914
2915 if (bfaddr != 0) {
2916 /*
2917 * Stop any current dma and put the new frame on the queue.
2918 * This should never fail since we check above that no frames
2919 * are still pending on the queue.
2920 */
2921 if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
2922 DPRINTF(sc, ATH_DEBUG_ANY,
2923 "%s: beacon queue %u did not stop?\n",
2924 __func__, sc->sc_bhalq);
2925 }
2926 /* NB: cabq traffic should already be queued and primed */
2927 ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
2928 ath_hal_txstart(ah, sc->sc_bhalq);
2929
2930 sc->sc_stats.ast_be_xmit++;
2931 }
2932}
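/*
 * Illustrative sketch (not from the driver): with staggered beacons the
 * SWBA handler above converts the 32-bit TSF to TU (>> 10) and maps the
 * offset within the beacon interval to a slot index, then services the
 * vap in the next slot.  lintval and nbcbuf stand in for ic_lintval and
 * ATH_BCBUF; the example numbers are assumptions.
 */
#if 0	/* illustrative sketch only -- not part of if_ath.c */
#include <stdint.h>

static unsigned
ex_beacon_slot(uint32_t tsf32, unsigned lintval, unsigned nbcbuf)
{
	uint32_t tsftu = tsf32 >> 10;		/* usec -> TU */

	return ((tsftu % lintval) * nbcbuf) / lintval;
}

/*
 * Example: lintval = 100 TU, 4 slots; a TSF 30 TU into the interval gives
 * (30 * 4) / 100 = slot 1, and the handler then generates the beacon for
 * sc_bslot[(1 + 1) % 4].
 */
#endif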
2933
2934static struct ath_buf *
2935ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
2936{
2937 struct ath_vap *avp = ATH_VAP(vap);
2938 struct ath_txq *cabq = sc->sc_cabq;
2939 struct ath_buf *bf;
2940 struct mbuf *m;
2941 int nmcastq, error;
2942
2943 KASSERT(vap->iv_state >= IEEE80211_S_RUN,
2944 ("not running, state %d", vap->iv_state));
2945 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
2946
2947 /*
2948 * Update dynamic beacon contents. If this returns
2949 * non-zero then we need to remap the memory because
2950 * the beacon frame changed size (probably because
2951 * of the TIM bitmap).
193b341d 2952 */
2953 bf = avp->av_bcbuf;
2954 m = bf->bf_m;
2955 nmcastq = avp->av_mcastq.axq_depth;
2956 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
2957 /* XXX too conservative? */
2958 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2959 error = bus_dmamap_load_mbuf_segment(sc->sc_dmat, bf->bf_dmamap, m,
2960 bf->bf_segs, 1, &bf->bf_nseg,
2961 BUS_DMA_NOWAIT);
2962 if (error != 0) {
2963 if_printf(vap->iv_ifp,
04522223 2964 "%s: bus_dmamap_load_mbuf_segment failed, error %u\n",
2965 __func__, error);
2966 return NULL;
2967 }
2968 }
2969 if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
2970 DPRINTF(sc, ATH_DEBUG_BEACON,
2971 "%s: cabq did not drain, mcastq %u cabq %u\n",
2972 __func__, nmcastq, cabq->axq_depth);
2973 sc->sc_stats.ast_cabq_busy++;
2974 if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
2975 /*
2976 * CABQ traffic from a previous vap is still pending.
2977 * We must drain the q before this beacon frame goes
2978 * out as otherwise this vap's stations will get cab
2979 * frames from a different vap.
2980 * XXX could be slow causing us to miss DBA
2981 */
2982 ath_tx_draintxq(sc, cabq);
2983 }
193b341d 2984 }
86877dfb 2985 ath_beacon_setup(sc, bf);
2986 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
2987
2988 /*
2989 * Enable the CAB queue before the beacon queue to
2990	 * ensure cab frames are triggered by this beacon.
2991 */
2992 if (avp->av_boff.bo_tim[4] & 1) {
2993 struct ath_hal *ah = sc->sc_ah;
2994
8982d733 2995 /* NB: only at DTIM */
2996 ATH_TXQ_LOCK(cabq);
2997 ATH_TXQ_LOCK(&avp->av_mcastq);
2998 if (nmcastq) {
2999 struct ath_buf *bfm;
3000
3001 /*
3002 * Move frames from the s/w mcast q to the h/w cab q.
86877dfb 3003 * XXX MORE_DATA bit
ed33fa9f 3004 */
86877dfb 3005 bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q);
3006 if (cabq->axq_link != NULL) {
3007 *cabq->axq_link = bfm->bf_daddr;
86877dfb 3008 } else
3009 ath_hal_puttxbuf(ah, cabq->axq_qnum,
3010 bfm->bf_daddr);
86877dfb 3011 ath_txqmove(cabq, &avp->av_mcastq);
3012
3013 sc->sc_stats.ast_cabq_xmit += nmcastq;
3014 }
3015 /* NB: gated by beacon so safe to start here */
3016 ath_hal_txstart(ah, cabq->axq_qnum);
3017 ATH_TXQ_UNLOCK(cabq);
3018 ATH_TXQ_UNLOCK(&avp->av_mcastq);
3019 }
3020 return bf;
3021}
3022
3023static void
3024ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
3025{
3026 struct ath_vap *avp = ATH_VAP(vap);
3027 struct ath_hal *ah = sc->sc_ah;
3028 struct ath_buf *bf;
3029 struct mbuf *m;
3030 int error;
3031
3032 KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));
3033
3034 /*
3035 * Update dynamic beacon contents. If this returns
3036 * non-zero then we need to remap the memory because
3037 * the beacon frame changed size (probably because
3038 * of the TIM bitmap).
3039 */
3040 bf = avp->av_bcbuf;
3041 m = bf->bf_m;
3042 if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
3043 /* XXX too conservative? */
3044 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3045 error = bus_dmamap_load_mbuf_segment(sc->sc_dmat, bf->bf_dmamap, m,
3046 bf->bf_segs, 1, &bf->bf_nseg,
3047 BUS_DMA_NOWAIT);
3048 if (error != 0) {
3049 if_printf(vap->iv_ifp,
04522223 3050 "%s: bus_dmamap_load_mbuf_segment failed, error %u\n",
3051 __func__, error);
3052 return;
3053 }
ed33fa9f 3054 }
3055 ath_beacon_setup(sc, bf);
3056 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
3057
3058 /* NB: caller is known to have already stopped tx dma */
3059 ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
3060 ath_hal_txstart(ah, sc->sc_bhalq);
3061}
3062
3063/*
3064 * Reset the hardware after detecting beacons have stopped.
3065 */
3066static void
86877dfb 3067ath_bstuck_proc(void *arg, int pending)
193b341d 3068{
3069 struct ath_softc *sc = arg;
3070 struct ifnet *ifp = sc->sc_ifp;
3071
3072 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3073 sc->sc_bmisscount);
3074 sc->sc_stats.ast_bstuck++;
3075 ath_reset(ifp);
3076}
3077
3078/*
3079 * Reclaim beacon resources and return buffer to the pool.
3080 */
3081static void
3082ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
3083{
3084
3085 if (bf->bf_m != NULL) {
3086 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3087 m_freem(bf->bf_m);
3088 bf->bf_m = NULL;
3089 }
3090 if (bf->bf_node != NULL) {
3091 ieee80211_free_node(bf->bf_node);
3092 bf->bf_node = NULL;
3093 }
3094 STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
3095}
3096
3097/*
3098 * Reclaim beacon resources.
3099 */
3100static void
3101ath_beacon_free(struct ath_softc *sc)
3102{
3103 struct ath_buf *bf;
3104
3105 STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
3106 if (bf->bf_m != NULL) {
3107 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3108 m_freem(bf->bf_m);
3109 bf->bf_m = NULL;
3110 }
3111 if (bf->bf_node != NULL) {
3112 ieee80211_free_node(bf->bf_node);
3113 bf->bf_node = NULL;
3114 }
3115 }
3116}
3117
3118/*
3119 * Configure the beacon and sleep timers.
3120 *
3121 * When operating as an AP this resets the TSF and sets
3122 * up the hardware to notify us when we need to issue beacons.
3123 *
3124 * When operating in station mode this sets up the beacon
3125 * timers according to the timestamp of the last received
3126 * beacon and the current TSF, configures PCF and DTIM
3127 * handling, programs the sleep registers so the hardware
3128 * will wake up in time to receive beacons, and configures
3129 * the beacon miss handling so we'll receive a BMISS
3130 * interrupt when we stop seeing beacons from the AP
3131 * we've associated with.
3132 */
3133static void
86877dfb 3134ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
3135{
3136#define TSF_TO_TU(_h,_l) \
86877dfb 3137 ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
3138#define FUDGE 2
3139 struct ath_hal *ah = sc->sc_ah;
3140 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3141 struct ieee80211_node *ni;
3142 u_int32_t nexttbtt, intval, tsftu;
3143 u_int64_t tsf;
3144
3145 if (vap == NULL)
3146 vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */
3147 ni = vap->iv_bss;
3148
3149 /* extract tstamp from last beacon and convert to TU */
3150 nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
3151 LE_READ_4(ni->ni_tstamp.data));
3152 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3153 ic->ic_opmode == IEEE80211_M_MBSS) {
3154 /*
3155 * For multi-bss ap/mesh support beacons are either staggered
3156 * evenly over N slots or burst together. For the former
3157 * arrange for the SWBA to be delivered for each slot.
3158 * Slots that are not occupied will generate nothing.
3159 */
3160 /* NB: the beacon interval is kept internally in TU's */
3161 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3162 if (sc->sc_stagbeacons)
3163 intval /= ATH_BCBUF;
3164 } else {
3165 /* NB: the beacon interval is kept internally in TU's */
3166 intval = ni->ni_intval & HAL_BEACON_PERIOD;
3167 }
3168 if (nexttbtt == 0) /* e.g. for ap mode */
3169 nexttbtt = intval;
3170 else if (intval) /* NB: can be 0 for monitor mode */
3171 nexttbtt = roundup(nexttbtt, intval);
3172 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
3173 __func__, nexttbtt, intval, ni->ni_intval);
86877dfb 3174 if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
3175 HAL_BEACON_STATE bs;
3176 int dtimperiod, dtimcount;
3177 int cfpperiod, cfpcount;
3178
3179 /*
3180 * Setup dtim and cfp parameters according to
3181 * last beacon we received (which may be none).
3182 */
3183 dtimperiod = ni->ni_dtim_period;
3184 if (dtimperiod <= 0) /* NB: 0 if not known */
3185 dtimperiod = 1;
3186 dtimcount = ni->ni_dtim_count;
3187 if (dtimcount >= dtimperiod) /* NB: sanity check */
3188 dtimcount = 0; /* XXX? */
3189 cfpperiod = 1; /* NB: no PCF support yet */
3190 cfpcount = 0;
3191 /*
3192 * Pull nexttbtt forward to reflect the current
3193 * TSF and calculate dtim+cfp state for the result.
3194 */
3195 tsf = ath_hal_gettsf64(ah);
3196 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3197 do {
3198 nexttbtt += intval;
3199 if (--dtimcount < 0) {
3200 dtimcount = dtimperiod - 1;
3201 if (--cfpcount < 0)
3202 cfpcount = cfpperiod - 1;
3203 }
3204 } while (nexttbtt < tsftu);
3205 memset(&bs, 0, sizeof(bs));
3206 bs.bs_intval = intval;
3207 bs.bs_nexttbtt = nexttbtt;
3208 bs.bs_dtimperiod = dtimperiod*intval;
3209 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
3210 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
3211 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
3212 bs.bs_cfpmaxduration = 0;
3213#if 0
3214 /*
3215 * The 802.11 layer records the offset to the DTIM
3216 * bitmap while receiving beacons; use it here to
3217 * enable h/w detection of our AID being marked in
3218 * the bitmap vector (to indicate frames for us are
3219 * pending at the AP).
3220 * XXX do DTIM handling in s/w to WAR old h/w bugs
3221 * XXX enable based on h/w rev for newer chips
3222 */
3223 bs.bs_timoffset = ni->ni_timoff;
3224#endif
3225 /*
3226 * Calculate the number of consecutive beacons to miss
86877dfb 3227 * before taking a BMISS interrupt.
3228 * Note that we clamp the result to at most 10 beacons.
3229 */
86877dfb 3230 bs.bs_bmissthreshold = vap->iv_bmissthreshold;
3231 if (bs.bs_bmissthreshold > 10)
3232 bs.bs_bmissthreshold = 10;
3233 else if (bs.bs_bmissthreshold <= 0)
3234 bs.bs_bmissthreshold = 1;
3235
3236 /*
3237 * Calculate sleep duration. The configuration is
3238	 * given in ms. We ensure a multiple of the beacon
3239	 * period is used. Also, if the sleep duration is
3240	 * greater than the DTIM period then it makes sense
3241 * to make it a multiple of that.
3242 *
3243 * XXX fixed at 100ms
3244 */
3245 bs.bs_sleepduration =
3246 roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
3247 if (bs.bs_sleepduration > bs.bs_dtimperiod)
3248 bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);
3249
86877dfb 3250 DPRINTF(sc, ATH_DEBUG_BEACON,
3251 "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
3252 , __func__
3253 , tsf, tsftu
3254 , bs.bs_intval
3255 , bs.bs_nexttbtt
3256 , bs.bs_dtimperiod
3257 , bs.bs_nextdtim
3258 , bs.bs_bmissthreshold
3259 , bs.bs_sleepduration
3260 , bs.bs_cfpperiod
3261 , bs.bs_cfpmaxduration
3262 , bs.bs_cfpnext
3263 , bs.bs_timoffset
3264 );
3265 ath_hal_intrset(ah, 0);
3266 ath_hal_beacontimers(ah, &bs);
3267 sc->sc_imask |= HAL_INT_BMISS;
3268 ath_hal_intrset(ah, sc->sc_imask);
3269 } else {
3270 ath_hal_intrset(ah, 0);
3271 if (nexttbtt == intval)
3272 intval |= HAL_BEACON_RESET_TSF;
3273 if (ic->ic_opmode == IEEE80211_M_IBSS) {
3274 /*
3275 * In IBSS mode enable the beacon timers but only
3276 * enable SWBA interrupts if we need to manually
3277 * prepare beacon frames. Otherwise we use a
3278 * self-linked tx descriptor and let the hardware
3279 * deal with things.
3280 */
3281 intval |= HAL_BEACON_ENA;
3282 if (!sc->sc_hasveol)
3283 sc->sc_imask |= HAL_INT_SWBA;
3284 if ((intval & HAL_BEACON_RESET_TSF) == 0) {
3285 /*
3286 * Pull nexttbtt forward to reflect
3287 * the current TSF.
3288 */
3289 tsf = ath_hal_gettsf64(ah);
3290 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
3291 do {
3292 nexttbtt += intval;
3293 } while (nexttbtt < tsftu);
3294 }
3295 ath_beaconq_config(sc);
3296 } else if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3297 ic->ic_opmode == IEEE80211_M_MBSS) {
193b341d 3298 /*
3299 * In AP/mesh mode we enable the beacon timers
3300 * and SWBA interrupts to prepare beacon frames.
3301 */
3302 intval |= HAL_BEACON_ENA;
3303 sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */
3304 ath_beaconq_config(sc);
3305 }
3306 ath_hal_beaconinit(ah, nexttbtt, intval);
3307 sc->sc_bmisscount = 0;
3308 ath_hal_intrset(ah, sc->sc_imask);
3309 /*
3310 * When using a self-linked beacon descriptor in
3311 * ibss mode load it once here.
3312 */
3313 if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
86877dfb 3314 ath_beacon_start_adhoc(sc, vap);
3315 }
3316 sc->sc_syncbeacon = 0;
3317#undef FUDGE
3318#undef TSF_TO_TU
3319}
3320
3321static void
3322ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3323{
3324 bus_addr_t *paddr = (bus_addr_t*) arg;
3325 KASSERT(error == 0, ("error %u on bus_dma callback", error));
3326 *paddr = segs->ds_addr;
3327}
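/*
 * Editor's note: ath_load_cb() is the usual single-segment busdma callback --
 * bus_dmamap_load() invokes it with the resolved segment list and it records
 * the first segment's bus address through the opaque argument.  A minimal
 * sketch of the calling side (tag/map/vaddr/len are placeholder names; the
 * real call is in ath_descdma_setup() below):
 */
#if 0
	bus_addr_t paddr;
	int error;

	error = bus_dmamap_load(tag, map, vaddr, len,
				ath_load_cb, &paddr, BUS_DMA_NOWAIT);
	/* on success, paddr now holds the bus address of the mapping */
#endif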
3328
3329static int
3330ath_descdma_setup(struct ath_softc *sc,
3331 struct ath_descdma *dd, ath_bufhead *head,
3332 const char *name, int nbuf, int ndesc)
3333{
3334#define DS2PHYS(_dd, _ds) \
3335 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
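/*
 * Editor's note (illustrative, assumed numbers): DS2PHYS maps a descriptor's
 * kernel virtual address to its bus address by reusing its byte offset within
 * the contiguous block, e.g. with dd_desc_paddr == 0x00100000 and _ds pointing
 * at the third descriptor the result is 0x00100000 + 2 * sizeof(struct ath_desc).
 */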
86877dfb 3336 struct ifnet *ifp = sc->sc_ifp;
3337 struct ath_desc *ds;
3338 struct ath_buf *bf;
3339 int i, bsize, error;
3340
3341 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
3342 __func__, name, nbuf, ndesc);
3343
3344 dd->dd_name = name;
3345 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
3346
3347 /*
3348 * Setup DMA descriptor area.
3349 */
86877dfb 3350 error = bus_dma_tag_create(dd->dd_dmat, /* parent */
3351 PAGE_SIZE, 0, /* alignment, bounds */
3352 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
3353 BUS_SPACE_MAXADDR, /* highaddr */
3354 NULL, NULL, /* filter, filterarg */
3355 dd->dd_desc_len, /* maxsize */
3356 1, /* nsegments */
ed33fa9f 3357 dd->dd_desc_len, /* maxsegsize */
3358 BUS_DMA_ALLOCNOW, /* flags */
3359 &dd->dd_dmat);
86877dfb 3360 if (error != 0) {
3361 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3362 return error;
3363 }
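	/*
	 * Editor's note: the tag created above asks for page alignment,
	 * restricts the memory to the 32-bit addressable range (lowaddr ==
	 * BUS_SPACE_MAXADDR_32BIT) and allows exactly one segment of
	 * dd_desc_len bytes, so the whole descriptor ring ends up physically
	 * contiguous and reachable by the device's DMA engine.
	 */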
3364
3365 /* allocate descriptors */
3366 error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
3367 if (error != 0) {
3368 if_printf(ifp, "unable to create dmamap for %s descriptors, "
3369 "error %u\n", dd->dd_name, error);
86877dfb 3370 goto fail0;
3371 }
3372
3373 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3374 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3375 &dd->dd_dmamap);
3376 if (error != 0) {
3377 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3378 "error %u\n", nbuf * ndesc, dd->dd_name, error);
86877dfb 3379 goto fail1;
3380 }
3381
3382 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3383 dd->dd_desc, dd->dd_desc_len,
3384 ath_load_cb, &dd->dd_desc_paddr,
3385 BUS_DMA_NOWAIT);
3386 if (error != 0) {
3387 if_printf(ifp, "unable to map %s descriptors, error %u\n",
3388 dd->dd_name, error);
86877dfb 3389 goto fail2;
3390 }
3391
3392 ds = dd->dd_desc;
3393 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3394 __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
3395 (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
3396
3397 /* allocate rx buffers */
3398 bsize = sizeof(struct ath_buf) * nbuf;
a3062ee4 3399 bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT | M_ZERO);
3400 if (bf == NULL) {
3401 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3402 dd->dd_name, bsize);
3403 goto fail3;
3404 }
3405 dd->dd_bufptr = bf;
3406
86877dfb 3407 STAILQ_INIT(head);
3408 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
3409 bf->bf_desc = ds;
3410 bf->bf_daddr = DS2PHYS(dd, ds);
3411 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3412 &bf->bf_dmamap);
3413 if (error != 0) {
3414 if_printf(ifp, "unable to create dmamap for %s "
3415 "buffer %u, error %u\n", dd->dd_name, i, error);
3416 ath_descdma_cleanup(sc, dd, head);
3417 return error;
3418 }
3419 STAILQ_INSERT_TAIL(head, bf, bf_list);
3420 }
3421 return 0;
3422fail3:
3423 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3424fail2:
3425 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3426fail1:
3427 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3428fail0:
3429 bus_dma_tag_destroy(dd->dd_dmat);
3430 memset(dd, 0, sizeof(*dd));
3431 return error;
3432#undef DS2PHYS
3433}
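/*
 * Editor's note: the fail3..fail0 labels above tear down, in reverse order of
 * construction, the dmamap load, the descriptor memory, the dmamap and the
 * DMA tag; ath_descdma_cleanup() below repeats the same teardown for the
 * fully-built case.  A minimal sketch of the setup/teardown pairing (sizes
 * are made up; see ath_desc_alloc() for the real callers):
 */
#if 0
	struct ath_descdma dd;
	ath_bufhead head;

	if (ath_descdma_setup(sc, &dd, &head, "example", 8, 1) == 0) {
		/* ... use the ath_bufs queued on 'head' ... */
		ath_descdma_cleanup(sc, &dd, &head);
	}
#endif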
3434
3435static void
3436ath_descdma_cleanup(struct ath_softc *sc,
3437 struct ath_descdma *dd, ath_bufhead *head)
3438{
3439 struct ath_buf *bf;
3440 struct ieee80211_node *ni;
3441
3442 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3443 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3444 bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3445 bus_dma_tag_destroy(dd->dd_dmat);
3446
3447 STAILQ_FOREACH(bf, head, bf_list) {
3448 if (bf->bf_m) {
3449 m_freem(bf->bf_m);
3450 bf->bf_m = NULL;
3451 }
3452 if (bf->bf_dmamap != NULL) {
3453 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3454 bf->bf_dmamap = NULL;
3455 }
3456 ni = bf->bf_node;
3457 bf->bf_node = NULL;
3458 if (ni != NULL) {
3459 /*
3460 * Reclaim node reference.
3461 */
3462 ieee80211_free_node(ni);
3463 }
3464 }
193b341d 3465
3466 STAILQ_INIT(head);
3467 kfree(dd->dd_bufptr, M_ATHDEV);
3468 memset(dd, 0, sizeof(*dd));
3469}
3470
3471static int
3472ath_desc_alloc(struct ath_softc *sc)
3473{
3474 int error;
3475
3476 error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3477 "rx", ath_rxbuf, 1);
3478 if (error != 0)
3479 return error;
3480
3481 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3482 "tx", ath_txbuf, ATH_TXDESC);
3483 if (error != 0) {
3484 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
193b341d 3485 return error;
86877dfb 3486 }
3487
3488 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3489 "beacon", ATH_BCBUF, 1);
3490 if (error != 0) {
3491 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3492 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
193b341d 3493 return error;
86877dfb 3494 }
3495 return 0;
3496}
3497
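/*
 * Editor's note: ath_desc_free() below uses dd_desc_len != 0 as the "this
 * ring was set up" marker; both the error unwinding in ath_descdma_setup()
 * and ath_descdma_cleanup() finish with memset(dd, 0, sizeof(*dd)), so a
 * ring that was never allocated (or was already torn down) is skipped safely.
 */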
3498static void
3499ath_desc_free(struct ath_softc *sc)
3500{
3501
86877dfb 3502 if (sc->sc_bdma.dd_desc_len != 0)
193b341d 3503 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
86877dfb 3504 if (sc->sc_txdma.dd_desc_len != 0)
193b341d 3505 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
86877dfb 3506 if (sc->sc_rxdma.dd_desc_len != 0)
193b341d 3507 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3508}
3509
3510static struct ieee80211_node *
86877dfb 3511ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
193b341d 3512{
86877dfb 3513 struct ieee80211com *ic = vap->iv_ic;
3514 struct ath_softc *sc = ic->ic_ifp->if_softc;
3515 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3516 struct ath_node *an;
3517
a3062ee4 3518 an = kmalloc(space, M_80211_NODE, M_INTWAIT|M_ZERO);
3519 if (an == NULL) {
3520 /* XXX stat+msg */
3521 return NULL;
3522 }
3523 ath_rate_node_init(sc, an);
3524
3525 DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3526 return &an->an_node;
3527}
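/*
 * Editor's note: the allocation above is sized so that the net80211
 * ieee80211_node, the driver's ath_node wrapper and the rate-control state
 * (sc_rc->arc_space) share one contiguous block.  net80211 is handed the
 * embedded an_node and the driver recovers its wrapper with the ATH_NODE()
 * macro (assumed to be a simple cast in if_athvar.h, an_node being the first
 * member), e.g. as in ath_node_free() below:
 */
#if 0
	struct ath_node *an = ATH_NODE(ni);	/* ni supplied by net80211 */

	ath_rate_node_cleanup(sc, an);
#endif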
3528
3529static void
3530ath_node_free(struct ieee80211_node *ni)
3531{
3532 struct ieee80211com *ic = ni->ni_ic;
3533 struct ath_softc *sc = ic->ic_ifp->if_softc;
3534
3535 DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3536
3537 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3538 sc->sc_node_free(ni);
3539}
3540
3541static void
3542ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
193b341d 3543{
3544 struct ieee80211com *ic = ni->ni_ic;
3545 struct ath_softc *sc = ic->ic_ifp->if_softc;
3546 struct ath_hal *ah = sc->sc_ah;
193b341d 3547
3548 *rssi = ic->ic_node_getrssi(ni);
3549 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3550 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
193b341d 3551 else
86877dfb 3552 *noise = -95; /* nominally correct */
3553}
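/*
 * Editor's note: the three node methods above are not called directly by the
 * driver; ath_attach() (earlier in this file) is assumed to install them as
 * net80211 overrides, saving the stack's original free handler in
 * sc_node_free (invoked from ath_node_free() above).  A hedged sketch of
 * that hook-up, for context only:
 */
#if 0
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	ic->ic_node_getsignal = ath_node_getsignal;
#endif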
3554
3555static int
3556ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
3557{
3558 struct ath_hal *ah = sc->sc_ah;
3559 int error;
3560 struct mbuf *m;
3561 struct ath_desc *ds;
3562
3563 m = bf->bf_m;
3564 if (m == NULL) {
3565 /*
3566 * NB: by assigning a page to the rx dma buffer we
3567 * implicitly satisfy the Atheros requirement that
 3568		 * this buffer be cache-line-aligned and sized to be a
 3569		 * multiple of the cache line size. Not doing this
3570 * causes weird stuff to happen (for the 5210 at least).
3571 */
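		/*
		 * Editor's note: m_getcl() below returns a standard mbuf
		 * cluster (MCLBYTES, typically 2 KB); the cluster allocator
		 * hands these out aligned and sized as a multiple of the
		 * cache line size, which is what the note above relies on.
		 */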
3572 m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
3573 if (m == NULL) {
3574 DPRINTF(sc, ATH_DEBUG_ANY,
3575 "%s: no mbuf/cluster\n", __func__);
3576 sc->sc_stats.ast_rx_nombuf++;
3577 return ENOMEM;
3578 }
3579 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
3580
04522223 3581 error = bus_dmamap_load_mbuf_segment(sc->sc_dmat,
86877dfb 3582 bf->bf_dmamap, m,
04522223 3583 bf->bf_segs, 1, &bf->bf_nseg,
3584 BUS_DMA_NOWAIT);
3585 if (error != 0) {
3586 DPRINTF(sc, ATH_DEBUG_ANY,
04522223 3587 "%s: bus_dmamap_load_mbuf_segment failed; error %d\n",
3588 __func__, error);
3589 sc->sc_stats.ast_rx_busdma++;