/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */
#include <sys/cdefs.h>

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/module.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net/ifq_var.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_rx_edma.h>
#include <dev/netif/ath/ath/if_ath_tx_edma.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_ath_btcoex.h>
#include <dev/netif/ath/ath/if_ath_spectral.h>
#include <dev/netif/ath/ath/if_ath_lna_div.h>
#include <dev/netif/ath/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * Only enable this if you're working on PS-POLL support.
 */
#undef	ATH_SW_PSQ

#define	CURVNET_SET(name)
#define	CURVNET_RESTORE()
/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);
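
/*
 * For illustration (not from the original sources): with ATH_BCBUF = 4
 * beaconing vap's on a typical 100 TU beacon interval, staggered beacons
 * fire roughly every 25 TU, which bounds how long the cab q has to drain
 * after each beacon before the next one is scheduled.
 */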
static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static int	ath_transmit(struct ifnet *ifp, struct mbuf *m);
static void	ath_qflush(struct ifnet *ifp);
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static void	ath_start(struct ifnet *, struct ifaltq_subque *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_updateslot(struct ifnet *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static int	ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq,
			int dosched);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_txq_sched_tasklet(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
#ifdef	ATH_ENABLE_11N
static void	ath_update_chw(struct ieee80211com *);
#endif	/* ATH_ENABLE_11N */
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);
static void	ath_node_powersave(struct ieee80211_node *, int);
static void	ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *);
static int	ath_node_set_tim(struct ieee80211_node *, int);

#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/netif/ath/ath/if_ath_tdma.h>
#endif

extern const char *ath_hal_ether_sprintf(const u_int8_t *mac);

SYSCTL_DECL(_hw_ath);
/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
int ath_txbuf_mgmt = ATH_MGMT_TXBUF;	/* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
	    0, "tx (mgmt) buffers allocated");
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);

int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");
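
/*
 * Illustrative usage (example values only): the CTLFLAG_RW knobs above
 * can be adjusted at runtime, e.g.
 *	sysctl hw.ath.longcal=60
 *	sysctl hw.ath.bstuck=8
 * while the TUNABLE_INT entries can additionally be preset at boot from
 * loader.conf, e.g. hw.ath.txbuf="512".
 */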
MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

static void
ath_legacy_attach_comp_func(struct ath_softc *sc)
{
	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}
}
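
/*
 * For illustration: sc_txqsetup is a bitmap of the hardware TX queues
 * that were allocated, so with the CAB queue masked off 0x01 means only
 * data queue 0 is in use (ath_tx_proc_q0) and 0x0f means queues 0-3 are
 * in use (ath_tx_proc_q0123); any other combination falls back to the
 * generic ath_tx_proc() completion handler.
 */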
#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)

int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];
	int rx_chainmask, tx_chainmask;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	/* prepare sysctl tree for use in sub modules */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_hw),
		OID_AUTO,
		device_get_nameunit(sc->sc_dev),
		CTLFLAG_RD, 0, "");

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif
	/*
	 * Setup the DMA/EDMA functions based on the current
	 * hardware support.
	 *
	 * This is required before the descriptors are allocated.
	 */
	if (ath_hal_hasedma(sc->sc_ah)) {
		sc->sc_isedma = 1;
		ath_recv_setup_edma(sc);
		ath_xmit_setup_edma(sc);
	} else {
		ath_recv_setup_legacy(sc);
		ath_xmit_setup_legacy(sc);
	}

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;
	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);
	/*
	 * Allocate TX descriptors and populate the lists.
	 */
	wlan_assert_serialized();
	wlan_serialize_exit();
	error = ath_desc_alloc(sc);
	wlan_serialize_enter();
	if (error != 0) {
		if_printf(ifp, "failed to allocate TX descriptors: %d\n",
		    error);
		goto bad;
	}
	error = ath_txdma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate TX descriptors: %d\n",
		    error);
		goto bad;
	}

	/*
	 * Allocate RX descriptors and populate the lists.
	 */
	error = ath_rxdma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate RX descriptors: %d\n",
		    error);
		goto bad;
	}

	callout_init_mp(&sc->sc_cal_ch);
	callout_init_mp(&sc->sc_wd_ch);

	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_INTWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc);
	TASK_INIT(&sc->sc_resettask, 0, ath_reset_proc, sc);
	TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);
	TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);
	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(sc);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
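
	/*
	 * NB: when the fallback above triggers, every WME access category
	 * maps to the BK hardware queue; the IEEE80211_C_WME capability
	 * check below relies on this by comparing sc_ac2q[WME_AC_BE]
	 * against sc_ac2q[WME_AC_BK] before advertising WME.
	 */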
	/*
	 * Attach the TX completion function.
	 *
	 * The non-EDMA chips may have some special case optimisations;
	 * this method gives everyone a chance to attach cleanly.
	 */
	sc->sc_tx.xmit_attach_comp_func(sc);

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach spectral module */
	if (ath_spectral_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach spectral\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach bluetooth coexistence module */
	if (ath_btcoex_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach bluetooth coexistence\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach LNA diversity module */
	if (ath_lna_div_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach LNA diversity\n", __func__);
		error = EIO;
		goto bad2;
	}
	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init_mp(&sc->sc_ledtimer);

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
	 */
	sc->sc_hardled = (1 == 0);
	sc->sc_led_net_pin = -1;
	sc->sc_led_pwr_pin = -1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_transmit = ath_transmit;
	ifp->if_qflush = ath_qflush;
	ifp->if_start = ath_start;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);
	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
#ifndef ATH_ENABLE_11N
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
#endif
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef	ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable radar detection */
#endif
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
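
	/*
	 * For illustration: with IEEE80211_WEP_NKID = 4, global key 0
	 * reserves slot 0 plus its MIC slot 64; when split MIC is in use
	 * the companion slots 32 and 96 (32+64) are reserved too, i.e.
	 * four cache slots per global key instead of two.
	 */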
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah);
	sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah);
	sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA;	/* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif
	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;
	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * How deep can the node software TX queue get whilst it's asleep.
	 */
	sc->sc_txq_node_psq_maxdepth = 16;

	/*
	 * Default the maximum queue depth for a given node
	 * to 1/4'th the TX buffers, or 64, whichever
	 * is larger.
	 */
	sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4);
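
	/*
	 * Worked example (illustrative): if ath_txbuf is 512, then
	 * MAX(64, 512 / 4) caps each node at 128 queued frames; the
	 * floor of 64 only takes effect when fewer than 256 TX buffers
	 * are configured.
	 */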
	/* Enable CABQ by default */
	sc->sc_cabq_enable = 1;

	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
	 */
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}
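
	/*
	 * Hypothetical hints example: to force single-chain operation on
	 * the first device one could set, via loader.conf/device.hints:
	 *	hint.ath.0.rx_chainmask="1"
	 *	hint.ath.0.tx_chainmask="1"
	 */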
	/*
	 * Query the TX/RX chainmask configuration.
	 *
	 * This is only relevant for 11n devices.
	 */
	ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
	ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

	/*
	 * Disable MRR with protected frames by default.
	 * Only 802.11n series NICs can handle this.
	 */
	sc->sc_mrrprot = 0;	/* XXX should be a capability */

	/*
	 * Query the enterprise mode information from the HAL.
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0,
	    &sc->sc_ent_cfg) == HAL_OK)
		sc->sc_use_ent = 1;

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		uint32_t rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");

		sc->sc_mrrprot = 1;	/* XXX should be a capability */

		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			    | IEEE80211_HTC_AMPDU	/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU	/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839
							/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    | IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);
		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;
		/*
		 * Setup TX and RX STBC based on what the HAL allows and
		 * the currently configured chainmask set.
		 * Ie - don't enable STBC TX if only one chain is enabled.
		 * STBC RX is fine on a single RX chain; it just won't
		 * provide any real benefit.
		 */
		if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0,
		    NULL) == HAL_OK) {
			sc->sc_rx_stbc = 1;
			device_printf(sc->sc_dev,
			    "[HT] 1 stream STBC receive enabled\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM;
		}
		if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0,
		    NULL) == HAL_OK) {
			sc->sc_tx_stbc = 1;
			device_printf(sc->sc_dev,
			    "[HT] 1 stream STBC transmit enabled\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC;
		}

		(void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
		    &sc->sc_rts_aggr_limit);
		if (sc->sc_rts_aggr_limit != (64 * 1024))
			device_printf(sc->sc_dev,
			    "[HT] RTS aggregates limited to %d KiB\n",
			    sc->sc_rts_aggr_limit / 1024);

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Initial aggregation settings.
	 */
	sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH;
	sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH;
	sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
	sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
	sc->sc_aggr_limit = ATH_AGGR_MAXSIZE;
	sc->sc_delim_min_pad = 0;
	/*
	 * Check if the hardware requires PCI register serialisation.
	 * Some of the Owl based MACs require this.
	 */
	if (ncpus > 1 &&
	    ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
	     0, NULL) == HAL_OK) {
		sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
		device_printf(sc->sc_dev,
		    "Enabling register serialisation\n");
	}
	/*
	 * Initialise the deferred completed RX buffer list.
	 */
	TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
	TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;
#ifdef	ATH_ENABLE_11N
	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ic->ic_update_chw = ath_update_chw;
#endif	/* ATH_ENABLE_11N */
#ifdef	ATH_ENABLE_RADIOTAP_VENDOR_EXT
	/*
	 * There's one vendor bitmap entry in the RX radiotap
	 * header; make sure that's taken into account.
	 */
	ieee80211_radiotap_attachv(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
		ATH_RX_RADIOTAP_PRESENT);
#else
	/*
	 * No vendor bitmap/extensions are present.
	 */
	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		ATH_RX_RADIOTAP_PRESENT);
#endif	/* ATH_ENABLE_RADIOTAP_VENDOR_EXT */

	/*
	 * Setup the ALQ logging if required
	 */
#ifdef	ATH_DEBUG_ALQ
	if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev));
	if_ath_alq_setcfg(&sc->sc_alq,
	    sc->sc_ah->ah_macVersion,
	    sc->sc_ah->ah_macRev,
	    sc->sc_ah->ah_phyRev,
	    sc->sc_ah->ah_magic);
#endif
	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
	ath_txdma_teardown(sc);
	ath_rxdma_teardown(sc);
bad:
	if (ah)
		ath_hal_detach(ah);

	/*
	 * To work around scoping issues with CURVNET_SET/CURVNET_RESTORE..
	 */
#if !defined(__DragonFly__)
	if (ifp != NULL && ifp->if_vnet) {
		CURVNET_SET(ifp->if_vnet);
		if_free(ifp);
		CURVNET_RESTORE();
	} else
#endif
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);

	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);
#ifdef	ATH_DEBUG_ALQ
	if_ath_alq_tidyup(&sc->sc_alq);
#endif
	ath_lna_div_detach(sc);
	ath_btcoex_detach(sc);
	ath_spectral_detach(sc);
	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_txdma_teardown(sc);
	ath_rxdma_teardown(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */

	CURVNET_SET(ifp->if_vnet);
	if_free(ifp);
	CURVNET_RESTORE();

	if (sc->sc_sysctl_tree) {
		sysctl_ctx_free(&sc->sc_sysctl_ctx);
		sc->sc_sysctl_tree = NULL;
	}

	return 0;
}
/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}
static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}
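
/*
 * For illustration: a vap assigned slot i = 1 had mac[0] bits
 * (1 << 2) | 0x2 = 0x06 set, so while that slot is live the hardware
 * BSSID mask must keep those bits cleared; once the slot is reclaimed
 * the loop above adds them back unless another active vap still needs
 * them.
 */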
/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot;

	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			/* NB: keep looking for a double slot */
		}
	return 0;
}
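
/*
 * For illustration: with ATH_BCBUF = 4 and one vap already in slot 0,
 * slot 1 is rejected (its neighbour 0 is busy) and slot 2 is returned
 * since both of its neighbours (1 and 3) are free - the second
 * beaconing vap ends up opposite the first, half a beacon interval
 * away.
 */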
static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = (struct ath_vap *) kmalloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons are implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
	}

	vap = &avp->av_vap;
	/* XXX can't hold mutex across if_alloc */
	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid, mac);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
		    __func__, error);
		goto bad2;
	}
	/* h/w crypto support */
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;

	/* override various methods */
	avp->av_recv_mgmt = vap->iv_recv_mgmt;
	vap->iv_recv_mgmt = ath_recv_mgmt;
	vap->iv_reset = ath_reset_vap;
	vap->iv_update_beacon = ath_beacon_update;
	avp->av_newstate = vap->iv_newstate;
	vap->iv_newstate = ath_newstate;
	avp->av_bmiss = vap->iv_bmiss;
	vap->iv_bmiss = ath_bmiss_vap;

	avp->av_node_ps = vap->iv_node_ps;
	vap->iv_node_ps = ath_node_powersave;

	avp->av_set_tim = vap->iv_set_tim;
	vap->iv_set_tim = ath_node_set_tim;

	avp->av_recv_pspoll = vap->iv_recv_pspoll;
	vap->iv_recv_pspoll = ath_node_recv_pspoll;

	/* Set default parameters */

	/*
	 * MACs earlier than some of the AR9300 series don't
	 * support a smaller MPDU density.
	 */
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
	/*
	 * All NICs can handle the maximum size, however
	 * AR5416 based MACs can only TX aggregates w/ RTS
	 * protection when the total aggregate size is <= 8k.
	 * However, for now that's enforced by the TX path.
	 */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	avp->av_bslot = -1;
	if (needbeacon) {
		/*
		 * Allocate beacon state and setup the q for buffered
		 * multicast frames.  We know a beacon buffer is
		 * available because we checked above.
		 */
		avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
		TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
			/*
			 * Assign the vap to a beacon xmit slot.  As above
			 * this cannot fail to find a free one.
			 */
			avp->av_bslot = assign_bslot(sc);
			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
			    ("beacon slot %u not empty", avp->av_bslot));
			sc->sc_bslot[avp->av_bslot] = vap;
			sc->sc_nbcnvaps++;
		}
		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
			/*
			 * Multiple vaps are to transmit beacons and we
			 * have h/w support for TSF adjusting; enable
			 * use of staggered beacons.
			 */
			sc->sc_stagbeacons = 1;
		}
		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
	}

	ic->ic_opmode = ic_opmode;
	if (opmode != IEEE80211_M_WDS) {
		sc->sc_nvaps++;
		if (opmode == IEEE80211_M_STA)
			sc->sc_nstavaps++;
		if (opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps++;
	}
	switch (ic_opmode) {
	case IEEE80211_M_IBSS:
		sc->sc_opmode = HAL_M_IBSS;
		break;
	case IEEE80211_M_STA:
		sc->sc_opmode = HAL_M_STA;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (vap->iv_caps & IEEE80211_C_TDMA) {
			sc->sc_tdma = 1;
			/* NB: disable tsf adjust */
			sc->sc_stagbeacons = 0;
		}
		/*
		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
		 * just ap mode.
		 */
		/* fall thru... */
#endif
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		sc->sc_opmode = HAL_M_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_opmode = HAL_M_MONITOR;
		break;
	default:
		/* XXX should not happen */
		break;
	}
	if (sc->sc_hastsfadd) {
		/*
		 * Configure whether or not TSF adjust should be done.
		 */
		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
	}
	if (flags & IEEE80211_CLONE_NOBEACONS) {
		/*
		 * Enable s/w beacon miss handling.
		 */
		sc->sc_swbmiss = 1;
	}

	/* complete setup */
	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
	return vap;
bad2:
	reclaim_address(sc, mac);
	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
	kfree(avp, M_80211_VAP);
	return NULL;
}
static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
	if (ifp->if_flags & IFF_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc, ATH_RESET_DEFAULT);	/* stop hw xmit side */
		/* XXX Do all frames from all vaps/nodes need draining here? */
		ath_stoprecv(sc, 1);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);

	/*
	 * XXX Danger Will Robinson! Danger!
	 *
	 * Because ieee80211_vap_detach() can queue a frame (the station
	 * disassociate message?) after we've drained the TXQ and
	 * flushed the software TXQ, we will end up with a frame queued
	 * to a node whose vap is about to be freed.
	 *
	 * To work around this, flush the hardware/software again.
	 * This may be racy - the ath task may be running and the packet
	 * may be being scheduled between sw->hw txq. Tsk.
	 *
	 * TODO: figure out why a new node gets allocated somewhere around
	 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
	 * call!)
	 */

	ath_draintxq(sc, ATH_RESET_DEFAULT);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	kfree(avp, M_80211_VAP);

	if (ifp->if_flags & IFF_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);
	}
}
void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;

	ieee80211_suspend_all(ic);
	/*
	 * NB: don't worry about putting the chip in low power
	 * mode; pci will power off our socket on suspend and
	 * CardBus detaches the device.
	 */

	/*
	 * XXX ensure none of the taskqueues are running
	 * XXX ensure sc_invalid is 1
	 * XXX ensure the calibration callout is disabled
	 */

	/* Disable the PCIe PHY, complete with workarounds */
	ath_hal_enablepcie(sc->sc_ah, 1, 1);
}
/*
 * Reset the key cache since some parts do not reset the
 * contents on resume.  First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are setup
 * in h/w.
 */
static void
ath_reset_keycache(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int i;

	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	ieee80211_crypto_reload_keys(ic);
}

/*
 * Fetch the current chainmask configuration based on the current
 * operating channel and options.
 */
static void
ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	/*
	 * Set TX chainmask to the currently configured chainmask;
	 * the TX chainmask depends upon the current operating mode.
	 */
	sc->sc_cur_rxchainmask = sc->sc_rxchainmask;
	if (IEEE80211_IS_CHAN_HT(chan)) {
		sc->sc_cur_txchainmask = sc->sc_txchainmask;
	} else {
		sc->sc_cur_txchainmask = 1;
	}

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: TX chainmask is now 0x%x, RX is now 0x%x\n",
	    __func__,
	    sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
}
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/* Re-enable PCIe, re-enable the PCIe bus */
	ath_hal_enablepcie(ah, 0, 0);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_update_chainmasks(sc,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan);
	ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Let spectral at in case spectral is enabled */
	ath_spectral_enable(sc, ic->ic_curchan);

	/*
	 * Let bluetooth coexistence at in case it's needed for this channel
	 */
	ath_btcoex_enable(sc, ic->ic_curchan);

	/*
	 * If we're doing TDMA, enforce the TXOP limitation for chips that
	 * support it.
	 */
	if (sc->sc_hasenforcetxop && sc->sc_tdma)
		ath_hal_setenforcetxop(sc->sc_ah, 1);
	else
		ath_hal_setenforcetxop(sc->sc_ah, 0);

	/* Restore the LED configuration */
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	if (sc->sc_resume_up)
		ieee80211_resume_all(ic);
}
void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	/* NB: no point powering down chip as we're about to reboot */
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;
	uint32_t txqs;
	/*
	 * If we're inside a reset path, just print a warning and
	 * clear the ISR. The reset routine will finish it for us.
	 */
	if (sc->sc_inreset_cnt) {
		HAL_INT status;

		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		DPRINTF(sc, ATH_DEBUG_ANY,
		    "%s: in reset, ignoring: status=0x%x\n",
		    __func__, status);
		return;
	}

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	if (!ath_hal_intrpend(ah)) {	/* shared irq, not for us */
		return;
	}

	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_flags & IFF_RUNNING) == 0) {
		HAL_INT status;

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		return;
	}

	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
#ifdef	ATH_DEBUG_ALQ
	if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate,
	    ah->ah_syncstate);
#endif	/* ATH_DEBUG_ALQ */
#ifdef	ATH_KTR_INTR_DEBUG
	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
	    "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
	    ah->ah_intrstate[0],
	    ah->ah_intrstate[1],
	    ah->ah_intrstate[2],
	    ah->ah_intrstate[3],
	    ah->ah_intrstate[6]);
#endif	/* ATH_KTR_INTR_DEBUG */

	/* Squirrel away SYNC interrupt debugging */
	if (ah->ah_syncstate != 0) {
		int i;

		for (i = 0; i < 32; i++)
			if (ah->ah_syncstate & (1 << i))
				sc->sc_intr_stats.sync_intr[i]++;
	}

	status &= sc->sc_imask;			/* discard unasked for bits */

	/* Short-circuit un-handled interrupts */
	if (status == 0x0) {
		return;
	}

	/*
	 * Take a note that we're inside the interrupt handler, so
	 * the reset routines know to wait.
	 */
	sc->sc_intr_cnt++;

	/*
	 * Handle the interrupt. We won't run concurrent with the reset
	 * or channel change routines as they'll wait for sc_intr_cnt
	 * to be 0 before continuing.
	 */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma) {
				if (sc->sc_tdmaswba == 0) {
					struct ieee80211com *ic = ifp->if_l2com;
					struct ieee80211vap *vap =
					    TAILQ_FIRST(&ic->ic_vaps);
					ath_tdma_beacon_send(sc, vap);
					sc->sc_tdmaswba =
					    vap->iv_tdma->tdma_bintval;
				} else
					sc->sc_tdmaswba--;
			} else
#endif
			{
				ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
				/*
				 * Schedule the rx taskq in case there's no
				 * traffic so any frames held on the staging
				 * queue are aged and potentially flushed.
				 */
				sc->sc_rx.recv_sched(sc, 1);
#endif
			}
		}
		if (status & HAL_INT_RXEOL) {
			int imask;

			ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL");
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			/*
			 * Disable RXEOL/RXORN - prevent an interrupt
			 * storm until the PCU logic can be reset.
			 * In case the interface is reset some other
			 * way before "sc_kickpcu" is called, don't
			 * modify sc_imask - that way if it is reset
			 * by a call to ath_reset() somehow, the
			 * interrupt mask will be correctly reprogrammed.
			 */
			imask = sc->sc_imask;
			imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
			ath_hal_intrset(ah, imask);
			/*
			 * Only blank sc_rxlink if we've not yet kicked
			 * the PCU.
			 *
			 * This isn't entirely correct - the correct solution
			 * would be to have a PCU lock and engage that for
			 * the duration of the PCU fiddling; which would include
			 * running the RX process. Otherwise we could end up
			 * messing up the RX descriptor chain and making the
			 * RX desc list much shorter.
			 */
			if (! sc->sc_kickpcu)
				sc->sc_rxlink = NULL;
			sc->sc_kickpcu = 1;
			/*
			 * Enqueue an RX proc to handle whatever
			 * is in the RX queue.
			 * This will then kick the PCU.
			 */
			sc->sc_rx.recv_sched(sc, 1);
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		/*
		 * Handle both the legacy and RX EDMA interrupt bits.
		 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC.
		 */
		if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) {
			sc->sc_stats.ast_rx_intr++;
			sc->sc_rx.recv_sched(sc, 1);
		}
		if (status & HAL_INT_TX) {
			sc->sc_stats.ast_tx_intr++;
			/*
			 * Grab all the currently set bits in the HAL txq bitmap
			 * and blank them. This is the only place we should be
			 * doing this.
			 */
			if (! sc->sc_isedma) {
				txqs = 0xffffffff;
				ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
				ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3,
				    "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x",
				    txqs,
				    sc->sc_txq_active,
				    sc->sc_txq_active | txqs);
				sc->sc_txq_active |= txqs;
			}
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
		}
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_GTT)
			sc->sc_stats.ast_tx_timeout++;
		if (status & HAL_INT_CST)
			sc->sc_stats.ast_tx_cst++;
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event. We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			/*
			 * Don't reset the interrupt if we've just
			 * kicked the PCU, or we may get a nested
			 * RXEOL before the rxproc has had a chance
			 * to run.
			 */
			if (sc->sc_kickpcu == 0)
				ath_hal_intrset(ah, sc->sc_imask);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN");
			sc->sc_stats.ast_rxorn++;
		}
	}
	sc->sc_intr_cnt--;
}
static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	wlan_serialize_enter();
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1], state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(ifp, ATH_RESET_NOLOSS);
	wlan_serialize_exit();
}
static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		struct ifnet *ifp = vap->iv_ic->ic_ifp;
		struct ath_softc *sc = ifp->if_softc;
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* XXX should take a locked ref to iv_bss */
		u_int bmisstimeout =
			vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);
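
		/*
		 * Worked example (illustrative): with a bmiss threshold
		 * of 10 beacons and a 100 TU beacon interval, the timeout
		 * is 10 * 100 * 1024 = 1,024,000 usec, i.e. the bmiss is
		 * treated as phantom if any frame was received within
		 * roughly the last second.
		 */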
		if (tsf - lastrx <= bmisstimeout) {
			sc->sc_stats.ast_bmiss_phantom++;
			return;
		}
	}
	ATH_VAP(vap)->av_bmiss(vap);
}

static int
ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
{
	uint32_t rsize;
	void *sp;

	if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
		return 0;
	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
	*hangs = *(uint32_t *)sp;
	return 1;
}
static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);

	/*
	 * Do a reset upon any beacon miss event.
	 *
	 * It may be a non-recognised RX clear hang which needs a reset
	 * to clear.
	 */
	wlan_serialize_enter();
	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
		ath_reset(ifp, ATH_RESET_NOLOSS);
		if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
	} else {
		ath_reset(ifp, ATH_RESET_NOLOSS);
		ieee80211_beacon_miss(ifp->if_l2com);
	}
	wlan_serialize_exit();
}

/*
 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
 * calcs together with WME.  If necessary disable the crypto
 * hardware and mark the 802.11 state so keys will be setup
 * with the MIC work done in software.
 */
static void
ath_settkipmic(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
		if (ic->ic_flags & IEEE80211_F_WME) {
			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
		} else {
			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		}
	}
}
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	ath_update_chainmasks(sc, ic->ic_curchan);
	ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);
2111 /* Let DFS at it in case it's a DFS channel */
2112 ath_dfs_radar_enable(sc, ic->ic_curchan);
2114 /* Let spectral at it in case spectral is enabled */
2115 ath_spectral_enable(sc, ic->ic_curchan);
2118 * Let bluetooth coexistence at it in case it's needed for this channel
2120 ath_btcoex_enable(sc, ic->ic_curchan);
2123 * If we're doing TDMA, enforce the TXOP limitation for chips that
2126 if (sc->sc_hasenforcetxop && sc->sc_tdma)
2127 ath_hal_setenforcetxop(sc->sc_ah, 1);
2129 ath_hal_setenforcetxop(sc->sc_ah, 0);
2132 * Likewise this is set during reset so update
2133 * state cached in the driver.
2135 sc->sc_diversity = ath_hal_getdiversity(ah);
2136 sc->sc_lastlongcal = 0;
2137 sc->sc_resetcal = 1;
2138 sc->sc_lastcalreset = 0;
2140 sc->sc_lastshortcal = 0;
2141 sc->sc_doresetcal = AH_FALSE;
2143 * Beacon timers were cleared here; give ath_newstate()
2144 * a hint that the beacon timers should be poked when
2145 * things transition to the RUN state.
2150 * Setup the hardware after reset: the key cache
2151 * is filled as needed and the receive engine is
2152 * set going. Frame transmit is handled entirely
2153 * in the frame output path; there's nothing to do
2154 * here except setup the interrupt mask.
2156 if (ath_startrecv(sc) != 0) {
2157 if_printf(ifp, "unable to start recv logic\n");
2163 * Enable interrupts.
2165 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
2166 | HAL_INT_RXEOL | HAL_INT_RXORN
2168 | HAL_INT_FATAL | HAL_INT_GLOBAL;
2171 * Enable RX EDMA bits. Note these overlap with
2172 * HAL_INT_RX and HAL_INT_RXDESC respectively.
2175 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP);
2178 * Enable MIB interrupts when there are hardware phy counters.
2179 * Note we only do this (at the moment) for station mode.
2181 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
2182 sc->sc_imask |= HAL_INT_MIB;
2184 /* Enable global TX timeout and carrier sense timeout if available */
2185 if (ath_hal_gtxto_supported(ah))
2186 sc->sc_imask |= HAL_INT_GTT;
2188 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
2189 __func__, sc->sc_imask);
2191 ifp->if_flags |= IFF_RUNNING;
2192 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
2193 ath_hal_intrset(ah, sc->sc_imask);
2197 #ifdef ATH_TX99_DIAG
2198 if (sc->sc_tx99 != NULL)
2199 sc->sc_tx99->start(sc->sc_tx99);
2202 ieee80211_start_all(ic); /* start all vap's */
2206 ath_stop_locked(struct ifnet *ifp)
2208 struct ath_softc *sc = ifp->if_softc;
2209 struct ath_hal *ah = sc->sc_ah;
2211 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
2212 __func__, sc->sc_invalid, ifp->if_flags);
2214 ATH_LOCK_ASSERT(sc);
2215 if (ifp->if_flags & IFF_RUNNING) {
2217 * Shutdown the hardware and driver:
2218 * reset 802.11 state machine
2220 * disable interrupts
2221 * turn off the radio
2222 * clear transmit machinery
2223 * clear receive machinery
2224 * drain and release tx queues
2225 * reclaim beacon resources
2226 * power down hardware
2228 * Note that some of this work is not possible if the
2229 * hardware is gone (invalid).
2231 #ifdef ATH_TX99_DIAG
2232 if (sc->sc_tx99 != NULL)
2233 sc->sc_tx99->stop(sc->sc_tx99);
2235 callout_stop(&sc->sc_wd_ch);
2236 sc->sc_wd_timer = 0;
2237 ifp->if_flags &= ~IFF_RUNNING;
2238 if (!sc->sc_invalid) {
2239 if (sc->sc_softled) {
2240 callout_stop(&sc->sc_ledtimer);
2241 ath_hal_gpioset(ah, sc->sc_ledpin,
2243 sc->sc_blinking = 0;
2245 ath_hal_intrset(ah, 0);
2247 ath_draintxq(sc, ATH_RESET_DEFAULT);
2248 if (!sc->sc_invalid) {
2249 ath_stoprecv(sc, 1);
2250 ath_hal_phydisable(ah);
2252 sc->sc_rxlink = NULL;
2253 ath_beacon_free(sc); /* XXX not needed */
2257 #define MAX_TXRX_ITERATIONS 1000
2259 ath_txrx_stop_locked(struct ath_softc *sc)
2261 int i = MAX_TXRX_ITERATIONS;
2263 ATH_UNLOCK_ASSERT(sc);
2264 ATH_PCU_LOCK_ASSERT(sc);
2267 * Sleep until all the pending operations have completed.
2269 * The caller must ensure that reset has been incremented
2270 * or the pending operations may continue being queued.
2272 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
2273 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
2276 wlan_serialize_sleep(sc, 0, "ath_txrx_stop", 1);
2281 device_printf(sc->sc_dev,
2282 "%s: didn't finish after %d iterations\n",
2283 __func__, MAX_TXRX_ITERATIONS);
2285 #undef MAX_TXRX_ITERATIONS
2289 ath_txrx_stop(struct ath_softc *sc)
2291 ATH_UNLOCK_ASSERT(sc);
2292 ATH_PCU_UNLOCK_ASSERT(sc);
2295 ath_txrx_stop_locked(sc);
2301 ath_txrx_start(struct ath_softc *sc)
2304 taskqueue_unblock(sc->sc_tq);
2308 * Grab the reset lock, and wait around until no one else
2309 * is trying to do anything with it.
2311 * This is totally horrible but we can't hold this lock for
2312 * long enough to do TX/RX or we end up with net80211/ip stack
2313 * LORs and eventual deadlock.
2315 * "dowait" signals whether to spin, waiting for the reset
2316 * lock count to reach 0. This should (for now) only be used
2317 * during the reset path, as the rest of the code may not
2318 * be locking-reentrant enough to behave correctly.
2320 * Another, cleaner way should be found to serialise all of
2323 #define MAX_RESET_ITERATIONS 10
2325 ath_reset_grablock(struct ath_softc *sc, int dowait)
2328 int i = MAX_RESET_ITERATIONS;
2330 ATH_PCU_LOCK_ASSERT(sc);
2332 if (sc->sc_inreset_cnt == 0) {
2341 wlan_serialize_sleep(sc, 0, "ath_reset_grablock", 1);
2347 * We always increment the refcounter, regardless
2348 * of whether we succeeded to get it in an exclusive
2351 sc->sc_inreset_cnt++;
2354 device_printf(sc->sc_dev,
2355 "%s: didn't finish after %d iterations\n",
2356 __func__, MAX_RESET_ITERATIONS);
2359 device_printf(sc->sc_dev,
2360 "%s: warning, recursive reset path!\n",
2365 #undef MAX_RESET_ITERATIONS
2368 * XXX TODO: write ath_reset_releaselock
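/*
 * Hypothetical sketch of that missing release half (not existing
 * driver code): the reset path currently just decrements
 * sc_inreset_cnt inline (see ath_reset()); a paired helper to
 * ath_reset_grablock() could look like this.
 */
static void
ath_reset_releaselock_sketch(struct ath_softc *sc)
{
	ATH_PCU_LOCK_ASSERT(sc);
	KASSERT(sc->sc_inreset_cnt > 0,
	    ("%s: refcount underflow", __func__));
	sc->sc_inreset_cnt--;
}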
2372 ath_stop(struct ifnet *ifp)
2374 struct ath_softc *sc __unused = ifp->if_softc;
2377 ath_stop_locked(ifp);
2382 * Reset the hardware w/o losing operational state. This is
2383 * basically a more efficient way of doing ath_stop, ath_init,
2384 * followed by state transitions to the current 802.11
2385 * operational state. Used to recover from various errors and
2386 * to reset or reload hardware state.
2389 ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
2391 struct ath_softc *sc = ifp->if_softc;
2392 struct ieee80211com *ic = ifp->if_l2com;
2393 struct ath_hal *ah = sc->sc_ah;
2397 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
2399 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2400 ATH_PCU_UNLOCK_ASSERT(sc);
2401 ATH_UNLOCK_ASSERT(sc);
2403 /* Try to stop any further TX/RX from occurring */
2404 taskqueue_block(sc->sc_tq);
2409 * Grab the reset lock before TX/RX is stopped.
2411 * This is needed to ensure that when the TX/RX actually does finish,
2412 * no further TX/RX/reset runs in parallel with this.
2414 if (ath_reset_grablock(sc, 1) == 0) {
2415 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2419 /* disable interrupts */
2420 ath_hal_intrset(ah, 0);
2423 * Now, ensure that any in progress TX/RX completes before we
2426 ath_txrx_stop_locked(sc);
2431 * Should now wait for pending TX/RX to complete
2432 * and block future ones from occurring. This needs to be
2433 * done before the TX queue is drained.
2435 ath_draintxq(sc, reset_type); /* stop xmit side */
2438 * Regardless of whether we're doing a no-loss flush or
2439 * not, stop the PCU and handle what's in the RX queue.
2440 * That way frames aren't dropped which shouldn't be.
2442 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2445 ath_settkipmic(sc); /* configure TKIP MIC handling */
2446 /* NB: indicate channel change so we do a full reset */
2447 ath_update_chainmasks(sc, ic->ic_curchan);
2448 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
2449 sc->sc_cur_rxchainmask);
2450 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
2451 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
2453 sc->sc_diversity = ath_hal_getdiversity(ah);
2455 /* Let DFS at it in case it's a DFS channel */
2456 ath_dfs_radar_enable(sc, ic->ic_curchan);
2458 /* Let spectral at it in case spectral is enabled */
2459 ath_spectral_enable(sc, ic->ic_curchan);
2462 * Let bluetooth coexistence at it in case it's needed for this channel
2464 ath_btcoex_enable(sc, ic->ic_curchan);
2467 * If we're doing TDMA, enforce the TXOP limitation for chips that
2470 if (sc->sc_hasenforcetxop && sc->sc_tdma)
2471 ath_hal_setenforcetxop(sc->sc_ah, 1);
2473 ath_hal_setenforcetxop(sc->sc_ah, 0);
2475 if (ath_startrecv(sc) != 0) /* restart recv */
2476 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2478 * We may be doing a reset in response to an ioctl
2479 * that changes the channel so update any state that
2480 * might change as a result.
2482 ath_chan_change(sc, ic->ic_curchan);
2483 if (sc->sc_beacons) { /* restart beacons */
2484 #ifdef IEEE80211_SUPPORT_TDMA
2486 ath_tdma_config(sc, NULL);
2489 ath_beacon_config(sc, NULL);
2493 * Release the reset lock and re-enable interrupts here.
2494 * If an interrupt was being processed in ath_intr(),
2495 * it would disable interrupts at this point. So we have
2496 * to atomically enable interrupts and decrement the
2497 * reset counter - this way ath_intr() doesn't end up
2498 * disabling interrupts without a corresponding enable
2499 * in the reset or channel change path.
2502 sc->sc_inreset_cnt--;
2503 /* XXX only do this if sc_inreset_cnt == 0? */
2504 ath_hal_intrset(ah, sc->sc_imask);
2508 * TX and RX can be started here. If it were started with
2509 * sc_inreset_cnt > 0, the TX and RX path would abort.
2510 * Thus if this is a nested call through the reset or
2511 * channel change code, TX completion will occur but
2512 * RX completion and ath_start / ath_tx_start will not
2516 /* Restart TX/RX as needed */
2519 /* Restart TX completion and pending TX */
2520 if (reset_type == ATH_RESET_NOLOSS) {
2521 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2522 if (ATH_TXQ_SETUP(sc, i)) {
2523 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2524 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2525 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2528 ath_txq_sched(sc, &sc->sc_txq[i]);
2535 /* remove, DragonFly uses OACTIVE to control if_start calls */
2537 * This may have been set during an ath_start() call which
2538 * set this once it detected a concurrent TX was going on.
2541 IF_LOCK(&ifp->if_snd);
2542 ifq_clr_oactive(&ifp->if_snd);
2543 IF_UNLOCK(&ifp->if_snd);
2546 /* Handle any frames in the TX queue */
2548 * XXX should this be done by the caller, rather than
2551 ath_tx_kick(sc); /* restart xmit */
2556 ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
2558 struct ieee80211com *ic = vap->iv_ic;
2559 struct ifnet *ifp = ic->ic_ifp;
2560 struct ath_softc *sc = ifp->if_softc;
2561 struct ath_hal *ah = sc->sc_ah;
2564 case IEEE80211_IOC_TXPOWER:
2566 * If per-packet TPC is enabled, then we have nothing
2567 * to do; otherwise we need to force the global limit.
2568 * All this can happen directly; no need to reset.
2570 if (!ath_hal_gettpc(ah))
2571 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
2574 /* XXX? Full or NOLOSS? */
2575 return ath_reset(ifp, ATH_RESET_FULL);
2579 _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
2583 ATH_TXBUF_LOCK_ASSERT(sc);
2585 if (btype == ATH_BUFTYPE_MGMT)
2586 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
2588 bf = TAILQ_FIRST(&sc->sc_txbuf);
2591 sc->sc_stats.ast_tx_getnobuf++;
2593 if (bf->bf_flags & ATH_BUF_BUSY) {
2594 sc->sc_stats.ast_tx_getbusybuf++;
2599 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
2600 if (btype == ATH_BUFTYPE_MGMT)
2601 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
2603 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
2607 * This shouldn't happen; however, just to be
2608 * safe, print a warning and fudge the txbuf
2611 if (sc->sc_txbuf_cnt < 0) {
2612 device_printf(sc->sc_dev,
2613 "%s: sc_txbuf_cnt < 0?\n",
2615 sc->sc_txbuf_cnt = 0;
2622 /* XXX should check which list, mgmt or otherwise */
2623 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
2624 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
2625 "out of xmit buffers" : "xmit buffer busy");
2629 /* XXX TODO: should do this at buffer list initialisation */
2630 /* XXX (then, ensure the buffer has the right flag set) */
2632 if (btype == ATH_BUFTYPE_MGMT)
2633 bf->bf_flags |= ATH_BUF_MGMT;
2635 bf->bf_flags &= (~ATH_BUF_MGMT);
2637 /* Valid bf here; clear some basic fields */
2638 bf->bf_next = NULL; /* XXX just to be sure */
2639 bf->bf_last = NULL; /* XXX again, just to be sure */
2640 bf->bf_comp = NULL; /* XXX again, just to be sure */
2641 bzero(&bf->bf_state, sizeof(bf->bf_state));
2644 * Track the descriptor ID only if doing EDMA
2646 if (sc->sc_isedma) {
2647 bf->bf_descid = sc->sc_txbuf_descid;
2648 sc->sc_txbuf_descid++;
2655 * When retrying a software frame, buffers marked ATH_BUF_BUSY
2656 * can't be thrown back on the queue as they could still be
2657 * in use by the hardware.
2659 * This duplicates the buffer, or returns NULL.
2661 * The descriptor is also copied but the link pointers and
2662 * the DMA segments aren't copied; this frame should thus
2663 * be again passed through the descriptor setup/chain routines
2664 * so the link is correct.
2666 * The caller must free the buffer using ath_freebuf().
2669 ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf)
2671 struct ath_buf *tbf;
2673 tbf = ath_getbuf(sc,
2674 (bf->bf_flags & ATH_BUF_MGMT) ?
2675 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL);
2677 return NULL; /* XXX failure? Why? */
2680 tbf->bf_next = NULL;
2681 tbf->bf_nseg = bf->bf_nseg;
2682 tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE;
2683 tbf->bf_status = bf->bf_status;
2684 tbf->bf_m = bf->bf_m;
2685 tbf->bf_node = bf->bf_node;
2686 /* will be setup by the chain/setup function */
2687 tbf->bf_lastds = NULL;
2688 /* for now, last == self */
2690 tbf->bf_comp = bf->bf_comp;
2692 /* NOTE: DMA segments will be setup by the setup/chain functions */
2694 /* The caller has to re-init the descriptor + links */
2697 * Free the DMA mapping here, before we NULL the mbuf.
2698 * We must only call bus_dmamap_unload() once per mbuf chain
2699 * or behaviour is undefined.
2701 if (bf->bf_m != NULL) {
2703 * XXX is this POSTWRITE call required?
2705 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2706 BUS_DMASYNC_POSTWRITE);
2707 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2714 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
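/*
 * Usage sketch (an assumption about the retry path, not a copy of
 * it): when software retry hits a buffer still marked ATH_BUF_BUSY,
 * the frame state is moved onto a clone and the busy original goes
 * back through ath_freebuf(), which parks it on the holding queue.
 */
static struct ath_buf *
ath_retry_clone_sketch(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *nbf;

	nbf = ath_buf_clone(sc, bf);
	if (nbf == NULL)
		return (NULL);		/* caller must drop the frame */

	/* Ownership of the mbuf and node reference moved to the clone */
	bf->bf_m = NULL;
	bf->bf_node = NULL;
	ath_freebuf(sc, bf);

	return (nbf);
}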
2720 ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
2725 bf = _ath_getbuf_locked(sc, btype);
2727 * If a mgmt buffer was requested but we're out of those,
2728 * try requesting a normal one.
2730 if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
2731 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
2732 ATH_TXBUF_UNLOCK(sc);
2735 struct ifnet *ifp = sc->sc_ifp;
2738 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
2739 sc->sc_stats.ast_tx_qstop++;
2741 /* remove, DragonFly uses OACTIVE to control if_start calls */
2742 IF_LOCK(&ifp->if_snd);
2743 ifq_set_oactive(&ifp->if_snd);
2744 IF_UNLOCK(&ifp->if_snd);
2753 ath_qflush(struct ifnet *ifp)
2762 * Transmit a single frame.
2764 * net80211 will free the node reference if the transmit
2765 * fails, so don't free the node reference here.
2768 ath_transmit(struct ifnet *ifp, struct mbuf *m)
2770 struct ieee80211com *ic = ifp->if_l2com;
2771 struct ath_softc *sc = ic->ic_ifp->if_softc;
2772 struct ieee80211_node *ni;
2779 * Tell the reset path that we're currently transmitting.
2782 if (sc->sc_inreset_cnt > 0) {
2783 DPRINTF(sc, ATH_DEBUG_XMIT,
2784 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2786 IF_LOCK(&ifp->if_snd);
2787 sc->sc_stats.ast_tx_qstop++;
2789 /* remove, DragonFly uses OACTIVE to control if_start calls */
2790 ifq_set_oactive(&ifp->if_snd);
2792 IF_UNLOCK(&ifp->if_snd);
2793 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish");
2796 return (ENOBUFS); /* XXX should be EINVAL or? */
2798 sc->sc_txstart_cnt++;
2801 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start");
2803 * Grab the TX lock - it's ok to do this here; we haven't
2804 * yet started transmitting.
2809 * Node reference, if there's one.
2811 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
2814 * Enforce how deep a node queue can get.
2816 * XXX it would be nicer if we kept an mbuf queue per
2817 * node and only whacked them into ath_bufs when we
2818 * are ready to schedule some traffic from them.
2819 * .. that may come later.
2821 * XXX we should also track the per-node hardware queue
2822 * depth so it is easy to limit the _SUM_ of the swq and
2823 * hwq frames. Since we only schedule two HWQ frames
2824 * at a time, this should be OK for now.
2826 if ((!(m->m_flags & M_EAPOL)) &&
2827 (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) {
2828 sc->sc_stats.ast_tx_nodeq_overflow++;
2836 * Check how many TX buffers are available.
2838 * If this is for non-EAPOL traffic, just leave some
2839 * space free in order for buffer cloning and raw
2840 * frame transmission to occur.
2842 * If it's for EAPOL traffic, ignore this for now.
2843 * Management traffic will be sent via the raw transmit
2844 * method which bypasses this check.
2846 * This is needed to ensure that EAPOL frames during
2847 * (re) keying have a chance to go out.
2849 * See kern/138379 for more information.
2851 if ((!(m->m_flags & M_EAPOL)) &&
2852 (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) {
2853 sc->sc_stats.ast_tx_nobuf++;
2861 * Grab a TX buffer and associated resources.
2863 * If it's an EAPOL frame, allocate a MGMT ath_buf.
2864 * That way, temporary buffer exhaustion in the data
2865 * path doesn't leave us without the ability
2866 * to transmit management frames.
2868 * Otherwise allocate a normal buffer.
2870 if (m->m_flags & M_EAPOL)
2871 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2873 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
2877 * If we failed to allocate a buffer, fail.
2879 * We shouldn't fail normally, due to the check
2882 sc->sc_stats.ast_tx_nobuf++;
2884 /* remove, DragonFly uses OACTIVE to control if_start calls */
2885 IF_LOCK(&ifp->if_snd);
2886 ifq_set_oactive(&ifp->if_snd);
2887 IF_UNLOCK(&ifp->if_snd);
2896 * At this point we have a buffer; so we need to free it
2897 * if we hit any error conditions.
2901 * Check for fragmentation. If this frame
2902 * has been broken up verify we have enough
2903 * buffers to send all the fragments so all
2907 if ((m->m_flags & M_FRAG) &&
2908 !ath_txfrag_setup(sc, &frags, m, ni)) {
2909 DPRINTF(sc, ATH_DEBUG_XMIT,
2910 "%s: out of txfrag buffers\n", __func__);
2911 sc->sc_stats.ast_tx_nofrag++;
2918 * At this point if we have any TX fragments, then we will
2919 * have bumped the node reference once for each of those.
2923 * XXX Is there anything actually _enforcing_ that the
2924 * fragments are being transmitted in one hit, rather than
2925 * being interleaved with other transmissions on that
2928 * The ATH TX output lock is the only thing serialising this
2933 * Calculate the "next fragment" length field in ath_buf
2934 * in order to let the transmit path know enough about
2935 * what to next write to the hardware.
2937 if (m->m_flags & M_FRAG) {
2938 struct ath_buf *fbf = bf;
2939 struct ath_buf *n_fbf = NULL;
2940 struct mbuf *fm = m->m_nextpkt;
2943 * We need to walk the list of fragments and set
2944 * the next size to the following buffer.
2945 * However, the first buffer isn't in the frag
2946 * list, so we have to do some gymnastics here.
2948 TAILQ_FOREACH(n_fbf, &frags, bf_list) {
2949 fbf->bf_nextfraglen = fm->m_pkthdr.len;
2956 * Bump the ifp output counter.
2958 * XXX should use atomics?
2963 * Pass the frame to the h/w for transmission.
2964 * Fragmented frames have each frag chained together
2965 * with m_nextpkt. We know there are sufficient ath_buf's
2966 * to send all the frags because of work done by
2967 * ath_txfrag_setup. We leave m_nextpkt set while
2968 * calling ath_tx_start so it can use it to extend the
2969 * tx duration to cover the subsequent frag and
2970 * so it can reclaim all the mbufs in case of an error;
2971 * ath_tx_start clears m_nextpkt once it commits to
2972 * handing the frame to the hardware.
2974 * Note: if this fails, then the mbufs are freed but
2975 * not the node reference.
2977 next = m->m_nextpkt;
2978 if (ath_tx_start(sc, ni, bf, m)) {
2985 ath_returnbuf_head(sc, bf);
2987 * Free the rest of the node references and
2988 * buffers for the fragment list.
2990 ath_txfrag_cleanup(sc, &frags, ni);
2991 ATH_TXBUF_UNLOCK(sc);
2997 * Check here if the node is in power save state.
2999 ath_tx_update_tim(sc, ni, 1);
3003 * Beware of state changing between frags.
3004 * XXX check sta power-save state?
3006 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
3007 DPRINTF(sc, ATH_DEBUG_XMIT,
3008 "%s: flush fragmented packet, state %s\n",
3010 ieee80211_state_name[ni->ni_vap->iv_state]);
3016 bf = TAILQ_FIRST(&frags);
3017 KASSERT(bf != NULL, ("no buf for txfrag"));
3018 TAILQ_REMOVE(&frags, bf, bf_list);
3023 * Bump watchdog timer.
3025 sc->sc_wd_timer = 5;
3031 * Finished transmitting!
3034 sc->sc_txstart_cnt--;
3037 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished");
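/*
 * Minimal sketch of the admission policy above (illustrative, not a
 * driver function): non-EAPOL data is refused once the free buffer
 * count falls to sc_txq_data_minfree, keeping headroom so cloning,
 * raw/management transmission and (re)keying EAPOL frames can still
 * obtain buffers.
 */
static int
ath_tx_admit_sketch(const struct ath_softc *sc, const struct mbuf *m)
{
	if (m->m_flags & M_EAPOL)
		return (1);	/* always admitted; uses the mgmt pool */
	return (sc->sc_txbuf_cnt > sc->sc_txq_data_minfree);
}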
3043 ath_media_change(struct ifnet *ifp)
3045 int error = ieee80211_media_change(ifp);
3046 /* NB: only the fixed rate can change and that doesn't need a reset */
3047 return (error == ENETRESET ? 0 : error);
3051 * Block/unblock tx+rx processing while a key change is done.
3052 * We assume the caller serializes key management operations
3053 * so we only need to worry about synchronization with other
3054 * uses that originate in the driver.
3057 ath_key_update_begin(struct ieee80211vap *vap)
3059 struct ifnet *ifp = vap->iv_ic->ic_ifp;
3060 struct ath_softc *sc = ifp->if_softc;
3062 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
3063 taskqueue_block(sc->sc_tq);
3064 IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */
3068 ath_key_update_end(struct ieee80211vap *vap)
3070 struct ifnet *ifp = vap->iv_ic->ic_ifp;
3071 struct ath_softc *sc = ifp->if_softc;
3073 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
3074 IF_UNLOCK(&ifp->if_snd);
3075 taskqueue_unblock(sc->sc_tq);
3079 ath_update_promisc(struct ifnet *ifp)
3081 struct ath_softc *sc = ifp->if_softc;
3084 /* configure rx filter */
3085 rfilt = ath_calcrxfilter(sc);
3086 ath_hal_setrxfilter(sc->sc_ah, rfilt);
3088 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
3092 ath_update_mcast(struct ifnet *ifp)
3094 struct ath_softc *sc = ifp->if_softc;
3097 /* calculate and install multicast filter */
3098 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
3099 struct ifmultiaddr *ifma;
3101 * Merge multicast addresses to form the hardware filter.
3103 mfilt[0] = mfilt[1] = 0;
3105 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */
3107 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3112 /* calculate XOR of eight 6bit values */
3113 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
3114 val = LE_READ_4(dl + 0);
3115 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
3116 val = LE_READ_4(dl + 3);
3117 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
3119 mfilt[pos / 32] |= (1 << (pos % 32));
3122 if_maddr_runlock(ifp);
3125 mfilt[0] = mfilt[1] = ~0;
3126 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
3127 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
3128 __func__, mfilt[0], mfilt[1]);
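/*
 * Standalone sketch of the multicast hash above (illustrative): the
 * 48-bit MAC is read as two little-endian 32-bit words at offsets 0
 * and 3, each folded down by XORing its 6-bit fields; the resulting
 * 6-bit value selects one of the 64 filter bits spread over the two
 * 32-bit mfilt words.
 */
static uint8_t
ath_mcast_hash_sketch(const uint8_t *dl)
{
	uint32_t val, pos;

	val = LE_READ_4(dl + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = LE_READ_4(dl + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	return (pos & 0x3f);	/* e.g. 33 -> bit 1 of mfilt[1] */
}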
3132 ath_mode_init(struct ath_softc *sc)
3134 struct ifnet *ifp = sc->sc_ifp;
3135 struct ath_hal *ah = sc->sc_ah;
3138 /* configure rx filter */
3139 rfilt = ath_calcrxfilter(sc);
3140 ath_hal_setrxfilter(ah, rfilt);
3142 /* configure operational mode */
3143 ath_hal_setopmode(ah);
3146 DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE,
3147 "%s: ah=%p, ifp=%p, if_addr=%p\n",
3151 (ifp == NULL) ? NULL : ifp->if_addr);
3154 /* handle any link-level address change */
3155 ath_hal_setmac(ah, IF_LLADDR(ifp));
3157 /* calculate and install multicast filter */
3158 ath_update_mcast(ifp);
3162 * Set the slot time based on the current setting.
3165 ath_setslottime(struct ath_softc *sc)
3167 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3168 struct ath_hal *ah = sc->sc_ah;
3171 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
3173 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
3175 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
3176 /* honor short/long slot time only in 11g */
3177 /* XXX shouldn't honor on pure g or turbo g channel */
3178 if (ic->ic_flags & IEEE80211_F_SHSLOT)
3179 usec = HAL_SLOT_TIME_9;
3181 usec = HAL_SLOT_TIME_20;
3183 usec = HAL_SLOT_TIME_9;
3185 DPRINTF(sc, ATH_DEBUG_RESET,
3186 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
3187 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
3188 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
3190 ath_hal_setslottime(ah, usec);
3191 sc->sc_updateslot = OK;
3195 * Callback from the 802.11 layer to update the
3196 * slot time based on the current setting.
3199 ath_updateslot(struct ifnet *ifp)
3201 struct ath_softc *sc = ifp->if_softc;
3202 struct ieee80211com *ic = ifp->if_l2com;
3205 * When not coordinating the BSS, change the hardware
3206 * immediately. For other operation we defer the change
3207 * until beacon updates have propagated to the stations.
3209 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3210 ic->ic_opmode == IEEE80211_M_MBSS)
3211 sc->sc_updateslot = UPDATE;
3213 ath_setslottime(sc);
3217 * Append the contents of src to dst; both queues
3218 * are assumed to be locked.
3221 ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
3224 ATH_TXQ_LOCK_ASSERT(src);
3225 ATH_TXQ_LOCK_ASSERT(dst);
3227 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
3228 dst->axq_link = src->axq_link;
3229 src->axq_link = NULL;
3230 dst->axq_depth += src->axq_depth;
3231 dst->axq_aggr_depth += src->axq_aggr_depth;
3233 src->axq_aggr_depth = 0;
3237 * Reset the hardware, with no loss.
3239 * This can't be used for a general case reset.
3242 ath_reset_proc(void *arg, int pending)
3244 struct ath_softc *sc = arg;
3245 struct ifnet *ifp = sc->sc_ifp;
3248 if_printf(ifp, "%s: resetting\n", __func__);
3250 wlan_serialize_enter();
3251 ath_reset(ifp, ATH_RESET_NOLOSS);
3252 wlan_serialize_exit();
3256 * Reset the hardware after detecting beacons have stopped.
3259 ath_bstuck_proc(void *arg, int pending)
3261 struct ath_softc *sc = arg;
3262 struct ifnet *ifp = sc->sc_ifp;
3265 wlan_serialize_enter();
3266 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
3267 if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
3269 #ifdef ATH_DEBUG_ALQ
3270 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON))
3271 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL);
3274 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3276 sc->sc_stats.ast_bstuck++;
3278 * This assumes that there's no simultaneous channel mode change
3281 ath_reset(ifp, ATH_RESET_NOLOSS);
3282 wlan_serialize_exit();
3286 ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3288 bus_addr_t *paddr = (bus_addr_t*) arg;
3289 KASSERT(error == 0, ("error %u on bus_dma callback", error));
3290 *paddr = segs->ds_addr;
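/*
 * Note on the callback contract (a reading of the code, not new
 * behaviour): the descriptor DMA tag below constrains the block to a
 * single segment, so only the first segment's address needs to be
 * recorded; with a non-deferred load the callback runs before
 * bus_dmamap_load() returns, making the write through arg safe.
 */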
3294 * Allocate the descriptors and appropriate DMA tag/setup.
3296 * For some situations (eg EDMA TX completion), there isn't a requirement
3297 * for the ath_buf entries to be allocated.
3300 ath_descdma_alloc_desc(struct ath_softc *sc,
3301 struct ath_descdma *dd, ath_bufhead *head,
3302 const char *name, int ds_size, int ndesc)
3304 #define DS2PHYS(_dd, _ds) \
3305 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3306 #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
3307 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
3308 struct ifnet *ifp = sc->sc_ifp;
3311 dd->dd_descsize = ds_size;
3313 DPRINTF(sc, ATH_DEBUG_RESET,
3314 "%s: %s DMA: %u desc, %d bytes per descriptor\n",
3315 __func__, name, ndesc, dd->dd_descsize);
3318 dd->dd_desc_len = dd->dd_descsize * ndesc;
3321 * Merlin work-around:
3322 * Descriptors that cross the 4KB boundary can't be used.
3323 * Assume one skipped descriptor per 4KB page.
3325 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3326 int numpages = dd->dd_desc_len / 4096;
3327 dd->dd_desc_len += ds_size * numpages;
3331 * Setup DMA descriptor area.
3333 * BUS_DMA_ALLOCNOW is not used; we never use bounce
3334 * buffers for the descriptors themselves.
3336 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
3337 PAGE_SIZE, 0, /* alignment, bounds */
3338 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
3339 BUS_SPACE_MAXADDR, /* highaddr */
3340 NULL, NULL, /* filter, filterarg */
3341 dd->dd_desc_len, /* maxsize */
3343 dd->dd_desc_len, /* maxsegsize */
3347 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3351 /* allocate descriptors */
3352 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3353 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3356 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3357 "error %u\n", ndesc, dd->dd_name, error);
3361 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3362 dd->dd_desc, dd->dd_desc_len,
3363 ath_load_cb, &dd->dd_desc_paddr,
3366 if_printf(ifp, "unable to map %s descriptors, error %u\n",
3367 dd->dd_name, error);
3371 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3372 __func__, dd->dd_name, (uint8_t *) dd->dd_desc,
3373 (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr,
3374 /*XXX*/ (u_long) dd->dd_desc_len);
3379 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3381 bus_dma_tag_destroy(dd->dd_dmat);
3382 memset(dd, 0, sizeof(*dd));
3385 #undef ATH_DESC_4KB_BOUND_CHECK
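/*
 * Worked example for ATH_DESC_4KB_BOUND_CHECK above (illustrative):
 * with a 96-byte (0x60) descriptor the threshold is 0x1000 - 0x60 =
 * 0xFA0.  A within-page offset of 0xFA0 is still legal (the
 * descriptor ends exactly at the 4KB boundary), while 0xFA8 fails
 * the check (it would span 0xFA8..0x1007) and the setup loop skips
 * the descriptor ahead to the start of the next 4KB page.
 */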
3389 ath_descdma_setup(struct ath_softc *sc,
3390 struct ath_descdma *dd, ath_bufhead *head,
3391 const char *name, int ds_size, int nbuf, int ndesc)
3393 #define DS2PHYS(_dd, _ds) \
3394 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3395 #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
3396 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
3397 struct ifnet *ifp = sc->sc_ifp;
3400 int i, bsize, error;
3402 /* Allocate descriptors */
3403 error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size,
3406 /* Assume any errors during allocation were dealt with */
3411 ds = (uint8_t *) dd->dd_desc;
3413 /* allocate rx buffers */
3414 bsize = sizeof(struct ath_buf) * nbuf;
3415 bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT|M_ZERO);
3417 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3418 dd->dd_name, bsize);
3424 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) {
3425 bf->bf_desc = (struct ath_desc *) ds;
3426 bf->bf_daddr = DS2PHYS(dd, ds);
3427 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3429 * Merlin WAR: Skip descriptor addresses which
3430 * cause 4KB boundary crossing along any point
3431 * in the descriptor.
3433 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
3435 /* Start at the next page */
3436 ds += 0x1000 - (bf->bf_daddr & 0xFFF);
3437 bf->bf_desc = (struct ath_desc *) ds;
3438 bf->bf_daddr = DS2PHYS(dd, ds);
3441 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3444 if_printf(ifp, "unable to create dmamap for %s "
3445 "buffer %u, error %u\n", dd->dd_name, i, error);
3446 ath_descdma_cleanup(sc, dd, head);
3449 bf->bf_lastds = bf->bf_desc; /* Just an initial value */
3450 TAILQ_INSERT_TAIL(head, bf, bf_list);
3454 * XXX TODO: ensure that ds doesn't overflow the descriptor
3455 * allocation otherwise weird stuff will occur and crash your
3459 /* XXX this should likely just call ath_descdma_cleanup() */
3461 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3462 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3463 bus_dma_tag_destroy(dd->dd_dmat);
3464 memset(dd, 0, sizeof(*dd));
3467 #undef ATH_DESC_4KB_BOUND_CHECK
3471 * Allocate ath_buf entries but no descriptor contents.
3473 * This is for RX EDMA where the descriptors are the header part of
3477 ath_descdma_setup_rx_edma(struct ath_softc *sc,
3478 struct ath_descdma *dd, ath_bufhead *head,
3479 const char *name, int nbuf, int rx_status_len)
3481 struct ifnet *ifp = sc->sc_ifp;
3483 int i, bsize, error;
3485 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n",
3486 __func__, name, nbuf);
3490 * This is (mostly) purely for show. We're not allocating any actual
3491 * descriptors here as EDMA RX has the descriptor be part
3494 * However, dd_desc_len is used by ath_descdma_free() to determine
3495 * whether we have already freed this DMA mapping.
3497 dd->dd_desc_len = rx_status_len * nbuf;
3498 dd->dd_descsize = rx_status_len;
3500 /* allocate rx buffers */
3501 bsize = sizeof(struct ath_buf) * nbuf;
3502 bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT | M_ZERO);
3504 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3505 dd->dd_name, bsize);
3512 for (i = 0; i < nbuf; i++, bf++) {
3515 bf->bf_lastds = NULL; /* Just an initial value */
3517 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3520 if_printf(ifp, "unable to create dmamap for %s "
3521 "buffer %u, error %u\n", dd->dd_name, i, error);
3522 ath_descdma_cleanup(sc, dd, head);
3525 TAILQ_INSERT_TAIL(head, bf, bf_list);
3529 memset(dd, 0, sizeof(*dd));
3534 ath_descdma_cleanup(struct ath_softc *sc,
3535 struct ath_descdma *dd, ath_bufhead *head)
3538 struct ieee80211_node *ni;
3541 if (dd->dd_dmamap != 0) {
3542 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3543 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3544 bus_dma_tag_destroy(dd->dd_dmat);
3548 TAILQ_FOREACH(bf, head, bf_list) {
3551 * XXX warn if there are buffers here.
3552 * XXX it should have been freed by the
3556 if (do_warning == 0) {
3558 device_printf(sc->sc_dev,
3559 "%s: %s: mbuf should've been"
3560 " unmapped/freed!\n",
3564 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3565 BUS_DMASYNC_POSTREAD);
3566 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3570 if (bf->bf_dmamap != NULL) {
3571 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
3572 bf->bf_dmamap = NULL;
3578 * Reclaim node reference.
3580 ieee80211_free_node(ni);
3588 if (dd->dd_bufptr != NULL)
3589 kfree(dd->dd_bufptr, M_ATHDEV);
3590 memset(dd, 0, sizeof(*dd));
3594 ath_desc_alloc(struct ath_softc *sc)
3598 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3599 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER);
3603 sc->sc_txbuf_cnt = ath_txbuf;
3605 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
3606 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt,
3609 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3614 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the
3615 * flag doesn't have to be set in ath_getbuf_locked().
3618 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3619 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1);
3621 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3622 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
3623 &sc->sc_txbuf_mgmt);
3630 ath_desc_free(struct ath_softc *sc)
3633 if (sc->sc_bdma.dd_desc_len != 0)
3634 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3635 if (sc->sc_txdma.dd_desc_len != 0)
3636 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3637 if (sc->sc_txdma_mgmt.dd_desc_len != 0)
3638 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
3639 &sc->sc_txbuf_mgmt);
3642 static struct ieee80211_node *
3643 ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3645 struct ieee80211com *ic = vap->iv_ic;
3646 struct ath_softc *sc = ic->ic_ifp->if_softc;
3647 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3648 struct ath_node *an;
3650 an = kmalloc(space, M_80211_NODE, M_INTWAIT|M_ZERO);
3655 ath_rate_node_init(sc, an);
3657 /* Setup the mutex - there's no associd yet so set the name to NULL */
3658 ksnprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
3659 device_get_nameunit(sc->sc_dev), an);
3661 mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);
3664 /* XXX setup ath_tid */
3665 ath_tx_tid_init(sc, an);
3667 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__,
3668 ath_hal_ether_sprintf(mac), an);
3669 return &an->an_node;
3673 ath_node_cleanup(struct ieee80211_node *ni)
3675 struct ieee80211com *ic = ni->ni_ic;
3676 struct ath_softc *sc = ic->ic_ifp->if_softc;
3678 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__,
3679 ath_hal_ether_sprintf(ni->ni_macaddr), ATH_NODE(ni));
3681 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
3682 ath_tx_node_flush(sc, ATH_NODE(ni));
3683 ath_rate_node_cleanup(sc, ATH_NODE(ni));
3684 sc->sc_node_cleanup(ni);
3688 ath_node_free(struct ieee80211_node *ni)
3690 struct ieee80211com *ic = ni->ni_ic;
3691 struct ath_softc *sc = ic->ic_ifp->if_softc;
3693 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %s: an %p\n", __func__,
3694 ath_hal_ether_sprintf(ni->ni_macaddr), ATH_NODE(ni));
3696 mtx_destroy(&ATH_NODE(ni)->an_mtx);
3698 sc->sc_node_free(ni);
3702 ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3704 struct ieee80211com *ic = ni->ni_ic;
3705 struct ath_softc *sc = ic->ic_ifp->if_softc;
3706 struct ath_hal *ah = sc->sc_ah;
3708 *rssi = ic->ic_node_getrssi(ni);
3709 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3710 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
3712 *noise = -95; /* nominally correct */
3716 * Set the default antenna.
3719 ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3721 struct ath_hal *ah = sc->sc_ah;
3723 /* XXX block beacon interrupts */
3724 ath_hal_setdefantenna(ah, antenna);
3725 if (sc->sc_defant != antenna)
3726 sc->sc_stats.ast_ant_defswitch++;
3727 sc->sc_defant = antenna;
3728 sc->sc_rxotherant = 0;
3732 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
3734 txq->axq_qnum = qnum;
3737 txq->axq_aggr_depth = 0;
3738 txq->axq_intrcnt = 0;
3739 txq->axq_link = NULL;
3740 txq->axq_softc = sc;
3741 TAILQ_INIT(&txq->axq_q);
3742 TAILQ_INIT(&txq->axq_tidq);
3743 TAILQ_INIT(&txq->fifo.axq_q);
3744 ATH_TXQ_LOCK_INIT(sc, txq);
3748 * Setup a h/w transmit queue.
3750 static struct ath_txq *
3751 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
3753 #define N(a) (sizeof(a)/sizeof(a[0]))
3754 struct ath_hal *ah = sc->sc_ah;
3758 memset(&qi, 0, sizeof(qi));
3759 qi.tqi_subtype = subtype;
3760 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
3761 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
3762 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
3764 * Enable interrupts only for EOL and DESC conditions.
3765 * We mark tx descriptors to receive a DESC interrupt
3766 * when a tx queue gets deep; otherwise waiting for the
3767 * EOL to reap descriptors. Note that this is done to
3768 * reduce interrupt load and this only defers reaping
3769 * descriptors, never transmitting frames. Aside from
3770 * reducing interrupts this also permits more concurrency.
3771 * The only potential downside is if the tx queue backs
3772 * up, in which case the top half of the kernel may back up
3773 * due to a lack of tx descriptors.
3776 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE |
3777 HAL_TXQ_TXOKINT_ENABLE;
3779 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE |
3780 HAL_TXQ_TXDESCINT_ENABLE;
3782 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
3785 * NB: don't print a message, this happens
3786 * normally on parts with too few tx queues
3790 if (qnum >= N(sc->sc_txq)) {
3791 device_printf(sc->sc_dev,
3792 "hal qnum %u out of range, max %zu!\n",
3793 qnum, N(sc->sc_txq));
3794 ath_hal_releasetxqueue(ah, qnum);
3797 if (!ATH_TXQ_SETUP(sc, qnum)) {
3798 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
3799 sc->sc_txqsetup |= 1<<qnum;
3801 return &sc->sc_txq[qnum];
3806 * Setup a hardware data transmit queue for the specified
3807 * access control. The hal may not support all requested
3808 * queues in which case it will return a reference to a
3809 * previously setup queue. We record the mapping from ac's
3810 * to h/w queues for use by ath_tx_start and also track
3811 * the set of h/w queues being used to optimize work in the
3812 * transmit interrupt handler and related routines.
3815 ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
3817 #define N(a) (sizeof(a)/sizeof(a[0]))
3818 struct ath_txq *txq;
3820 if (ac >= N(sc->sc_ac2q)) {
3821 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
3822 ac, N(sc->sc_ac2q));
3825 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
3828 sc->sc_ac2q[ac] = txq;
3836 * Update WME parameters for a transmit queue.
3839 ath_txq_update(struct ath_softc *sc, int ac)
3841 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
3842 #define ATH_TXOP_TO_US(v) (v<<5)
3843 struct ifnet *ifp = sc->sc_ifp;
3844 struct ieee80211com *ic = ifp->if_l2com;
3845 struct ath_txq *txq = sc->sc_ac2q[ac];
3846 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
3847 struct ath_hal *ah = sc->sc_ah;
3850 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
3851 #ifdef IEEE80211_SUPPORT_TDMA
3854 * AIFS is zero so there's no pre-transmit wait. The
3855 * burst time defines the slot duration and is configured
3856 * through net80211. The QCU is setup to not do post-xmit
3857 * back off, lockout all lower-priority QCU's, and fire
3858 * off the DMA beacon alert timer which is setup based
3859 * on the slot configuration.
3861 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
3862 | HAL_TXQ_TXERRINT_ENABLE
3863 | HAL_TXQ_TXURNINT_ENABLE
3864 | HAL_TXQ_TXEOLINT_ENABLE
3866 | HAL_TXQ_BACKOFF_DISABLE
3867 | HAL_TXQ_ARB_LOCKOUT_GLOBAL
3871 qi.tqi_readyTime = sc->sc_tdmaslotlen;
3872 qi.tqi_burstTime = qi.tqi_readyTime;
3876 * XXX shouldn't this just use the default flags
3877 * used in the previous queue setup?
3879 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
3880 | HAL_TXQ_TXERRINT_ENABLE
3881 | HAL_TXQ_TXDESCINT_ENABLE
3882 | HAL_TXQ_TXURNINT_ENABLE
3883 | HAL_TXQ_TXEOLINT_ENABLE
3885 qi.tqi_aifs = wmep->wmep_aifsn;
3886 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
3887 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
3888 qi.tqi_readyTime = 0;
3889 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
3890 #ifdef IEEE80211_SUPPORT_TDMA
3894 DPRINTF(sc, ATH_DEBUG_RESET,
3895 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
3896 __func__, txq->axq_qnum, qi.tqi_qflags,
3897 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
3899 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
3900 if_printf(ifp, "unable to update hardware queue "
3901 "parameters for %s traffic!\n",
3902 ieee80211_wme_acnames[ac]);
3905 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
3908 #undef ATH_TXOP_TO_US
3909 #undef ATH_EXPONENT_TO_VALUE
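/*
 * Worked example for the conversions above (illustrative): WME
 * carries CWmin/CWmax as exponents, so wmep_logcwmin = 4 yields a
 * contention window of (1 << 4) - 1 = 15 slots, and the TXOP limit
 * is in units of 32 usec, so wmep_txopLimit = 94 (the 802.11 AC_VI
 * default) yields 94 << 5 = 3008 usec.
 */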
3913 * Callback from the 802.11 layer to update WME parameters.
3916 ath_wme_update(struct ieee80211com *ic)
3918 struct ath_softc *sc = ic->ic_ifp->if_softc;
3920 return !ath_txq_update(sc, WME_AC_BE) ||
3921 !ath_txq_update(sc, WME_AC_BK) ||
3922 !ath_txq_update(sc, WME_AC_VI) ||
3923 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
3927 * Reclaim resources for a setup queue.
3930 ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
3933 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
3934 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
3935 ATH_TXQ_LOCK_DESTROY(txq);
3939 * Reclaim all tx queue resources.
3942 ath_tx_cleanup(struct ath_softc *sc)
3946 ATH_TXBUF_LOCK_DESTROY(sc);
3947 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
3948 if (ATH_TXQ_SETUP(sc, i))
3949 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
3953 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
3954 * using the current rates in sc_rixmap.
3957 ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
3959 int rix = sc->sc_rixmap[rate];
3960 /* NB: return lowest rix for invalid rate */
3961 return (rix == 0xff ? 0 : rix);
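/*
 * Sketch of how an inverse map like sc_rixmap is typically filled
 * (an assumption about setup code elsewhere, not a copy of it): each
 * IEEE rate, in 500kbps units with the basic-rate bit stripped, maps
 * to its HAL rate index; 0xff marks unsupported rates, which the
 * lookup above folds to rix 0.
 */
static void
ath_rixmap_fill_sketch(uint8_t rixmap[256], int nrates,
	const uint8_t *ieeerates)
{
	int i;

	memset(rixmap, 0xff, 256);
	for (i = 0; i < nrates; i++)
		rixmap[ieeerates[i] & IEEE80211_RATE_VAL] = i;
}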
3965 ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
3968 struct ieee80211_node *ni = bf->bf_node;
3969 struct ifnet *ifp = sc->sc_ifp;
3970 struct ieee80211com *ic = ifp->if_l2com;
3973 if (ts->ts_status == 0) {
3974 u_int8_t txant = ts->ts_antenna;
3975 sc->sc_stats.ast_ant_tx[txant]++;
3976 sc->sc_ant_tx[txant]++;
3977 if (ts->ts_finaltsi != 0)
3978 sc->sc_stats.ast_tx_altrate++;
3979 pri = M_WME_GETAC(bf->bf_m);
3980 if (pri >= WME_AC_VO)
3981 ic->ic_wme.wme_hipri_traffic++;
3982 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
3983 ni->ni_inact = ni->ni_inact_reload;
3985 if (ts->ts_status & HAL_TXERR_XRETRY)
3986 sc->sc_stats.ast_tx_xretries++;
3987 if (ts->ts_status & HAL_TXERR_FIFO)
3988 sc->sc_stats.ast_tx_fifoerr++;
3989 if (ts->ts_status & HAL_TXERR_FILT)
3990 sc->sc_stats.ast_tx_filtered++;
3991 if (ts->ts_status & HAL_TXERR_XTXOP)
3992 sc->sc_stats.ast_tx_xtxop++;
3993 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
3994 sc->sc_stats.ast_tx_timerexpired++;
3996 if (bf->bf_m->m_flags & M_FF)
3997 sc->sc_stats.ast_ff_txerr++;
3999 /* XXX when is this valid? */
4000 if (ts->ts_flags & HAL_TX_DESC_CFG_ERR)
4001 sc->sc_stats.ast_tx_desccfgerr++;
4003 * This can be valid for successful frame transmission!
4004 * If there's a TX FIFO underrun during aggregate transmission,
4005 * the MAC will pad the rest of the aggregate with delimiters.
4006 * If a BA is returned, the frame is marked as "OK" and it's up
4007 * to the TX completion code to notice which frames weren't
4008 * successfully transmitted.
4010 if (ts->ts_flags & HAL_TX_DATA_UNDERRUN)
4011 sc->sc_stats.ast_tx_data_underrun++;
4012 if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN)
4013 sc->sc_stats.ast_tx_delim_underrun++;
4015 sr = ts->ts_shortretry;
4016 lr = ts->ts_longretry;
4017 sc->sc_stats.ast_tx_shortretry += sr;
4018 sc->sc_stats.ast_tx_longretry += lr;
4023 * The default completion. If fail is 1, this means
4024 * "please don't retry the frame, and just return -1 status
4025 * to the net80211 stack".
4028 ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4030 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4036 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
4037 ts->ts_status : HAL_TXERR_XRETRY;
4040 if (bf->bf_state.bfs_dobaw)
4041 device_printf(sc->sc_dev,
4042 "%s: bf %p: seqno %d: dobaw should've been cleared!\n",
4045 SEQNO(bf->bf_state.bfs_seqno));
4047 if (bf->bf_next != NULL)
4048 device_printf(sc->sc_dev,
4049 "%s: bf %p: seqno %d: bf_next not NULL!\n",
4052 SEQNO(bf->bf_state.bfs_seqno));
4055 * Check if the node software queue is empty; if so
4056 * then clear the TIM.
4058 * This needs to be done before the buffer is freed as
4059 * otherwise the node reference will have been released
4060 * and the node may not actually exist any longer.
4062 * XXX I don't like this belonging here, but it's cleaner
4063 * to do it here right now than in all the other places
4064 * where ath_tx_default_comp() is called.
4066 * XXX TODO: during drain, ensure that the callback is
4067 * being called so we get a chance to update the TIM.
4071 ath_tx_update_tim(sc, bf->bf_node, 0);
4076 * Do any tx complete callback. Note this must
4077 * be done before releasing the node reference.
4078 * This will free the mbuf, release the net80211
4079 * node and recycle the ath_buf.
4081 ath_tx_freebuf(sc, bf, st);
4085 * Update rate control with the given completion status.
4088 ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
4089 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
4090 int nframes, int nbad)
4092 struct ath_node *an;
4094 /* Only for unicast frames */
4099 ATH_NODE_UNLOCK_ASSERT(an);
4101 if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
4103 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
4104 ATH_NODE_UNLOCK(an);
4109 * Process the completion of the given buffer.
4111 * This calls the rate control update and then the buffer completion.
4112 * This will either free the buffer or requeue it. In any case, the
4113 * bf pointer should be treated as invalid after this function is called.
4116 ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq,
4117 struct ath_tx_status *ts, struct ath_buf *bf)
4119 struct ieee80211_node *ni = bf->bf_node;
4120 struct ath_node *an = NULL;
4122 ATH_TX_UNLOCK_ASSERT(sc);
4123 ATH_TXQ_UNLOCK_ASSERT(txq);
4125 /* If unicast frame, update general statistics */
4128 /* update statistics */
4129 ath_tx_update_stats(sc, ts, bf);
4133 * Call the completion handler.
4134 * The completion handler is responsible for
4135 * calling the rate control code.
4137 * Frames with no completion handler get the
4138 * rate control code called here.
4140 if (bf->bf_comp == NULL) {
4141 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
4142 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) {
4144 * XXX assume this isn't an aggregate
4147 ath_tx_update_ratectrl(sc, ni,
4148 bf->bf_state.bfs_rc, ts,
4149 bf->bf_state.bfs_pktlen, 1,
4150 (ts->ts_status == 0 ? 0 : 1));
4152 ath_tx_default_comp(sc, bf, 0);
4154 bf->bf_comp(sc, bf, 0);
4160 * Process completed xmit descriptors from the specified queue.
4161 * Kick the packet scheduler if needed. This can occur from this
4165 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
4167 struct ath_hal *ah = sc->sc_ah;
4169 struct ath_desc *ds;
4170 struct ath_tx_status *ts;
4171 struct ieee80211_node *ni;
4172 #ifdef IEEE80211_SUPPORT_SUPERG
4173 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
4174 #endif /* IEEE80211_SUPPORT_SUPERG */
4178 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
4179 __func__, txq->axq_qnum,
4180 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4183 ATH_KTR(sc, ATH_KTR_TXCOMP, 4,
4184 "ath_tx_processq: txq=%u head %p link %p depth %p",
4186 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4193 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
4194 bf = TAILQ_FIRST(&txq->axq_q);
4196 ATH_TXQ_UNLOCK(txq);
4199 ds = bf->bf_lastds; /* XXX must be setup correctly! */
4200 ts = &bf->bf_status.ds_txstat;
4202 status = ath_hal_txprocdesc(ah, ds, ts);
4204 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
4205 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4207 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0))
4208 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4211 #ifdef ATH_DEBUG_ALQ
4212 if (if_ath_alq_checkdebug(&sc->sc_alq,
4213 ATH_ALQ_EDMA_TXSTATUS)) {
4214 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
4215 sc->sc_tx_statuslen,
4220 if (status == HAL_EINPROGRESS) {
4221 ATH_KTR(sc, ATH_KTR_TXCOMP, 3,
4222 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS",
4223 txq->axq_qnum, bf, ds);
4224 ATH_TXQ_UNLOCK(txq);
4227 ATH_TXQ_REMOVE(txq, bf, bf_list);
4232 if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) {
4233 device_printf(sc->sc_dev,
4234 "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n",
4238 bf->bf_state.bfs_tx_queue);
4240 if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) {
4241 device_printf(sc->sc_dev,
4242 "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n",
4246 bf->bf_last->bf_state.bfs_tx_queue);
4250 if (txq->axq_depth > 0) {
4252 * More frames follow. Mark the buffer busy
4253 * so it's not re-used while the hardware may
4254 * still re-read the link field in the descriptor.
4256 * Use the last buffer in an aggregate as that
4257 * is where the hardware may be - intermediate
4258 * descriptors won't be "busy".
4260 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4262 txq->axq_link = NULL;
4264 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4266 if (bf->bf_state.bfs_aggr)
4267 txq->axq_aggr_depth--;
4271 ATH_KTR(sc, ATH_KTR_TXCOMP, 5,
4272 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x",
4273 txq->axq_qnum, bf, ds, ni, ts->ts_status);
4275 * If unicast frame was ack'd update RSSI,
4276 * including the last rx time used to
4277 * work around phantom bmiss interrupts.
4279 if (ni != NULL && ts->ts_status == 0 &&
4280 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
4282 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
4283 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
4286 ATH_TXQ_UNLOCK(txq);
4289 * Update statistics and call completion
4291 ath_tx_process_buf_completion(sc, txq, ts, bf);
4293 /* XXX at this point, bf and ni may be totally invalid */
4295 #ifdef IEEE80211_SUPPORT_SUPERG
4297 * Flush fast-frame staging queue when traffic slows.
4299 if (txq->axq_depth <= 1)
4300 ieee80211_ff_flush(ic, txq->axq_ac);
4303 /* Kick the software TXQ scheduler */
4306 ath_txq_sched(sc, txq);
4310 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4311 "ath_tx_processq: txq=%u: done",
4317 #define TXQACTIVE(t, q) ( (t) & (1 << (q)))
4320 * Deferred processing of transmit interrupt; special-cased
4321 * for a single hardware transmit queue (e.g. 5210 and 5211).
4324 ath_tx_proc_q0(void *arg, int npending)
4326 struct ath_softc *sc = arg;
4328 struct ifnet *ifp = sc->sc_ifp;
4332 wlan_serialize_enter();
4334 sc->sc_txproc_cnt++;
4335 txqs = sc->sc_txq_active;
4336 sc->sc_txq_active &= ~txqs;
4339 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4340 "ath_tx_proc_q0: txqs=0x%08x", txqs);
4342 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
4343 /* XXX why is lastrx updated in tx code? */
4344 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4345 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4346 ath_tx_processq(sc, sc->sc_cabq, 1);
4348 /* remove, DragonFly uses OACTIVE to control if_start calls */
4349 IF_LOCK(&ifp->if_snd);
4350 ifq_clr_oactive(&ifp->if_snd);
4351 IF_UNLOCK(&ifp->if_snd);
4353 sc->sc_wd_timer = 0;
4356 ath_led_event(sc, sc->sc_txrix);
4359 sc->sc_txproc_cnt--;
4363 wlan_serialize_exit();
4367 * Deferred processing of transmit interrupt; special-cased
4368 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
4371 ath_tx_proc_q0123(void *arg, int npending)
4373 struct ath_softc *sc = arg;
4375 struct ifnet *ifp = sc->sc_ifp;
4380 wlan_serialize_enter();
4382 sc->sc_txproc_cnt++;
4383 txqs = sc->sc_txq_active;
4384 sc->sc_txq_active &= ~txqs;
4387 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4388 "ath_tx_proc_q0123: txqs=0x%08x", txqs);
4391 * Process each active queue.
4394 if (TXQACTIVE(txqs, 0))
4395 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
4396 if (TXQACTIVE(txqs, 1))
4397 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
4398 if (TXQACTIVE(txqs, 2))
4399 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
4400 if (TXQACTIVE(txqs, 3))
4401 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
4402 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4403 ath_tx_processq(sc, sc->sc_cabq, 1);
4405 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4408 /* remove, DragonFly uses OACTIVE to control if_start calls */
4409 IF_LOCK(&ifp->if_snd);
4410 ifq_clr_oactive(&ifp->if_snd);
4411 IF_UNLOCK(&ifp->if_snd);
4413 sc->sc_wd_timer = 0;
4416 ath_led_event(sc, sc->sc_txrix);
4419 sc->sc_txproc_cnt--;
4423 wlan_serialize_exit();
4427 * Deferred processing of transmit interrupt.
4430 ath_tx_proc(void *arg, int npending)
4432 struct ath_softc *sc = arg;
4434 struct ifnet *ifp = sc->sc_ifp;
4439 wlan_serialize_enter();
4441 sc->sc_txproc_cnt++;
4442 txqs = sc->sc_txq_active;
4443 sc->sc_txq_active &= ~txqs;
4446 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs);
4449 * Process each active queue.
4452 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4453 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
4454 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
4456 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4459 /* remove, DragonFly uses OACTIVE to control if_start calls */
4460 /* XXX check this inside of IF_LOCK? */
4461 IF_LOCK(&ifp->if_snd);
4462 ifq_clr_oactive(&ifp->if_snd);
4463 IF_UNLOCK(&ifp->if_snd);
4465 sc->sc_wd_timer = 0;
4468 ath_led_event(sc, sc->sc_txrix);
4471 sc->sc_txproc_cnt--;
4475 wlan_serialize_exit();
4480 * Deferred processing of TXQ rescheduling.
4483 ath_txq_sched_tasklet(void *arg, int npending)
4485 struct ath_softc *sc = arg;
4488 wlan_serialize_enter();
4490 /* XXX is skipping ok? */
4493 if (sc->sc_inreset_cnt > 0) {
4494 device_printf(sc->sc_dev,
4495 "%s: sc_inreset_cnt > 0; skipping\n", __func__);
4497 wlan_serialize_exit();
4501 sc->sc_txproc_cnt++;
4505 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
4506 if (ATH_TXQ_SETUP(sc, i)) {
4507 ath_txq_sched(sc, &sc->sc_txq[i]);
4513 sc->sc_txproc_cnt--;
4515 wlan_serialize_exit();
4519 ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
4522 ATH_TXBUF_LOCK_ASSERT(sc);
4524 if (bf->bf_flags & ATH_BUF_MGMT)
4525 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
4527 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
4529 if (sc->sc_txbuf_cnt > ath_txbuf) {
4530 device_printf(sc->sc_dev,
"%s: sc_txbuf_cnt > %d?\n",
__func__, ath_txbuf);
sc->sc_txbuf_cnt = ath_txbuf;
4540 ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
4543 ATH_TXBUF_LOCK_ASSERT(sc);
4545 if (bf->bf_flags & ATH_BUF_MGMT)
4546 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
4548 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
4550 if (sc->sc_txbuf_cnt > ATH_TXBUF) {
4551 device_printf(sc->sc_dev,
"%s: sc_txbuf_cnt > %d?\n",
__func__, ATH_TXBUF);
sc->sc_txbuf_cnt = ATH_TXBUF;
4561 * Free the holding buffer if it exists
4564 ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq)
4566 ATH_TXBUF_UNLOCK_ASSERT(sc);
4567 ATH_TXQ_LOCK_ASSERT(txq);
4569 if (txq->axq_holdingbf == NULL)
4572 txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY;
ATH_TXBUF_LOCK(sc);
ath_returnbuf_tail(sc, txq->axq_holdingbf);
4576 ATH_TXBUF_UNLOCK(sc);
4578 txq->axq_holdingbf = NULL;
* Add this buffer to the holding queue, freeing the previous
* holding buffer if it exists.
4586 ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf)
4588 struct ath_txq *txq;
4590 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
4592 ATH_TXBUF_UNLOCK_ASSERT(sc);
4593 ATH_TXQ_LOCK_ASSERT(txq);
4595 /* XXX assert ATH_BUF_BUSY is set */
4597 /* XXX assert the tx queue is under the max number */
if (bf->bf_state.bfs_tx_queue >= HAL_NUM_TX_QUEUES) {
device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n",
__func__,
bf,
bf->bf_state.bfs_tx_queue);
4603 bf->bf_flags &= ~ATH_BUF_BUSY;
4604 ath_returnbuf_tail(sc, bf);
4607 ath_txq_freeholdingbuf(sc, txq);
4608 txq->axq_holdingbf = bf;
4612 * Return a buffer to the pool and update the 'busy' flag on the
4613 * previous 'tail' entry.
4615 * This _must_ only be called when the buffer is involved in a completed
4616 * TX. The logic is that if it was part of an active TX, the previous
4617 * buffer on the list is now not involved in a halted TX DMA queue, waiting
4618 * for restart (eg for TDMA.)
4620 * The caller must free the mbuf and recycle the node reference.
4622 * XXX This method of handling busy / holding buffers is insanely stupid.
4623 * It requires bf_state.bfs_tx_queue to be correctly assigned. It would
4624 * be much nicer if buffers in the processq() methods would instead be
4625 * always completed there (pushed onto a txq or ath_bufhead) so we knew
4626 * exactly what hardware queue they came from in the first place.
4629 ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
4631 struct ath_txq *txq;
4633 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
4635 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
4636 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
4639 * If this buffer is busy, push it onto the holding queue.
4641 if (bf->bf_flags & ATH_BUF_BUSY) {
ATH_TXQ_LOCK(txq);
ath_txq_addholdingbuf(sc, bf);
4644 ATH_TXQ_UNLOCK(txq);
4649 * Not a busy buffer, so free normally
ATH_TXBUF_LOCK(sc);
ath_returnbuf_tail(sc, bf);
4653 ATH_TXBUF_UNLOCK(sc);
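/*
 * Sketch of the busy/holding buffer flow implemented above; the
 * names are the driver's own, the sequencing is my reading of it:
 *
 *	bf->bf_flags |= ATH_BUF_BUSY;	// h/w may still reference the
 *					// descriptor at completion time
 *	ath_freebuf(sc, bf);		// busy -> ath_txq_addholdingbuf(),
 *					// which frees the previous holder
 *	...				// later, once TX DMA is stopped:
 *	ath_txq_freeholdingbuf(sc, txq); // returned via
 *					// ath_returnbuf_tail()
 *
 * Only one buffer per TXQ is held back this way, which is why
 * bf_state.bfs_tx_queue must identify the owning queue correctly.
 */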
4657 * This is currently used by ath_tx_draintxq() and
4658 * ath_tx_tid_free_pkts().
4660 * It recycles a single ath_buf.
4663 ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
4665 struct ieee80211_node *ni = bf->bf_node;
4666 struct mbuf *m0 = bf->bf_m;
4669 * Make sure that we only sync/unload if there's an mbuf.
* If not (eg we cloned a buffer), the unload will have already
* been done.
4673 if (bf->bf_m != NULL) {
4674 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4675 BUS_DMASYNC_POSTWRITE);
4676 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4682 /* Free the buffer, it's not needed any longer */
4683 ath_freebuf(sc, bf);
4685 /* Pass the buffer back to net80211 - completing it */
4686 ieee80211_tx_complete(ni, m0, status);
4689 static struct ath_buf *
4690 ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq)
4694 ATH_TXQ_LOCK_ASSERT(txq);
4697 * Drain the FIFO queue first, then if it's
4698 * empty, move to the normal frame queue.
4700 bf = TAILQ_FIRST(&txq->fifo.axq_q);
4703 * Is it the last buffer in this set?
4704 * Decrement the FIFO counter.
4706 if (bf->bf_flags & ATH_BUF_FIFOEND) {
4707 if (txq->axq_fifo_depth == 0) {
device_printf(sc->sc_dev,
"%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n",
__func__,
txq->axq_qnum,
txq->fifo.axq_depth);
4714 txq->axq_fifo_depth--;
4716 ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
4723 if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) {
4724 device_printf(sc->sc_dev,
"%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n",
__func__,
txq->axq_qnum,
txq->axq_fifo_depth,
4729 txq->fifo.axq_depth);
4733 * Now drain the pending queue.
4735 bf = TAILQ_FIRST(&txq->axq_q);
4737 txq->axq_link = NULL;
4740 ATH_TXQ_REMOVE(txq, bf, bf_list);
4745 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
4748 struct ath_hal *ah = sc->sc_ah;
4754 * NB: this assumes output has been stopped and
4755 * we do not need to block ath_tx_proc
4757 for (ix = 0;; ix++) {
ATH_TXQ_LOCK(txq);
bf = ath_tx_draintxq_get_one(sc, txq);
if (bf == NULL) {
ATH_TXQ_UNLOCK(txq);
break;
}
4764 if (bf->bf_state.bfs_aggr)
4765 txq->axq_aggr_depth--;
4767 if (sc->sc_debug & ATH_DEBUG_RESET) {
4768 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
4772 * EDMA operation has a TX completion FIFO
4773 * separate from the TX descriptor, so this
4774 * method of checking the "completion" status
4777 if (! sc->sc_isedma) {
status = (ath_hal_txprocdesc(ah,
bf->bf_lastds,
&bf->bf_status.ds_txstat) == HAL_OK);
4782 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status);
4783 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
4784 bf->bf_m->m_len, 0, -1);
4786 #endif /* ATH_DEBUG */
4788 * Since we're now doing magic in the completion
4789 * functions, we -must- call it for aggregation
4790 * destinations or BAW tracking will get upset.
4793 * Clear ATH_BUF_BUSY; the completion handler
4794 * will free the buffer.
4796 ATH_TXQ_UNLOCK(txq);
4797 bf->bf_flags &= ~ATH_BUF_BUSY;
4799 bf->bf_comp(sc, bf, 1);
4801 ath_tx_default_comp(sc, bf, 1);
4805 * Free the holding buffer if it exists
ATH_TXQ_LOCK(txq);
ath_txq_freeholdingbuf(sc, txq);
4809 ATH_TXQ_UNLOCK(txq);
* Drain software queued frames which are on
* the active TIDs.
4815 ath_tx_txq_drain(sc, txq);
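/*
 * NB: the drain order above is: the completion FIFO list
 * (fifo.axq_q) first, then the pending list (axq_q), then the
 * per-queue holding buffer, and finally the software (per-TID)
 * queues via ath_tx_txq_drain().
 */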
4819 ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
4821 struct ath_hal *ah = sc->sc_ah;
4823 ATH_TXQ_LOCK_ASSERT(txq);
4825 DPRINTF(sc, ATH_DEBUG_RESET,
4826 "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, "
4827 "link %p, holdingbf=%p\n",
__func__,
txq->axq_qnum,
(caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
4831 (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)),
4832 (int) ath_hal_numtxpending(ah, txq->axq_qnum),
txq->axq_flags,
txq->axq_link,
txq->axq_holdingbf);
4837 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
4838 /* We've stopped TX DMA, so mark this as stopped. */
4839 txq->axq_flags &= ~ATH_TXQ_PUTRUNNING;
4842 if ((sc->sc_debug & ATH_DEBUG_RESET)
4843 && (txq->axq_holdingbf != NULL)) {
4844 ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0);
4850 ath_stoptxdma(struct ath_softc *sc)
4852 struct ath_hal *ah = sc->sc_ah;
4855 /* XXX return value */
4859 if (!sc->sc_invalid) {
4860 /* don't touch the hardware if marked invalid */
4861 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
4862 __func__, sc->sc_bhalq,
(caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
NULL);
4866 /* stop the beacon queue */
4867 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
4869 /* Stop the data queues */
4870 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
4871 if (ATH_TXQ_SETUP(sc, i)) {
4872 ATH_TXQ_LOCK(&sc->sc_txq[i]);
4873 ath_tx_stopdma(sc, &sc->sc_txq[i]);
4874 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
4884 ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq)
4886 struct ath_hal *ah = sc->sc_ah;
4890 if (! (sc->sc_debug & ATH_DEBUG_RESET))
4893 device_printf(sc->sc_dev, "%s: Q%d: begin\n",
4894 __func__, txq->axq_qnum);
4895 TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
4896 ath_printtxbuf(sc, bf, txq->axq_qnum, i,
4897 ath_hal_txprocdesc(ah, bf->bf_lastds,
4898 &bf->bf_status.ds_txstat) == HAL_OK);
4901 device_printf(sc->sc_dev, "%s: Q%d: end\n",
4902 __func__, txq->axq_qnum);
4904 #endif /* ATH_DEBUG */
4907 * Drain the transmit queues and reclaim resources.
4910 ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
4912 struct ath_hal *ah = sc->sc_ah;
4914 struct ifnet *ifp = sc->sc_ifp;
4917 struct ath_buf *bf_last;
4919 (void) ath_stoptxdma(sc);
4922 * Dump the queue contents
4924 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
* XXX TODO: should we just handle the completed TX frames
* here, whether the reset is a full one or not?
4929 if (ATH_TXQ_SETUP(sc, i)) {
4931 if (sc->sc_debug & ATH_DEBUG_RESET)
4932 ath_tx_dump(sc, &sc->sc_txq[i]);
4933 #endif /* ATH_DEBUG */
4934 if (reset_type == ATH_RESET_NOLOSS) {
4935 ath_tx_processq(sc, &sc->sc_txq[i], 0);
4936 ATH_TXQ_LOCK(&sc->sc_txq[i]);
* Free the holding buffer; DMA is now
* stopped.
4941 ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
4943 * Setup the link pointer to be the
4944 * _last_ buffer/descriptor in the list.
* If there's nothing in the list, set it
* to NULL.
4948 bf_last = ATH_TXQ_LAST(&sc->sc_txq[i],
4950 if (bf_last != NULL) {
4951 ath_hal_gettxdesclinkptr(ah,
4953 &sc->sc_txq[i].axq_link);
4955 sc->sc_txq[i].axq_link = NULL;
4957 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
4959 ath_tx_draintxq(sc, &sc->sc_txq[i]);
4963 if (sc->sc_debug & ATH_DEBUG_RESET) {
4964 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
4965 if (bf != NULL && bf->bf_m != NULL) {
4966 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
4967 ath_hal_txprocdesc(ah, bf->bf_lastds,
4968 &bf->bf_status.ds_txstat) == HAL_OK);
4969 ieee80211_dump_pkt(ifp->if_l2com,
4970 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
4974 #endif /* ATH_DEBUG */
4976 /* remove, DragonFly uses OACTIVE to control if_start calls */
4977 IF_LOCK(&ifp->if_snd);
4978 ifq_clr_oactive(&ifp->if_snd);
4979 IF_UNLOCK(&ifp->if_snd);
4981 sc->sc_wd_timer = 0;
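/*
 * Sketch of the reset_type distinction above (my reading, not
 * normative): ATH_RESET_NOLOSS runs the normal completion path,
 * ath_tx_processq(sc, txq, 0), so completed frames are accounted
 * and the link pointer is rebuilt for a later restart, whereas any
 * other reset type discards the queue contents via
 * ath_tx_draintxq().
 */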
4985 * Update internal state after a channel change.
4988 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
4990 enum ieee80211_phymode mode;
4993 * Change channels and update the h/w rate map
4994 * if we're switching; e.g. 11a to 11b/g.
4996 mode = ieee80211_chan2mode(chan);
4997 if (mode != sc->sc_curmode)
4998 ath_setcurmode(sc, mode);
4999 sc->sc_curchan = chan;
5003 * Set/change channels. If the channel is really being changed,
5004 * it's done by resetting the chip. To accomplish this we must
* first cleanup any pending DMA, then restart stuff after a la
* ath_init.
5009 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
5011 struct ifnet *ifp = sc->sc_ifp;
5012 struct ieee80211com *ic = ifp->if_l2com;
5013 struct ath_hal *ah = sc->sc_ah;
5016 /* Treat this as an interface reset */
5017 ATH_PCU_UNLOCK_ASSERT(sc);
5018 ATH_UNLOCK_ASSERT(sc);
/* (Try to) stop TX/RX from occurring */
5021 taskqueue_block(sc->sc_tq);
5025 /* Stop new RX/TX/interrupt completion */
5026 if (ath_reset_grablock(sc, 1) == 0) {
device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
__func__);
}
5031 ath_hal_intrset(ah, 0);
5033 /* Stop pending RX/TX completion */
5034 ath_txrx_stop_locked(sc);
5038 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
5039 __func__, ieee80211_chan2ieee(ic, chan),
5040 chan->ic_freq, chan->ic_flags);
5041 if (chan != sc->sc_curchan) {
5044 * To switch channels clear any pending DMA operations;
5045 * wait long enough for the RX fifo to drain, reset the
5046 * hardware at the new frequency, and then re-enable
5047 * the relevant bits of the h/w.
5050 ath_hal_intrset(ah, 0); /* disable interrupts */
5052 ath_stoprecv(sc, 1); /* turn off frame recv */
5054 * First, handle completed TX/RX frames.
5057 ath_draintxq(sc, ATH_RESET_NOLOSS);
5059 * Next, flush the non-scheduled frames.
5061 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */
5063 ath_update_chainmasks(sc, chan);
5064 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
5065 sc->sc_cur_rxchainmask);
5066 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
5067 if_printf(ifp, "%s: unable to reset "
5068 "channel %u (%u MHz, flags 0x%x), hal status %u\n",
5069 __func__, ieee80211_chan2ieee(ic, chan),
5070 chan->ic_freq, chan->ic_flags, status);
5074 sc->sc_diversity = ath_hal_getdiversity(ah);
5076 /* Let DFS at it in case it's a DFS channel */
5077 ath_dfs_radar_enable(sc, chan);
/* Let spectral at it in case spectral is enabled */
5080 ath_spectral_enable(sc, chan);
* Let bluetooth coexistence at it in case it's needed for this
* channel.
5086 ath_btcoex_enable(sc, ic->ic_curchan);
5089 * If we're doing TDMA, enforce the TXOP limitation for chips
5092 if (sc->sc_hasenforcetxop && sc->sc_tdma)
5093 ath_hal_setenforcetxop(sc->sc_ah, 1);
5095 ath_hal_setenforcetxop(sc->sc_ah, 0);
5098 * Re-enable rx framework.
5100 if (ath_startrecv(sc) != 0) {
if_printf(ifp, "%s: unable to restart recv logic\n",
__func__);
5108 * Change channels and update the h/w rate map
5109 * if we're switching; e.g. 11a to 11b/g.
5111 ath_chan_change(sc, chan);
5114 * Reset clears the beacon timers; reset them
5117 if (sc->sc_beacons) { /* restart beacons */
5118 #ifdef IEEE80211_SUPPORT_TDMA
5120 ath_tdma_config(sc, NULL);
5123 ath_beacon_config(sc, NULL);
5127 * Re-enable interrupts.
5130 ath_hal_intrset(ah, sc->sc_imask);
5136 sc->sc_inreset_cnt--;
5137 /* XXX only do this if sc_inreset_cnt == 0? */
5138 ath_hal_intrset(ah, sc->sc_imask);
5142 /* remove, DragonFly uses OACTIVE to control if_start calls */
5143 IF_LOCK(&ifp->if_snd);
5144 ifq_clr_oactive(&ifp->if_snd);
5145 IF_UNLOCK(&ifp->if_snd);
5148 /* XXX ath_start? */
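/*
 * Channel-change sequence used above, summarised:
 *
 *	taskqueue_block()	- stop new deferred work
 *	ath_reset_grablock()	- serialise against concurrent resets
 *	ath_hal_intrset(ah, 0)	- mask interrupts
 *	ath_stoprecv() +
 *	ath_draintxq()		- quiesce DMA (NOLOSS, then FULL)
 *	ath_hal_reset()		- retune to the new channel
 *	ath_startrecv(),
 *	beacon reconfig		- bring RX and beacon timers back
 *	ath_hal_intrset(imask)	- unmask interrupts
 */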
5154 * Periodically recalibrate the PHY to account
5155 * for temperature/environment changes.
5158 ath_calibrate(void *arg)
5160 struct ath_softc *sc = arg;
5161 struct ath_hal *ah = sc->sc_ah;
5162 struct ifnet *ifp = sc->sc_ifp;
5163 struct ieee80211com *ic = ifp->if_l2com;
5164 HAL_BOOL longCal, isCalDone = AH_TRUE;
5165 HAL_BOOL aniCal, shortCal = AH_FALSE;
5168 wlan_serialize_enter();
5169 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
5171 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
5172 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
5173 if (sc->sc_doresetcal)
5174 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
5176 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
5178 sc->sc_stats.ast_ani_cal++;
5179 sc->sc_lastani = ticks;
5180 ath_hal_ani_poll(ah, sc->sc_curchan);
5184 sc->sc_stats.ast_per_cal++;
5185 sc->sc_lastlongcal = ticks;
5186 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
5188 * Rfgain is out of bounds, reset the chip
5189 * to load new gain values.
5191 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5192 "%s: rfgain change\n", __func__);
5193 sc->sc_stats.ast_per_rfgain++;
5194 sc->sc_resetcal = 0;
5195 sc->sc_doresetcal = AH_TRUE;
5196 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
5197 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5201 * If this long cal is after an idle period, then
5202 * reset the data collection state so we start fresh.
5204 if (sc->sc_resetcal) {
5205 (void) ath_hal_calreset(ah, sc->sc_curchan);
5206 sc->sc_lastcalreset = ticks;
5207 sc->sc_lastshortcal = ticks;
5208 sc->sc_resetcal = 0;
5209 sc->sc_doresetcal = AH_TRUE;
5213 /* Only call if we're doing a short/long cal, not for ANI calibration */
5214 if (shortCal || longCal) {
5215 isCalDone = AH_FALSE;
5216 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
5219 * Calibrate noise floor data again in case of change.
5221 ath_hal_process_noisefloor(ah);
5224 DPRINTF(sc, ATH_DEBUG_ANY,
5225 "%s: calibration of channel %u failed\n",
5226 __func__, sc->sc_curchan->ic_freq);
5227 sc->sc_stats.ast_per_calfail++;
5230 sc->sc_lastshortcal = ticks;
5235 * Use a shorter interval to potentially collect multiple
5236 * data samples required to complete calibration. Once
5237 * we're told the work is done we drop back to a longer
5238 * interval between requests. We're more aggressive doing
* work when operating as an AP to improve operation right
* after startup.
5242 sc->sc_lastshortcal = ticks;
5243 nextcal = ath_shortcalinterval*hz/1000;
5244 if (sc->sc_opmode != HAL_M_HOSTAP)
5246 sc->sc_doresetcal = AH_TRUE;
5248 /* nextcal should be the shortest time for next event */
5249 nextcal = ath_longcalinterval*hz;
5250 if (sc->sc_lastcalreset == 0)
5251 sc->sc_lastcalreset = sc->sc_lastlongcal;
5252 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
5253 sc->sc_resetcal = 1; /* setup reset next trip */
5254 sc->sc_doresetcal = AH_FALSE;
5256 /* ANI calibration may occur more often than short/long/resetcal */
5257 if (ath_anicalinterval > 0)
5258 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
5261 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
5262 __func__, nextcal, isCalDone ? "" : "!");
5263 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
__func__);
5267 /* NB: don't rearm timer */
5270 wlan_serialize_exit();
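/*
 * The callout interval chosen above reduces to roughly this
 * sketch (the non-AP case stretches the short interval further):
 *
 *	if (isCalDone)
 *		nextcal = ath_longcalinterval * hz;
 *	else
 *		nextcal = ath_shortcalinterval * hz / 1000;
 *	if (ath_anicalinterval > 0)
 *		nextcal = MIN(nextcal, ath_anicalinterval * hz / 1000);
 *
 * i.e. the long interval is in seconds, the short/ANI intervals
 * in milliseconds, and ANI polling may always shorten the wait.
 */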
5274 ath_scan_start(struct ieee80211com *ic)
5276 struct ifnet *ifp = ic->ic_ifp;
5277 struct ath_softc *sc = ifp->if_softc;
5278 struct ath_hal *ah = sc->sc_ah;
5281 /* XXX calibration timer? */
5284 sc->sc_scanning = 1;
5285 sc->sc_syncbeacon = 0;
5286 rfilt = ath_calcrxfilter(sc);
5290 ath_hal_setrxfilter(ah, rfilt);
5291 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
5294 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
5295 __func__, rfilt, ath_hal_ether_sprintf(ifp->if_broadcastaddr));
5299 ath_scan_end(struct ieee80211com *ic)
5301 struct ifnet *ifp = ic->ic_ifp;
5302 struct ath_softc *sc = ifp->if_softc;
5303 struct ath_hal *ah = sc->sc_ah;
5307 sc->sc_scanning = 0;
5308 rfilt = ath_calcrxfilter(sc);
5312 ath_hal_setrxfilter(ah, rfilt);
5313 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5315 ath_hal_process_noisefloor(ah);
5318 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5319 __func__, rfilt, ath_hal_ether_sprintf(sc->sc_curbssid),
5323 #ifdef ATH_ENABLE_11N
5325 * For now, just do a channel change.
5327 * Later, we'll go through the hard slog of suspending tx/rx, changing rate
* control state and resetting the hardware without dropping frames out
* of the RX queue.
5331 * The unfortunate trouble here is making absolutely sure that the
5332 * channel width change has propagated enough so the hardware
* absolutely isn't handed bogus frames for its current operating
* mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and
* do occur in parallel, we need to make certain we've blocked
5336 * any further ongoing TX (and RX, that can cause raw TX)
5337 * before we do this.
5340 ath_update_chw(struct ieee80211com *ic)
5342 struct ifnet *ifp = ic->ic_ifp;
5343 struct ath_softc *sc = ifp->if_softc;
5345 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
5346 ath_set_channel(ic);
5348 #endif /* ATH_ENABLE_11N */
5351 ath_set_channel(struct ieee80211com *ic)
5353 struct ifnet *ifp = ic->ic_ifp;
5354 struct ath_softc *sc = ifp->if_softc;
5356 (void) ath_chan_set(sc, ic->ic_curchan);
5358 * If we are returning to our bss channel then mark state
5359 * so the next recv'd beacon's tsf will be used to sync the
5360 * beacon timers. Note that since we only hear beacons in
5361 * sta/ibss mode this has no effect in other operating modes.
5364 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
5365 sc->sc_syncbeacon = 1;
* Walk the vap list and check if there are any vaps in RUN state.
5373 ath_isanyrunningvaps(struct ieee80211vap *this)
5375 struct ieee80211com *ic = this->iv_ic;
5376 struct ieee80211vap *vap;
5378 IEEE80211_LOCK_ASSERT(ic);
5380 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
5381 if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
5388 ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
5390 struct ieee80211com *ic = vap->iv_ic;
5391 struct ath_softc *sc = ic->ic_ifp->if_softc;
5392 struct ath_vap *avp = ATH_VAP(vap);
5393 struct ath_hal *ah = sc->sc_ah;
5394 struct ieee80211_node *ni = NULL;
5395 int i, error, stamode;
5397 int csa_run_transition = 0;
5399 static const HAL_LED_STATE leds[] = {
5400 HAL_LED_INIT, /* IEEE80211_S_INIT */
5401 HAL_LED_SCAN, /* IEEE80211_S_SCAN */
5402 HAL_LED_AUTH, /* IEEE80211_S_AUTH */
5403 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
5404 HAL_LED_RUN, /* IEEE80211_S_CAC */
5405 HAL_LED_RUN, /* IEEE80211_S_RUN */
5406 HAL_LED_RUN, /* IEEE80211_S_CSA */
5407 HAL_LED_RUN, /* IEEE80211_S_SLEEP */
5410 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
5411 ieee80211_state_name[vap->iv_state],
5412 ieee80211_state_name[nstate]);
5415 * net80211 _should_ have the comlock asserted at this point.
5416 * There are some comments around the calls to vap->iv_newstate
5417 * which indicate that it (newstate) may end up dropping the
5418 * lock. This and the subsequent lock assert check after newstate
5419 * are an attempt to catch these and figure out how/why.
5421 IEEE80211_LOCK_ASSERT(ic);
5423 if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
5424 csa_run_transition = 1;
5426 wlan_serialize_exit();
5427 callout_drain(&sc->sc_cal_ch);
5428 wlan_serialize_enter();
5429 ath_hal_setledstate(ah, leds[nstate]); /* set LED */
5431 if (nstate == IEEE80211_S_SCAN) {
5433 * Scanning: turn off beacon miss and don't beacon.
5434 * Mark beacon state so when we reach RUN state we'll
5435 * [re]setup beacons. Unblock the task q thread so
5436 * deferred interrupt processing is done.
ath_hal_intrset(ah,
sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
5440 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5442 taskqueue_unblock(sc->sc_tq);
5445 ni = ieee80211_ref_node(vap->iv_bss);
5446 rfilt = ath_calcrxfilter(sc);
5447 stamode = (vap->iv_opmode == IEEE80211_M_STA ||
5448 vap->iv_opmode == IEEE80211_M_AHDEMO ||
5449 vap->iv_opmode == IEEE80211_M_IBSS);
5450 if (stamode && nstate == IEEE80211_S_RUN) {
5451 sc->sc_curaid = ni->ni_associd;
5452 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
5453 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5455 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5457 ath_hal_ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
5458 ath_hal_setrxfilter(ah, rfilt);
5460 /* XXX is this to restore keycache on resume? */
5461 if (vap->iv_opmode != IEEE80211_M_STA &&
5462 (vap->iv_flags & IEEE80211_F_PRIVACY)) {
5463 for (i = 0; i < IEEE80211_WEP_NKID; i++)
5464 if (ath_hal_keyisvalid(ah, i))
5465 ath_hal_keysetmac(ah, i, ni->ni_bssid);
5469 * Invoke the parent method to do net80211 work.
5471 error = avp->av_newstate(vap, nstate, arg);
5476 * See above: ensure av_newstate() doesn't drop the lock
5479 IEEE80211_LOCK_ASSERT(ic);
5481 if (nstate == IEEE80211_S_RUN) {
5482 /* NB: collect bss node again, it may have changed */
5483 ieee80211_free_node(ni);
5484 ni = ieee80211_ref_node(vap->iv_bss);
5486 DPRINTF(sc, ATH_DEBUG_STATE,
5487 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
5488 "capinfo 0x%04x chan %d\n", __func__,
5489 vap->iv_flags, ni->ni_intval,
5490 ath_hal_ether_sprintf(ni->ni_bssid),
5491 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));
5493 switch (vap->iv_opmode) {
5494 #ifdef IEEE80211_SUPPORT_TDMA
5495 case IEEE80211_M_AHDEMO:
5496 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
5500 case IEEE80211_M_HOSTAP:
5501 case IEEE80211_M_IBSS:
5502 case IEEE80211_M_MBSS:
5504 * Allocate and setup the beacon frame.
5506 * Stop any previous beacon DMA. This may be
5507 * necessary, for example, when an ibss merge
5508 * causes reconfiguration; there will be a state
5509 * transition from RUN->RUN that means we may
5510 * be called with beacon transmission active.
5512 ath_hal_stoptxdma(ah, sc->sc_bhalq);
5514 error = ath_beacon_alloc(sc, ni);
5518 * If joining an adhoc network defer beacon timer
5519 * configuration to the next beacon frame so we
5520 * have a current TSF to use. Otherwise we're
5521 * starting an ibss/bss so there's no need to delay;
5522 * if this is the first vap moving to RUN state, then
5523 * beacon state needs to be [re]configured.
5525 if (vap->iv_opmode == IEEE80211_M_IBSS &&
5526 ni->ni_tstamp.tsf != 0) {
5527 sc->sc_syncbeacon = 1;
5528 } else if (!sc->sc_beacons) {
5529 #ifdef IEEE80211_SUPPORT_TDMA
5530 if (vap->iv_caps & IEEE80211_C_TDMA)
5531 ath_tdma_config(sc, vap);
5534 ath_beacon_config(sc, vap);
5538 case IEEE80211_M_STA:
5540 * Defer beacon timer configuration to the next
5541 * beacon frame so we have a current TSF to use
5542 * (any TSF collected when scanning is likely old).
5543 * However if it's due to a CSA -> RUN transition,
5544 * force a beacon update so we pick up a lack of
* beacons from an AP in CAC and thus force a
* scan.
* And there are also corner cases here where,
5549 * after a scan, the AP may have disappeared.
5550 * In that case, we may not receive an actual
5551 * beacon to update the beacon timer and thus we
5552 * won't get notified of the missing beacons.
5554 sc->sc_syncbeacon = 1;
5556 if (csa_run_transition)
5558 ath_beacon_config(sc, vap);
* Reconfigure beacons during reset, as otherwise
5564 * we won't get the beacon timers reprogrammed
5565 * after a reset and thus we won't pick up a
5566 * beacon miss interrupt.
5568 * Hopefully we'll see a beacon before the BMISS
* timer fires (too often), leading to a STA
* disassociation.
5574 case IEEE80211_M_MONITOR:
5576 * Monitor mode vaps have only INIT->RUN and RUN->RUN
5577 * transitions so we must re-enable interrupts here to
5578 * handle the case of a single monitor mode vap.
5580 ath_hal_intrset(ah, sc->sc_imask);
5582 case IEEE80211_M_WDS:
5588 * Let the hal process statistics collected during a
5589 * scan so it can provide calibrated noise floor data.
5591 ath_hal_process_noisefloor(ah);
5593 * Reset rssi stats; maybe not the best place...
5595 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
5596 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
5597 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
5599 * Finally, start any timers and the task q thread
5600 * (in case we didn't go through SCAN state).
5602 if (ath_longcalinterval != 0) {
5603 /* start periodic recalibration timer */
5604 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5606 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5607 "%s: calibration disabled\n", __func__);
5609 taskqueue_unblock(sc->sc_tq);
5610 } else if (nstate == IEEE80211_S_INIT) {
5612 * If there are no vaps left in RUN state then
5613 * shutdown host/driver operation:
5614 * o disable interrupts
5615 * o disable the task queue thread
5616 * o mark beacon processing as stopped
5618 if (!ath_isanyrunningvaps(vap)) {
5619 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
5620 /* disable interrupts */
5621 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
5622 taskqueue_block(sc->sc_tq);
5625 #ifdef IEEE80211_SUPPORT_TDMA
5626 ath_hal_setcca(ah, AH_TRUE);
5630 ieee80211_free_node(ni);
5635 * Allocate a key cache slot to the station so we can
5636 * setup a mapping from key index to node. The key cache
5637 * slot is needed for managing antenna state and for
5638 * compression when stations do not use crypto. We do
* it unilaterally here; if crypto is employed this slot
5640 * will be reassigned.
5643 ath_setup_stationkey(struct ieee80211_node *ni)
5645 struct ieee80211vap *vap = ni->ni_vap;
5646 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5647 ieee80211_keyix keyix, rxkeyix;
5649 /* XXX should take a locked ref to vap->iv_bss */
5650 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
5652 * Key cache is full; we'll fall back to doing
5653 * the more expensive lookup in software. Note
5654 * this also means no h/w compression.
5656 /* XXX msg+statistic */
5659 ni->ni_ucastkey.wk_keyix = keyix;
5660 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
5661 /* NB: must mark device key to get called back on delete */
5662 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
5663 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
5664 /* NB: this will create a pass-thru key entry */
5665 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
5670 * Setup driver-specific state for a newly associated node.
* Note that we're also called on a re-associate; the isnew
* param tells us whether this is the first time or not.
5675 ath_newassoc(struct ieee80211_node *ni, int isnew)
5677 struct ath_node *an = ATH_NODE(ni);
5678 struct ieee80211vap *vap = ni->ni_vap;
5679 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5680 const struct ieee80211_txparam *tp = ni->ni_txparms;
5682 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
5683 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
5685 ath_rate_newassoc(sc, an, isnew);
if (isnew &&
(vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
5689 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
5690 ath_setup_stationkey(ni);
* If we're reassociating, make sure that any paused queues
* get unpaused.
*
* Now, we may have frames in the hardware queue for this node.
5697 * So if we are reassociating and there are frames in the queue,
5698 * we need to go through the cleanup path to ensure that they're
5699 * marked as non-aggregate.
5702 DPRINTF(sc, ATH_DEBUG_NODE,
5703 "%s: %s: reassoc; is_powersave=%d\n",
5705 ath_hal_ether_sprintf(ni->ni_macaddr),
5706 an->an_is_powersave);
5708 /* XXX for now, we can't hold the lock across assoc */
5709 ath_tx_node_reassoc(sc, an);
5711 /* XXX for now, we can't hold the lock across wakeup */
5712 if (an->an_is_powersave)
5713 ath_tx_node_wakeup(sc, an);
5718 ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
5719 int nchans, struct ieee80211_channel chans[])
5721 struct ath_softc *sc = ic->ic_ifp->if_softc;
5722 struct ath_hal *ah = sc->sc_ah;
5725 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
5726 "%s: rd %u cc %u location %c%s\n",
5727 __func__, reg->regdomain, reg->country, reg->location,
5728 reg->ecm ? " ecm" : "");
5730 status = ath_hal_set_channels(ah, chans, nchans,
5731 reg->country, reg->regdomain);
5732 if (status != HAL_OK) {
DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
__func__, status);
5735 return EINVAL; /* XXX */
5742 ath_getradiocaps(struct ieee80211com *ic,
5743 int maxchans, int *nchans, struct ieee80211_channel chans[])
5745 struct ath_softc *sc = ic->ic_ifp->if_softc;
5746 struct ath_hal *ah = sc->sc_ah;
5748 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
5749 __func__, SKU_DEBUG, CTRY_DEFAULT);
5751 /* XXX check return */
5752 (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
5753 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
5758 ath_getchannels(struct ath_softc *sc)
5760 struct ifnet *ifp = sc->sc_ifp;
5761 struct ieee80211com *ic = ifp->if_l2com;
5762 struct ath_hal *ah = sc->sc_ah;
5766 * Collect channel set based on EEPROM contents.
5768 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
5769 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
5770 if (status != HAL_OK) {
5771 if_printf(ifp, "%s: unable to collect channel list from hal, "
5772 "status %d\n", __func__, status);
5775 (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
5776 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
5777 /* XXX map Atheros sku's to net80211 SKU's */
5778 /* XXX net80211 types too small */
5779 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
5780 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
5781 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
5782 ic->ic_regdomain.isocc[1] = ' ';
5784 ic->ic_regdomain.ecm = 1;
5785 ic->ic_regdomain.location = 'I';
5787 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
5788 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
5789 __func__, sc->sc_eerd, sc->sc_eecc,
5790 ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
5791 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
5796 ath_rate_setup(struct ath_softc *sc, u_int mode)
5798 struct ath_hal *ah = sc->sc_ah;
5799 const HAL_RATE_TABLE *rt;
5802 case IEEE80211_MODE_11A:
5803 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
5805 case IEEE80211_MODE_HALF:
5806 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
5808 case IEEE80211_MODE_QUARTER:
5809 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
5811 case IEEE80211_MODE_11B:
5812 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
5814 case IEEE80211_MODE_11G:
5815 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
5817 case IEEE80211_MODE_TURBO_A:
5818 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
5820 case IEEE80211_MODE_TURBO_G:
5821 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
5823 case IEEE80211_MODE_STURBO_A:
5824 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
5826 case IEEE80211_MODE_11NA:
5827 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
5829 case IEEE80211_MODE_11NG:
5830 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
__func__, mode);
5837 sc->sc_rates[mode] = rt;
5838 return (rt != NULL);
5842 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
5844 #define N(a) (sizeof(a)/sizeof(a[0]))
5845 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
5846 static const struct {
5847 u_int rate; /* tx/rx 802.11 rate */
5848 u_int16_t timeOn; /* LED on time (ms) */
5849 u_int16_t timeOff; /* LED off time (ms) */
5865 /* XXX half/quarter rates */
5867 const HAL_RATE_TABLE *rt;
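/*
 * Build the reverse map from IEEE rate codes (with the MCS bit
 * set for HT rates) back to h/w rate-table indices; 0xff marks
 * rates the current table doesn't support.
 */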
5870 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
5871 rt = sc->sc_rates[mode];
5872 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
5873 for (i = 0; i < rt->rateCount; i++) {
5874 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
5875 if (rt->info[i].phy != IEEE80211_T_HT)
5876 sc->sc_rixmap[ieeerate] = i;
5878 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
5880 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
5881 for (i = 0; i < N(sc->sc_hwmap); i++) {
5882 if (i >= rt->rateCount) {
5883 sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
5884 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
5887 sc->sc_hwmap[i].ieeerate =
5888 rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
5889 if (rt->info[i].phy == IEEE80211_T_HT)
5890 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
5891 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
5892 if (rt->info[i].shortPreamble ||
5893 rt->info[i].phy == IEEE80211_T_OFDM)
5894 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
5895 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
5896 for (j = 0; j < N(blinkrates)-1; j++)
5897 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
5899 /* NB: this uses the last entry if the rate isn't found */
/* XXX beware of overflow */
5901 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
5902 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
5904 sc->sc_currates = rt;
5905 sc->sc_curmode = mode;
* All protection frames are transmitted at 2Mb/s for
* 11g, otherwise at 1Mb/s (the rate argument to
* ath_tx_findrix() is in 500kb/s units).
5910 if (mode == IEEE80211_MODE_11G)
5911 sc->sc_protrix = ath_tx_findrix(sc, 2*2);
5913 sc->sc_protrix = ath_tx_findrix(sc, 2*1);
5914 /* NB: caller is responsible for resetting rate control state */
5919 ath_watchdog(void *arg)
5921 struct ath_softc *sc = arg;
5924 wlan_serialize_enter();
5925 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
5926 struct ifnet *ifp = sc->sc_ifp;
if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
hangs != 0) {
5931 if_printf(ifp, "%s hang detected (0x%x)\n",
5932 hangs & 0xff ? "bb" : "mac", hangs);
5934 if_printf(ifp, "device timeout\n");
5937 sc->sc_stats.ast_watchdog++;
5941 * We can't hold the lock across the ath_reset() call.
5943 * And since this routine can't hold a lock and sleep,
5944 * do the reset deferred.
5947 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
5950 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
5951 wlan_serialize_exit();
5955 * (DragonFly network start)
5958 ath_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
5960 struct ath_softc *sc = ifp->if_softc;
5963 wlan_assert_serialized();
5964 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
5966 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) {
5967 ifq_purge(&ifp->if_snd);
5970 ifq_set_oactive(&ifp->if_snd);
5972 m = ifq_dequeue(&ifp->if_snd);
5975 ath_transmit(ifp, m);
5977 ifq_clr_oactive(&ifp->if_snd);
5981 * Fetch the rate control statistics for the given node.
5984 ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs)
5986 struct ath_node *an;
5987 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
5988 struct ieee80211_node *ni;
5991 /* Perform a lookup on the given node */
5992 ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr);
/* Lock the ath_node */
an = ATH_NODE(ni);
ATH_NODE_LOCK(an);
6002 /* Fetch the rate control stats for this node */
6003 error = ath_rate_fetch_node_stats(sc, an, rs);
6005 /* No matter what happens here, just drop through */
6007 /* Unlock the ath_node */
6008 ATH_NODE_UNLOCK(an);
6010 /* Unref the node */
6011 ieee80211_node_decref(ni);
6019 * Diagnostic interface to the HAL. This is used by various
6020 * tools to do things like retrieve register contents for
6021 * debugging. The mechanism is intentionally opaque so that
* it can change frequently w/o concern for compatibility.
6025 ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
6027 struct ath_hal *ah = sc->sc_ah;
6028 u_int id = ad->ad_id & ATH_DIAG_ID;
6029 void *indata = NULL;
6030 void *outdata = NULL;
6031 u_int32_t insize = ad->ad_in_size;
6032 u_int32_t outsize = ad->ad_out_size;
6035 if (ad->ad_id & ATH_DIAG_IN) {
6039 indata = kmalloc(insize, M_TEMP, M_INTWAIT);
6040 if (indata == NULL) {
6044 error = copyin(ad->ad_in_data, indata, insize);
6048 if (ad->ad_id & ATH_DIAG_DYN) {
6050 * Allocate a buffer for the results (otherwise the HAL
6051 * returns a pointer to a buffer where we can read the
6052 * results). Note that we depend on the HAL leaving this
6053 * pointer for us to use below in reclaiming the buffer;
6054 * may want to be more defensive.
6056 outdata = kmalloc(outsize, M_TEMP, M_INTWAIT);
6057 if (outdata == NULL) {
6062 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
6063 if (outsize < ad->ad_out_size)
6064 ad->ad_out_size = outsize;
6065 if (outdata != NULL)
6066 error = copyout(outdata, ad->ad_out_data,
6072 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
6073 kfree(indata, M_TEMP);
6074 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
6075 kfree(outdata, M_TEMP);
6078 #endif /* ATH_DIAGAPI */
6081 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data,
6082 struct ucred *cr __unused)
6084 #define IS_RUNNING(ifp) \
6085 ((ifp->if_flags & IFF_UP) && (ifp->if_flags & IFF_RUNNING))
6086 struct ath_softc *sc = ifp->if_softc;
6087 struct ieee80211com *ic = ifp->if_l2com;
6088 struct ifreq *ifr = (struct ifreq *)data;
6089 const HAL_RATE_TABLE *rt;
6095 if (IS_RUNNING(ifp)) {
6097 * To avoid rescanning another access point,
6098 * do not call ath_init() here. Instead,
6099 * only reflect promisc mode settings.
6102 } else if (ifp->if_flags & IFF_UP) {
6104 * Beware of being called during attach/detach
6105 * to reset promiscuous mode. In that case we
6106 * will still be marked UP but not RUNNING.
6107 * However trying to re-init the interface
6108 * is the wrong thing to do as we've already
6109 * torn down much of our state. There's
6110 * probably a better way to deal with this.
6112 if (!sc->sc_invalid)
6113 ath_init(sc); /* XXX lose error */
6115 ath_stop_locked(ifp);
6117 /* XXX must wakeup in places like ath_vap_delete */
6118 if (!sc->sc_invalid)
6119 ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
6126 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
6129 /* NB: embed these numbers to get a consistent view */
6130 sc->sc_stats.ast_tx_packets = ifp->if_opackets;
6131 sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
6132 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
6133 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
6134 #ifdef IEEE80211_SUPPORT_TDMA
6135 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
6136 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
6138 rt = sc->sc_currates;
6139 sc->sc_stats.ast_tx_rate =
6140 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
6141 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
6142 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
6143 return copyout(&sc->sc_stats,
6144 ifr->ifr_data, sizeof (sc->sc_stats));
6145 case SIOCGATHAGSTATS:
6146 return copyout(&sc->sc_aggr_stats,
6147 ifr->ifr_data, sizeof (sc->sc_aggr_stats));
6149 error = priv_check(curthread, PRIV_DRIVER);
6151 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
6152 memset(&sc->sc_aggr_stats, 0,
6153 sizeof(sc->sc_aggr_stats));
6154 memset(&sc->sc_intr_stats, 0,
6155 sizeof(sc->sc_intr_stats));
6160 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
6162 case SIOCGATHPHYERR:
6163 error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr);
6166 case SIOCGATHSPECTRAL:
6167 error = ath_ioctl_spectral(sc,(struct ath_diag*) ifr);
6169 case SIOCGATHNODERATESTATS:
6170 error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr);
6173 error = ether_ioctl(ifp, cmd, data);
6184 * Announce various information on device/driver attach.
6187 ath_announce(struct ath_softc *sc)
6189 struct ifnet *ifp = sc->sc_ifp;
6190 struct ath_hal *ah = sc->sc_ah;
6192 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
6193 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
6194 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
6195 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
6196 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
6199 for (i = 0; i <= WME_AC_VO; i++) {
6200 struct ath_txq *txq = sc->sc_ac2q[i];
6201 if_printf(ifp, "Use hw queue %u for %s traffic\n",
6202 txq->axq_qnum, ieee80211_wme_acnames[i]);
6204 if_printf(ifp, "Use hw queue %u for CAB traffic\n",
6205 sc->sc_cabq->axq_qnum);
6206 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
6208 if (ath_rxbuf != ATH_RXBUF)
6209 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
6210 if (ath_txbuf != ATH_TXBUF)
6211 if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
6212 if (sc->sc_mcastkey && bootverbose)
6213 if_printf(ifp, "using multicast key search\n");
6217 ath_dfs_tasklet(void *p, int npending)
6219 struct ath_softc *sc = (struct ath_softc *) p;
6220 struct ifnet *ifp = sc->sc_ifp;
6221 struct ieee80211com *ic = ifp->if_l2com;
6224 * If previous processing has found a radar event,
* signal this to the net80211 layer to begin DFS
* processing.
6228 wlan_serialize_enter();
6229 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
6230 /* DFS event found, initiate channel change */
6232 * XXX doesn't currently tell us whether the event
6233 * XXX was found in the primary or extension
6237 ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
6238 IEEE80211_UNLOCK(ic);
6240 wlan_serialize_exit();
6245 * Enable/disable power save. This must be called with
6246 * no TX driver locks currently held, so it should only
* be called from the RX path (which doesn't hold any
* TX driver locks.)
6251 ath_node_powersave(struct ieee80211_node *ni, int enable)
6254 struct ath_node *an = ATH_NODE(ni);
6255 struct ieee80211com *ic = ni->ni_ic;
6256 struct ath_softc *sc = ic->ic_ifp->if_softc;
6257 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
6259 /* XXX and no TXQ locks should be held here */
6261 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6s: enable=%d\n",
6263 ath_hal_ether_sprintf(ni->ni_macaddr),
6266 /* Suspend or resume software queue handling */
6268 ath_tx_node_sleep(sc, an);
6270 ath_tx_node_wakeup(sc, an);
6272 /* Update net80211 state */
6273 if (avp->av_node_ps)
6274 avp->av_node_ps(ni, enable);
6276 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
6278 /* Update net80211 state */
6279 if (avp->av_node_ps)
6280 avp->av_node_ps(ni, enable);
#endif /* ATH_SW_PSQ */
6287 * Notification from net80211 that the powersave queue state has
6290 * Since the software queue also may have some frames:
* + if the node software queue has frames and the TID state
* is 0, we set the TIM;
* + if the node and the stack are both empty, we clear the TIM bit;
* + if the stack tries to set the bit, always set it;
* + if the stack tries to clear the bit, only clear it if the
* software queue in question is also empty.
6299 * TODO: this is called during node teardown; so let's ensure this
6300 * is all correctly handled and that the TIM bit is cleared.
6301 * It may be that the node flush is called _AFTER_ the net80211
6302 * stack clears the TIM.
6304 * Here is the racy part. Since it's possible >1 concurrent,
6305 * overlapping TXes will appear complete with a TX completion in
6306 * another thread, it's possible that the concurrent TIM calls will
6307 * clash. We can't hold the node lock here because setting the
6308 * TIM grabs the net80211 comlock and this may cause a LOR.
6309 * The solution is either to totally serialise _everything_ at
6310 * this point (ie, all TX, completion and any reset/flush go into
6311 * one taskqueue) or a new "ath TIM lock" needs to be created that
6312 * just wraps the driver state change and this call to avp->av_set_tim().
6314 * The same race exists in the net80211 power save queue handling
6315 * as well. Since multiple transmitting threads may queue frames
6316 * into the driver, as well as ps-poll and the driver transmitting
6317 * frames (and thus clearing the psq), it's quite possible that
6318 * a packet entering the PSQ and a ps-poll being handled will
6319 * race, causing the TIM to be cleared and not re-set.
6322 ath_node_set_tim(struct ieee80211_node *ni, int enable)
6325 struct ieee80211com *ic = ni->ni_ic;
6326 struct ath_softc *sc = ic->ic_ifp->if_softc;
6327 struct ath_node *an = ATH_NODE(ni);
6328 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
6332 an->an_stack_psq = enable;
6335 * This will get called for all operating modes,
6336 * even if avp->av_set_tim is unset.
6337 * It's currently set for hostap/ibss modes; but
6338 * the same infrastructure is used for both STA
6339 * and AP/IBSS node power save.
6341 if (avp->av_set_tim == NULL) {
6347 * If setting the bit, always set it here.
6348 * If clearing the bit, only clear it if the
6349 * software queue is also empty.
6351 * If the node has left power save, just clear the TIM
6352 * bit regardless of the state of the power save queue.
6354 * XXX TODO: although atomics are used, it's quite possible
6355 * that a race will occur between this and setting/clearing
6356 * in another thread. TX completion will occur always in
6357 * one thread, however setting/clearing the TIM bit can come
6358 * from a variety of different process contexts!
6360 if (enable && an->an_tim_set == 1) {
6361 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6362 "%s: %s: enable=%d, tim_set=1, ignoring\n",
6364 ath_hal_ether_sprintf(ni->ni_macaddr),
6367 } else if (enable) {
6368 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6369 "%s: %s: enable=%d, enabling TIM\n",
6371 ath_hal_ether_sprintf(ni->ni_macaddr),
6375 changed = avp->av_set_tim(ni, enable);
6376 } else if (an->an_swq_depth == 0) {
6378 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6379 "%s: %s: enable=%d, an_swq_depth == 0, disabling\n",
6381 ath_hal_ether_sprintf(ni->ni_macaddr),
6385 changed = avp->av_set_tim(ni, enable);
6386 } else if (! an->an_is_powersave) {
6388 * disable regardless; the node isn't in powersave now
6390 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6391 "%s: %s: enable=%d, an_pwrsave=0, disabling\n",
6393 ath_hal_ether_sprintf(ni->ni_macaddr),
6397 changed = avp->av_set_tim(ni, enable);
6400 * psq disable, node is currently in powersave, node
6401 * software queue isn't empty, so don't clear the TIM bit
6405 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6406 "%s: %s: enable=%d, an_swq_depth > 0, ignoring\n",
6408 ath_hal_ether_sprintf(ni->ni_macaddr),
6415 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
6418 * Some operating modes don't set av_set_tim(), so don't
6421 if (avp->av_set_tim == NULL)
6424 return (avp->av_set_tim(ni, enable));
6425 #endif /* ATH_SW_PSQ */
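/*
 * TIM decision summary for the ATH_SW_PSQ path above (sketch):
 *
 *	enable	an_tim_set	an_swq_depth	action
 *	  1	    1		     -		ignore (already set)
 *	  1	    0		     -		set the TIM
 *	  0	    -		     0		clear the TIM
 *	  0	    -		    >0		leave set, unless the
 *						node left powersave, in
 *						which case clear anyway
 */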
6429 * Set or update the TIM from the software queue.
* Check the software queue depth before attempting to
* lock anything; that avoids trying to obtain the lock.
* Then, re-check afterwards to ensure nothing has changed
* in the meantime.
6436 * set: This is designed to be called from the TX path, after
6437 * a frame has been queued; to see if the swq > 0.
6439 * clear: This is designed to be called from the buffer completion point
6440 * (right now it's ath_tx_default_comp()) where the state of
6441 * a software queue has changed.
6443 * It makes sense to place it at buffer free / completion rather
6444 * than after each software queue operation, as there's no real
6445 * point in churning the TIM bit as the last frames in the software
6446 * queue are transmitted. If they fail and we retry them, we'd
6447 * just be setting the TIM bit again anyway.
6450 ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
6454 struct ath_node *an;
6455 struct ath_vap *avp;
6457 /* Don't do this for broadcast/etc frames */
6462 avp = ATH_VAP(ni->ni_vap);
* And for operating modes without the TIM handler set, let's
* just skip it.
6468 if (avp->av_set_tim == NULL)
6471 ATH_TX_LOCK_ASSERT(sc);
6474 if (an->an_is_powersave &&
6475 an->an_tim_set == 0 &&
6476 an->an_swq_depth != 0) {
6477 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6478 "%s: %s: swq_depth>0, tim_set=0, set!\n",
6480 ath_hal_ether_sprintf(ni->ni_macaddr));
6482 (void) avp->av_set_tim(ni, 1);
6486 * Don't bother grabbing the lock unless the queue is empty.
if (an->an_swq_depth != 0)
6491 if (an->an_is_powersave &&
6492 an->an_stack_psq == 0 &&
6493 an->an_tim_set == 1 &&
6494 an->an_swq_depth == 0) {
6495 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6496 "%s: %s: swq_depth=0, tim_set=1, psq_set=0,"
6499 ath_hal_ether_sprintf(ni->ni_macaddr));
6501 (void) avp->av_set_tim(ni, 0);
6506 #endif /* ATH_SW_PSQ */
6511 * Received a ps-poll frame from net80211.
6513 * Here we get a chance to serve out a software-queued frame ourselves
6514 * before we punt it to net80211 to transmit us one itself - either
6515 * because there's traffic in the net80211 psq, or a NULL frame to
6516 * indicate there's nothing else.
6519 ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m)
6522 struct ath_node *an;
6523 struct ath_vap *avp;
6524 struct ieee80211com *ic = ni->ni_ic;
6525 struct ath_softc *sc = ic->ic_ifp->if_softc;
6533 * Unassociated (temporary node) station.
6535 if (ni->ni_associd == 0)
6539 * We do have an active node, so let's begin looking into it.
6542 avp = ATH_VAP(ni->ni_vap);
6545 * For now, we just call the original ps-poll method.
6546 * Once we're ready to flip this on:
6548 * + Set leak to 1, as no matter what we're going to have
6550 * + Check the software queue and if there's something in it,
* schedule the highest TID that has traffic from this node.
6552 * Then make sure we schedule the software scheduler to
6553 * run so it picks up said frame.
6555 * That way whatever happens, we'll at least send _a_ frame
6556 * to the given node.
6558 * Again, yes, it's crappy QoS if the node has multiple
6559 * TIDs worth of traffic - but let's get it working first
6560 * before we optimise it.
6562 * Also yes, there's definitely latency here - we're not
6563 * direct dispatching to the hardware in this path (and
6564 * we're likely being called from the packet receive path,
6565 * so going back into TX may be a little hairy!) but again
6566 * I'd like to get this working first before optimising
6573 * Legacy - we're called and the node isn't asleep.
6576 if (! an->an_is_powersave) {
6577 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6578 "%s: %6D: not in powersave?\n",
6583 if (avp->av_recv_pspoll)
6584 avp->av_recv_pspoll(ni, m);
6589 * We're in powersave.
6593 an->an_leak_count = 1;
* Now, if there are no frames in the node, just punt to
* net80211.
6599 * Don't bother checking if the TIM bit is set, we really
6600 * only care if there are any frames here!
6602 if (an->an_swq_depth == 0) {
6604 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6605 "%s: %6D: SWQ empty; punting to net80211\n",
6609 if (avp->av_recv_pspoll)
6610 avp->av_recv_pspoll(ni, m);
6615 * Ok, let's schedule the highest TID that has traffic
6616 * and then schedule something.
6618 for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) {
6619 struct ath_tid *atid = &an->an_tid[tid];
6623 if (atid->axq_depth == 0)
6625 ath_tx_tid_sched(sc, atid);
6627 * XXX we could do a direct call to the TXQ
6628 * scheduler code here to optimise latency
6629 * at the expense of a REALLY deep callstack.
6632 taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask);
6633 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6634 "%s: %6D: leaking frame to TID %d\n",
6645 * XXX nothing in the TIDs at this point? Eek.
6647 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
6648 "%s: %6D: TIDs empty, but ath_node showed traffic?!\n",
6652 if (avp->av_recv_pspoll)
6653 avp->av_recv_pspoll(ni, m);
6655 if (avp->av_recv_pspoll)
6656 avp->av_recv_pspoll(ni, m);
6657 #endif /* ATH_SW_PSQ */
6662 MODULE_VERSION(if_ath, 1);
6663 MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
6664 #if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)
6665 MODULE_DEPEND(if_ath, alq, 1, 1, 1);