 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define	CTLFLAG_RWTUN	CTLFLAG_RW
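/*
 * DragonFly's sysctl headers have no CTLFLAG_RWTUN, so the define above
 * maps it to plain CTLFLAG_RW; the matching TUNABLE_INT() entries below
 * (e.g. for hw.ath.rxbuf) supply the boot-time tunable half.
 */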
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 *
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/module.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net/ifq_var.h>
#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif
#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_rx_edma.h>
#include <dev/netif/ath/ath/if_ath_tx_edma.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_ath_btcoex.h>
#include <dev/netif/ath/ath/if_ath_spectral.h>
#include <dev/netif/ath/ath/if_ath_lna_div.h>
#include <dev/netif/ath/ath/if_athdfs.h>

#include <dev/netif/ath/ath/ath_tx99/ath_tx99.h>

#include <dev/netif/ath/ath/if_ath_alq.h>
 * Only enable this if you're working on PS-POLL support.
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
CTASSERT(ATH_BCBUF <= 8);
static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
static int	ath_transmit(struct ifnet *ifp, struct mbuf *m);
#if !defined(__DragonFly__)
static void	ath_qflush(struct ifnet *ifp);
#endif
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
#if defined(__DragonFly__)
static int	ath_ioctl(struct ifnet *, u_long, caddr_t, struct ucred * __unused);
#else
static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
#endif
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast_hw(struct ath_softc *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_updateslot(struct ifnet *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static int	ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq,
		    int dosched);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_txq_sched_tasklet(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
#ifdef	ATH_ENABLE_11N
static void	ath_update_chw(struct ieee80211com *);
#endif	/* ATH_ENABLE_11N */
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);
static void	ath_node_powersave(struct ieee80211_node *, int);
static int	ath_node_set_tim(struct ieee80211_node *, int);
static void	ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *);

#if defined(__DragonFly__)
static void	ath_start(struct ifnet *ifp, struct ifaltq_subque *ifsq);
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/netif/ath/ath/if_ath_tdma.h>
#endif

SYSCTL_DECL(_hw_ath);
/* XXX validate sysctl values */
static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static	int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);

int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RWTUN, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);

int ath_txbuf_mgmt = ATH_MGMT_TXBUF;	/* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RWTUN, &ath_txbuf_mgmt,
	    0, "tx (mgmt) buffers allocated");
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);

int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");
MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

static void
ath_legacy_attach_comp_func(struct ath_softc *sc)
{
	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
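	/*
	 * Judging by the handler names, the q0 handler presumably covers
	 * setups with only data queue 0 configured, the q0123 handler
	 * setups with queues 0-3, and ath_tx_proc everything else.
	 */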
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
	}
}
/*
 * Set the target power mode.
 *
 * If this is called during a point in time where
 * the hardware is being programmed elsewhere, it will
 * simply store it away and update it when all current
 * uses of the hardware are completed.
 */
void
_ath_power_setpower(struct ath_softc *sc, int power_state, const char *file, int line)
{
	sc->sc_target_powerstate = power_state;

	DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
	    __func__,
	    file,
	    line,
	    power_state,
	    sc->sc_powersave_refcnt);

	if (sc->sc_powersave_refcnt == 0 &&
	    power_state != sc->sc_cur_powerstate) {
		sc->sc_cur_powerstate = power_state;
		ath_hal_setpower(sc->sc_ah, power_state);

		/*
		 * If the NIC is force-awake, then set the
		 * self-gen frame state appropriately.
		 *
		 * If the NIC is in network sleep or full-sleep,
		 * we let the above call leave the self-gen
		 * frame state alone.
		 */
		if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
		    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
			ath_hal_setselfgenpower(sc->sc_ah,
			    sc->sc_target_selfgen_state);
		}
	}
}
/*
 * Set the current self-generated frames state.
 *
 * This is separate from the target power mode.  The chip may be
 * awake but the desired state is "sleep", so frames sent to the
 * destination have PWRMGT=1 in the 802.11 header.  The NIC also
 * needs to know to set PWRMGT=1 in self-generated frames.
 */
void
_ath_power_set_selfgen(struct ath_softc *sc, int power_state, const char *file, int line)
{
	DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
	    __func__,
	    file,
	    line,
	    power_state,
	    sc->sc_target_selfgen_state);

	sc->sc_target_selfgen_state = power_state;

	/*
	 * If the NIC is force-awake, then set the power state.
	 * Network-sleep and full-sleep will already transition it to
	 * mark self-gen frames as sleeping - and we can't
	 * guarantee the NIC is awake to program the self-gen frame
	 * setting anyway.
	 */
	if (sc->sc_cur_powerstate == HAL_PM_AWAKE) {
		ath_hal_setselfgenpower(sc->sc_ah, power_state);
	}
}
/*
 * Set the hardware power mode and take a reference.
 *
 * This doesn't update the target power mode in the driver;
 * it just updates the hardware power state.
 *
 * XXX it should only ever force the hardware awake; it should
 * never be called to set it asleep.
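 *
 * A sketch of the calling pattern used throughout this file (the
 * non-underscore wrappers presumably supply the file/line arguments):
 *
 *	ath_power_set_power_state(sc, HAL_PM_AWAKE);
 *	... touch the hardware ...
 *	ath_power_restore_power_state(sc);
 */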
void
_ath_power_set_power_state(struct ath_softc *sc, int power_state, const char *file, int line)
{
	DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) state=%d, refcnt=%d\n",
	    __func__,
	    file,
	    line,
	    power_state,
	    sc->sc_powersave_refcnt);

	sc->sc_powersave_refcnt++;

	if (power_state != sc->sc_cur_powerstate) {
		ath_hal_setpower(sc->sc_ah, power_state);
		sc->sc_cur_powerstate = power_state;

		/*
		 * Adjust the self-gen powerstate if appropriate.
		 */
		if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
		    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
			ath_hal_setselfgenpower(sc->sc_ah,
			    sc->sc_target_selfgen_state);
		}
	}
}
/*
 * Restore the power save mode to what it once was.
 *
 * This will decrement the reference counter and once it hits
 * zero, it'll restore the powersave state.
 */
void
_ath_power_restore_power_state(struct ath_softc *sc, const char *file, int line)
{
	DPRINTF(sc, ATH_DEBUG_PWRSAVE, "%s: (%s:%d) refcnt=%d, target state=%d\n",
	    __func__,
	    file,
	    line,
	    sc->sc_powersave_refcnt,
	    sc->sc_target_powerstate);

	if (sc->sc_powersave_refcnt == 0)
		device_printf(sc->sc_dev, "%s: refcnt=0?\n", __func__);
	else
		sc->sc_powersave_refcnt--;

	if (sc->sc_powersave_refcnt == 0 &&
	    sc->sc_target_powerstate != sc->sc_cur_powerstate) {
		sc->sc_cur_powerstate = sc->sc_target_powerstate;
		ath_hal_setpower(sc->sc_ah, sc->sc_target_powerstate);
	}

	/*
	 * Adjust the self-gen powerstate if appropriate.
	 */
	if (sc->sc_cur_powerstate == HAL_PM_AWAKE &&
	    sc->sc_target_selfgen_state != HAL_PM_AWAKE) {
		ath_hal_setselfgenpower(sc->sc_ah,
		    sc->sc_target_selfgen_state);
	}
}
/*
 * Configure the initial HAL configuration values based on bus
 * specific parameters.
 *
 * Some PCI IDs and other information may need tweaking.
 *
 * XXX TODO: ath9k and the Atheros HAL only program comm2g_switch_enable
 * if BT antenna diversity isn't enabled.
 *
 * So, let's also figure out how to enable BT diversity for AR9485.
 */
static void
ath_setup_hal_config(struct ath_softc *sc, HAL_OPS_CONFIG *ah_config)
{
	/* XXX TODO: only for PCI devices? */

	if (sc->sc_pci_devinfo & (ATH_PCI_CUS198 | ATH_PCI_CUS230)) {
		ah_config->ath_hal_ext_lna_ctl_gpio = 0x200; /* bit 9 */
		ah_config->ath_hal_ext_atten_margin_cfg = AH_TRUE;
		ah_config->ath_hal_min_gainidx = AH_TRUE;
		ah_config->ath_hal_ant_ctrl_comm2g_switch_enable = 0x000bbb88;
		/* XXX low_rssi_thresh */
		/* XXX fast_div_bias */
		device_printf(sc->sc_dev, "configuring for %s\n",
		    (sc->sc_pci_devinfo & ATH_PCI_CUS198) ?
		    "CUS198" : "CUS230");
	}

	if (sc->sc_pci_devinfo & ATH_PCI_CUS217)
		device_printf(sc->sc_dev, "CUS217 card detected\n");

	if (sc->sc_pci_devinfo & ATH_PCI_CUS252)
		device_printf(sc->sc_dev, "CUS252 card detected\n");

	if (sc->sc_pci_devinfo & ATH_PCI_AR9565_1ANT)
		device_printf(sc->sc_dev, "WB335 1-ANT card detected\n");

	if (sc->sc_pci_devinfo & ATH_PCI_AR9565_2ANT)
		device_printf(sc->sc_dev, "WB335 2-ANT card detected\n");

	if (sc->sc_pci_devinfo & ATH_PCI_KILLER)
		device_printf(sc->sc_dev, "Killer Wireless card detected\n");

	/*
	 * Some WB335 cards do not support antenna diversity.  Since
	 * we use a hardcoded value for AR9565 instead of using the
	 * EEPROM/OTP data, remove the combining feature from
	 * the HW capabilities bitmap.
	 */
	if (sc->sc_pci_devinfo & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
		if (!(sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV))
			pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
	}

	if (sc->sc_pci_devinfo & ATH9K_PCI_BT_ANT_DIV) {
		pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
		device_printf(sc->sc_dev, "Set BT/WLAN RX diversity capability\n");
	}

	if (sc->sc_pci_devinfo & ATH_PCI_D3_L1_WAR) {
		ah_config->ath_hal_pcie_waen = 0x0040473b;
		device_printf(sc->sc_dev, "Enable WAR for ASPM D3/L1\n");
	}

	if (sc->sc_pci_devinfo & ATH9K_PCI_NO_PLL_PWRSAVE) {
		ah->config.no_pll_pwrsave = true;
		device_printf(sc->sc_dev, "Disable PLL PowerSave\n");
	}
}
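
/*
 * Convenience unions of the 2.4GHz and 5GHz HT20/HT40 HAL mode flags,
 * used when probing for HT support below.
 */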
#define	HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];
	int rx_chainmask, tx_chainmask;
	HAL_OPS_CONFIG ah_config;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	wlan_serialize_enter();

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev));

	/*
	 * Configure the initial configuration data.
	 *
	 * This is stuff that may be needed early during attach
	 * rather than done via configuration calls later.
	 */
	bzero(&ah_config, sizeof(ah_config));
	ath_setup_hal_config(sc, &ah_config);

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_eepromdata, &ah_config, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
		    status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Setup the DMA/EDMA functions based on the current
	 * hardware support.
	 *
	 * This is required before the descriptors are allocated.
	 */
	if (ath_hal_hasedma(sc->sc_ah)) {
		sc->sc_isedma = 1;
		ath_recv_setup_edma(sc);
		ath_xmit_setup_edma(sc);
	} else {
		ath_recv_setup_legacy(sc);
		ath_xmit_setup_legacy(sc);
	}

	if (ath_hal_hasmybeacon(sc->sc_ah)) {
		sc->sc_do_mybeacon = 1;
	}
	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
		    ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);
	/*
	 * Allocate TX descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate TX descriptors: %d\n",
		    error);
		goto bad;
	}
	error = ath_txdma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate TX descriptors: %d\n",
		    error);
		goto bad;
	}

	/*
	 * Allocate RX descriptors and populate the lists.
	 */
	error = ath_rxdma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate RX descriptors: %d\n",
		    error);
		goto bad;
	}

	callout_init_lk(&sc->sc_cal_ch, &sc->sc_mtx);
	callout_init_lk(&sc->sc_wd_ch, &sc->sc_mtx);
	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_INTWAIT,
	    taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1,
	    "%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc);
	TASK_INIT(&sc->sc_resettask, 0, ath_reset_proc, sc);
	TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);
	TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);
	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 */
	sc->sc_bhalq = ath_beaconq_setup(sc);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
		    ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}
	/*
	 * Attach the TX completion function.
	 *
	 * The non-EDMA chips may have some special case optimisations;
	 * this method gives everyone a chance to attach cleanly.
	 */
	sc->sc_tx.xmit_attach_comp_func(sc);

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}
	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach spectral module */
	if (ath_spectral_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach spectral\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach bluetooth coexistence module */
	if (ath_btcoex_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach bluetooth coexistence\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach LNA diversity module */
	if (ath_lna_div_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach LNA diversity\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);
	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init_mp(&sc->sc_ledtimer);

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
	 */
	sc->sc_hardled = (1 == 0);
	sc->sc_led_net_pin = -1;
	sc->sc_led_pwr_pin = -1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
#if defined(__DragonFly__)
	ifp->if_start = ath_start;
#else
	ifp->if_transmit = ath_transmit;
	ifp->if_qflush = ath_qflush;
#endif
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
#if defined(__DragonFly__)
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
#else
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);
#endif
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
#ifndef	ATH_ENABLE_11N
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
#endif
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef	ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable radar detection */
#endif
		| IEEE80211_C_PMGT		/* Station side power mgmt */
		| IEEE80211_C_SWSLEEP
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah);
	sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah);
	sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA;	/* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif
	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;

	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * How deep can the node software TX queue get whilst it's asleep.
	 */
	sc->sc_txq_node_psq_maxdepth = 16;

	/*
	 * Default the maximum queue depth for a given node
	 * to 1/4'th the TX buffers, or 64, whichever
	 * is larger.
	 */
	sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4);

	/* Enable CABQ by default */
	sc->sc_cabq_enable = 1;
	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
	 */
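	/*
	 * For example (a sketch; the hint name tracks the unit number),
	 * in /boot/device.hints:
	 *
	 *	hint.ath.0.rx_chainmask=3
	 *	hint.ath.0.tx_chainmask=3
	 */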
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}
	/*
	 * Query the TX/RX chainmask configuration.
	 *
	 * This is only relevant for 11n devices.
	 */
	ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
	ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

	/*
	 * Disable MRR with protected frames by default.
	 * Only 802.11n series NICs can handle this.
	 */
	sc->sc_mrrprot = 0;	/* XXX should be a capability */

	/*
	 * Query the enterprise mode information from the HAL.
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0,
	    &sc->sc_ent_cfg) == HAL_OK)
		sc->sc_use_ent = 1;
#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities.
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		uint32_t rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");

		sc->sc_mrrprot = 1;	/* XXX should be a capability */

		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			    | IEEE80211_HTC_AMPDU	/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU	/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839
							/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    |  IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);
		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		/*
		 * Setup TX and RX STBC based on what the HAL allows and
		 * the currently configured chainmask set.
		 * I.e. - don't enable STBC TX if only one chain is enabled.
		 * STBC RX is fine on a single RX chain; it just won't
		 * provide any real benefit.
		 */
		if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0,
		    NULL) == HAL_OK) {
			sc->sc_rx_stbc = 1;
			device_printf(sc->sc_dev,
			    "[HT] 1 stream STBC receive enabled\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM;
		}
		if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0,
		    NULL) == HAL_OK) {
			sc->sc_tx_stbc = 1;
			device_printf(sc->sc_dev,
			    "[HT] 1 stream STBC transmit enabled\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC;
		}

		(void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
		    &sc->sc_rts_aggr_limit);
		if (sc->sc_rts_aggr_limit != (64 * 1024))
			device_printf(sc->sc_dev,
			    "[HT] RTS aggregates limited to %d KiB\n",
			    sc->sc_rts_aggr_limit / 1024);

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif
	/*
	 * Initial aggregation settings.
	 */
	sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH;
	sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH;
	sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
	sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
	sc->sc_aggr_limit = ATH_AGGR_MAXSIZE;
	sc->sc_delim_min_pad = 0;
	/*
	 * Check if the hardware requires PCI register serialisation.
	 * Some of the Owl based MACs require this.
	 */
	if (ncpus > 1 &&
	    ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
	     0, NULL) == HAL_OK) {
		sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
		device_printf(sc->sc_dev,
		    "Enabling register serialisation\n");
	}
	/*
	 * Initialise the deferred completed RX buffer list.
	 */
	TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
	TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);
	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/*
	 * Call MI attach routine.
	 *
	 * WLAN serializer must _not_ be held for ieee80211_ifattach(),
	 * since it could dead-lock the domsg to netisrs.
	 */
	wlan_serialize_exit();
	ieee80211_ifattach(ic, macaddr);
	wlan_serialize_enter();
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;
	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;
#ifdef	ATH_ENABLE_11N
	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ic->ic_update_chw = ath_update_chw;
#endif	/* ATH_ENABLE_11N */
#ifdef	ATH_ENABLE_RADIOTAP_VENDOR_EXT
	/*
	 * There's one vendor bitmap entry in the RX radiotap
	 * header; make sure that's taken into account.
	 */
	ieee80211_radiotap_attachv(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
	    ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
	    ATH_RX_RADIOTAP_PRESENT);
#else
	/*
	 * No vendor bitmap/extensions are present.
	 */
	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
	    ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
	    ATH_RX_RADIOTAP_PRESENT);
#endif	/* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
	/*
	 * Setup the ALQ logging if required.
	 */
#ifdef	ATH_DEBUG_ALQ
	if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev));
	if_ath_alq_setcfg(&sc->sc_alq,
	    sc->sc_ah->ah_macVersion,
	    sc->sc_ah->ah_macRev,
	    sc->sc_ah->ah_phyRev,
	    sc->sc_ah->ah_magic);
#endif

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	ieee80211_announce(ic);
	ath_announce(sc);

	/*
	 * Put it to sleep for now.
	 */
	ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
	wlan_serialize_exit();

	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
	ath_txdma_teardown(sc);
	ath_rxdma_teardown(sc);
bad:
	if (ah != NULL)
		ath_hal_detach(ah);

	/*
	 * To work around scoping issues with CURVNET_SET/CURVNET_RESTORE..
	 */
#if !defined(__DragonFly__)
	if (ifp != NULL && ifp->if_vnet) {
		CURVNET_SET(ifp->if_vnet);
		if_free(ifp);
		CURVNET_RESTORE();
	} else if (ifp != NULL)
		if_free(ifp);
#endif
	sc->sc_invalid = 1;
	wlan_serialize_exit();

	return error;
}
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
	    __func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   ensure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */

	/*
	 * XXX Wake the hardware up first.  ath_stop() will still
	 * wake it up first, but I'd rather do it here just to
	 * ensure it's awake.
	 */
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_power_setpower(sc, HAL_PM_AWAKE);

	/*
	 * Stop things cleanly.
	 */
	ath_stop(ifp);

	wlan_serialize_enter();
	ieee80211_ifdetach(ifp->if_l2com);
	wlan_serialize_exit();
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);
#ifdef	ATH_DEBUG_ALQ
	if_ath_alq_tidyup(&sc->sc_alq);
#endif
	ath_lna_div_detach(sc);
	ath_btcoex_detach(sc);
	ath_spectral_detach(sc);
	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_txdma_teardown(sc);
	ath_rxdma_teardown(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */

	CURVNET_SET(ifp->if_vnet);
	if_free(ifp);
	CURVNET_RESTORE();

	return 0;
}
/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
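		/*
		 * Set the locally-administered (U/L) bit and encode the
		 * chosen index starting at bit 2 of the first address
		 * byte; reclaim_address() below undoes this encoding.
		 */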
		mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}
static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
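	/* Recover the index that assign_address() packed into byte 0. */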
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff & ~0x01;	/* XXX assumed initialiser */
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}
/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	free = 0;
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
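			/*
			 * Prefer a slot whose neighbours (modulo
			 * ATH_BCBUF, so the check wraps around) are also
			 * free; that maximises the spacing between
			 * staggered beacons.
			 */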
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}
static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = kmalloc(sizeof(struct ath_vap), M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons is implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
	}
	vap = &avp->av_vap;
	/* XXX can't hold mutex across if_alloc */
	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid, mac);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
		    __func__, error);
		goto bad2;
	}

	/* h/w crypto support */
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;
	/* override various methods */
	avp->av_recv_mgmt = vap->iv_recv_mgmt;
	vap->iv_recv_mgmt = ath_recv_mgmt;
	vap->iv_reset = ath_reset_vap;
	vap->iv_update_beacon = ath_beacon_update;
	avp->av_newstate = vap->iv_newstate;
	vap->iv_newstate = ath_newstate;
	avp->av_bmiss = vap->iv_bmiss;
	vap->iv_bmiss = ath_bmiss_vap;

	avp->av_node_ps = vap->iv_node_ps;
	vap->iv_node_ps = ath_node_powersave;

	avp->av_set_tim = vap->iv_set_tim;
	vap->iv_set_tim = ath_node_set_tim;

	avp->av_recv_pspoll = vap->iv_recv_pspoll;
	vap->iv_recv_pspoll = ath_node_recv_pspoll;
	/* Set default parameters */

	/*
	 * MACs earlier than the AR9300 series don't
	 * support a smaller MPDU density.
	 */
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
	/*
	 * All NICs can handle the maximum size, however
	 * AR5416 based MACs can only TX aggregates w/ RTS
	 * protection when the total aggregate size is <= 8k.
	 * However, for now that's enforced by the TX path.
	 */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
	avp->av_bslot = -1;
	if (needbeacon) {
		/*
		 * Allocate beacon state and setup the q for buffered
		 * multicast frames.  We know a beacon buffer is
		 * available because we checked above.
		 */
		avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
		TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
			/*
			 * Assign the vap to a beacon xmit slot.  As above
			 * this cannot fail to find a free one.
			 */
			avp->av_bslot = assign_bslot(sc);
			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
			    ("beacon slot %u not empty", avp->av_bslot));
			sc->sc_bslot[avp->av_bslot] = vap;
			sc->sc_nbcnvaps++;
		}
		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
			/*
			 * Multiple vaps are to transmit beacons and we
			 * have h/w support for TSF adjusting; enable
			 * use of staggered beacons.
			 */
			sc->sc_stagbeacons = 1;
		}
		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
	}
	ic->ic_opmode = ic_opmode;
	if (opmode != IEEE80211_M_WDS) {
		sc->sc_nvaps++;
		if (opmode == IEEE80211_M_STA)
			sc->sc_nstavaps++;
		if (opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps++;
	}
	switch (ic_opmode) {
	case IEEE80211_M_IBSS:
		sc->sc_opmode = HAL_M_IBSS;
		break;
	case IEEE80211_M_STA:
		sc->sc_opmode = HAL_M_STA;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (vap->iv_caps & IEEE80211_C_TDMA) {
			sc->sc_tdma = 1;
			/* NB: disable tsf adjust */
			sc->sc_stagbeacons = 0;
		}
		/*
		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
		 * just ap mode.
		 */
		/* fall thru... */
#endif
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		sc->sc_opmode = HAL_M_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_opmode = HAL_M_MONITOR;
		break;
	default:
		/* XXX should not happen */
		break;
	}
	if (sc->sc_hastsfadd) {
		/*
		 * Configure whether or not TSF adjust should be done.
		 */
		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
	}
	if (flags & IEEE80211_CLONE_NOBEACONS) {
		/*
		 * Enable s/w beacon miss handling.
		 */
		sc->sc_swbmiss = 1;
	}
	/* complete setup */
	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
	return vap;
bad2:
	reclaim_address(sc, mac);
	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
	kfree(avp, M_80211_VAP);
	return NULL;
}
static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
	if (ifp->if_flags & IFF_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		/* XXX Do all frames from all vaps/nodes need draining here? */
		ath_stoprecv(sc, 1);		/* stop recv side */
		ath_draintxq(sc, ATH_RESET_DEFAULT);	/* stop hw xmit side */
	}

	/* .. leave the hardware awake for now. */

	ieee80211_vap_detach(vap);
	/*
	 * XXX Danger Will Robinson! Danger!
	 *
	 * Because ieee80211_vap_detach() can queue a frame (the station
	 * disassociate message?) after we've drained the TXQ and
	 * flushed the software TXQ, we will end up with a frame queued
	 * to a node whose vap is about to be freed.
	 *
	 * To work around this, flush the hardware/software again.
	 * This may be racy - the ath task may be running and the packet
	 * may be being scheduled between sw->hw txq.  Tsk.
	 *
	 * TODO: figure out why a new node gets allocated somewhere around
	 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
	 * call!)
	 */
	ath_draintxq(sc, ATH_RESET_DEFAULT);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	kfree(avp, M_80211_VAP);
	if (ifp->if_flags & IFF_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);
	}

	/* Ok, let the hardware sleep. */
	ath_power_restore_power_state(sc);
}
void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
	    __func__, ifp->if_flags);

	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;

	ieee80211_suspend_all(ic);
	/*
	 * NB: don't worry about putting the chip in low power
	 * mode; pci will power off our socket on suspend and
	 * CardBus detaches the device.
	 *
	 * XXX TODO: well, that's great, except for non-cardbus
	 * PCI devices!
	 */

	/*
	 * XXX This doesn't wait until all pending taskqueue
	 * items and parallel transmit/receive/other threads
	 * have completed!
	 */
	ath_hal_intrset(sc->sc_ah, 0);
	taskqueue_block(sc->sc_tq);

	callout_stop_sync(&sc->sc_cal_ch);

	/*
	 * XXX ensure sc_invalid is 1
	 */

	/* Disable the PCIe PHY, complete with workarounds */
	ath_hal_enablepcie(sc->sc_ah, 1, 1);
}
/*
 * Reset the key cache since some parts do not reset the
 * contents on resume.  First we clear all entries, then
 * re-load keys that the 802.11 layer assumes are setup
 * in h/w.
 */
static void
ath_reset_keycache(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	int i;

	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);
	ath_power_restore_power_state(sc);
	ieee80211_crypto_reload_keys(ic);
}
/*
 * Fetch the current chainmask configuration based on the current
 * operating channel and options.
 */
static void
ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan)
{
	/*
	 * Set TX chainmask to the currently configured chainmask;
	 * the TX chainmask depends upon the current operating mode.
	 */
	sc->sc_cur_rxchainmask = sc->sc_rxchainmask;
	if (IEEE80211_IS_CHAN_HT(chan)) {
		sc->sc_cur_txchainmask = sc->sc_txchainmask;
	} else {
		sc->sc_cur_txchainmask = 1;
	}

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: TX chainmask is now 0x%x, RX is now 0x%x\n",
	    __func__,
	    sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
}
void
ath_resume(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
	    __func__, ifp->if_flags);

	/* Re-enable PCIe, re-enable the PCIe bus */
	ath_hal_enablepcie(ah, 0, 0);

	/*
	 * Must reset the chip before we reload the
	 * keycache as we were powered down on suspend.
	 */
	ath_update_chainmasks(sc,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan);
	ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);

	/* Ensure we set the current power state to on */
	ath_power_setselfgen(sc, HAL_PM_AWAKE);
	ath_power_set_power_state(sc, HAL_PM_AWAKE);
	ath_power_setpower(sc, HAL_PM_AWAKE);

	ath_hal_reset(ah, sc->sc_opmode,
	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
	    AH_FALSE, &status);
	ath_reset_keycache(sc);

	sc->sc_rx_stopped = 1;
	sc->sc_rx_resetted = 1;

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Let spectral at it in case spectral is enabled */
	ath_spectral_enable(sc, ic->ic_curchan);

	/*
	 * Let bluetooth coexistence at it in case it's needed for this
	 * channel.
	 */
	ath_btcoex_enable(sc, ic->ic_curchan);

	/*
	 * If we're doing TDMA, enforce the TXOP limitation for chips that
	 * support it.
	 */
	if (sc->sc_hasenforcetxop && sc->sc_tdma)
		ath_hal_setenforcetxop(sc->sc_ah, 1);
	else
		ath_hal_setenforcetxop(sc->sc_ah, 0);

	/* Restore the LED configuration */
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	if (sc->sc_resume_up)
		ieee80211_resume_all(ic);

	ath_power_restore_power_state(sc);
}
void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
	    __func__, ifp->if_flags);

	ath_stop(ifp);
	/* NB: no point powering down chip as we're about to reboot */
}
/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;
	uint32_t txqs;

	/*
	 * If we're inside a reset path, just print a warning and
	 * clear the ISR.  The reset routine will finish it for us.
	 */
	if (sc->sc_inreset_cnt) {
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		DPRINTF(sc, ATH_DEBUG_ANY,
		    "%s: in reset, ignoring: status=0x%x\n",
		    __func__, status);
		return;
	}
	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}

	if (!ath_hal_intrpend(ah)) {	/* shared irq, not for us */
		return;
	}

	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_flags & IFF_RUNNING) == 0) {
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		    __func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		ath_power_restore_power_state(sc);
		return;
	}
2065 * Figure out the reason(s) for the interrupt. Note
2066 * that the hal returns a pseudo-ISR that may include
2067 * bits we haven't explicitly enabled so we mask the
2068 * value to insure we only process bits we requested.
2070 ath_hal_getisr(ah, &status); /* NB: clears ISR too */
2071 DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
2072 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
2073 #ifdef ATH_DEBUG_ALQ
2074 if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate,
2076 #endif /* ATH_DEBUG_ALQ */
2077 #ifdef ATH_KTR_INTR_DEBUG
2078 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
2079 "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
2080 ah->ah_intrstate[0],
2081 ah->ah_intrstate[1],
2082 ah->ah_intrstate[2],
2083 ah->ah_intrstate[3],
2084 ah->ah_intrstate[6]);
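/*
 * NB: ah_intrstate[0] is the primary ISR; [1]..[6] hold the secondary
 * ISR_S0..ISR_S5 registers, hence ISR_S5 living at index 6 above.
 */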
2087 /* Squirrel away SYNC interrupt debugging */
2088 if (ah->ah_syncstate != 0) {
2090 for (i = 0; i < 32; i++)
2091 if (ah->ah_syncstate & (1 << i))
2092 sc->sc_intr_stats.sync_intr[i]++;
2095 status &= sc->sc_imask; /* discard unasked for bits */
2097 /* Short-circuit un-handled interrupts */
2098 if (status == 0x0) {
2102 ath_power_restore_power_state(sc);
2109 * Take a note that we're inside the interrupt handler, so
2110 * the reset routines know to wait.
2116 * Handle the interrupt. We won't run concurrent with the reset
2117 * or channel change routines as they'll wait for sc_intr_cnt
2118 * to be 0 before continuing.
2120 if (status & HAL_INT_FATAL) {
2121 sc->sc_stats.ast_hardware++;
2122 ath_hal_intrset(ah, 0); /* disable intr's until reset */
2123 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
2125 if (status & HAL_INT_SWBA) {
2127 * Software beacon alert--time to send a beacon.
2128 * Handle beacon transmission directly; deferring
2129 * this is too slow to meet timing constraints
2132 #ifdef IEEE80211_SUPPORT_TDMA
2134 if (sc->sc_tdmaswba == 0) {
2135 struct ieee80211com *ic = ifp->if_l2com;
2136 struct ieee80211vap *vap =
2137 TAILQ_FIRST(&ic->ic_vaps);
2138 ath_tdma_beacon_send(sc, vap);
2140 vap->iv_tdma->tdma_bintval;
2146 ath_beacon_proc(sc, 0);
2147 #ifdef IEEE80211_SUPPORT_SUPERG
2149 * Schedule the rx taskq in case there's no
2150 * traffic so any frames held on the staging
2151 * queue are aged and potentially flushed.
2153 sc->sc_rx.recv_sched(sc, 1);
2157 if (status & HAL_INT_RXEOL) {
2159 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL");
2160 if (! sc->sc_isedma) {
2163 * NB: the hardware should re-read the link when
2164 * RXE bit is written, but it doesn't work at
2165 * least on older hardware revs.
2167 sc->sc_stats.ast_rxeol++;
2169 * Disable RXEOL/RXORN - prevent an interrupt
2170 * storm until the PCU logic can be reset.
2171 * In case the interface is reset some other
2172 * way before "sc_kickpcu" is called, don't
2173 * modify sc_imask - that way if it is reset
2174 * by a call to ath_reset() somehow, the
2175 * interrupt mask will be correctly reprogrammed.
2177 imask = sc->sc_imask;
2178 imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
2179 ath_hal_intrset(ah, imask);
2181 * Only blank sc_rxlink if we've not yet kicked
2184 * This isn't entirely correct - the correct solution
2185 * would be to have a PCU lock and engage that for
2186 * the duration of the PCU fiddling; which would include
2187 * running the RX process. Otherwise we could end up
2188 * messing up the RX descriptor chain and making the
2189 * RX desc list much shorter.
2191 if (! sc->sc_kickpcu)
2192 sc->sc_rxlink = NULL;
2197 * Enqueue an RX proc to handle whatever
2198 * is in the RX queue.
2199 * This will then kick the PCU if required.
2201 sc->sc_rx.recv_sched(sc, 1);
2203 if (status & HAL_INT_TXURN) {
2204 sc->sc_stats.ast_txurn++;
2205 /* bump tx trigger level */
2206 ath_hal_updatetxtriglevel(ah, AH_TRUE);
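/*
 * A TX underrun means the MAC drained its FIFO faster than the bus
 * refilled it; raising the trigger level makes the MAC buffer more
 * data before it starts transmitting.
 */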
2209 * Handle both the legacy and RX EDMA interrupt bits.
2210 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC.
2212 if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) {
2213 sc->sc_stats.ast_rx_intr++;
2214 sc->sc_rx.recv_sched(sc, 1);
2216 if (status & HAL_INT_TX) {
2217 sc->sc_stats.ast_tx_intr++;
2219 * Grab all the currently set bits in the HAL txq bitmap
2220 * and blank them. This is the only place we should be
2223 if (! sc->sc_isedma) {
2226 ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
2227 ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3,
2228 "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x",
2231 sc->sc_txq_active | txqs);
2232 sc->sc_txq_active |= txqs;
2235 taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
2237 if (status & HAL_INT_BMISS) {
2238 sc->sc_stats.ast_bmiss++;
2239 taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
2241 if (status & HAL_INT_GTT)
2242 sc->sc_stats.ast_tx_timeout++;
2243 if (status & HAL_INT_CST)
2244 sc->sc_stats.ast_tx_cst++;
2245 if (status & HAL_INT_MIB) {
2246 sc->sc_stats.ast_mib++;
2249 * Disable interrupts until we service the MIB
2250 * interrupt; otherwise it will continue to fire.
2252 ath_hal_intrset(ah, 0);
2254 * Let the hal handle the event. We assume it will
2255 * clear whatever condition caused the interrupt.
2257 ath_hal_mibevent(ah, &sc->sc_halstats);
2259 * Don't reset the interrupt if we've just
2260 * kicked the PCU, or we may get a nested
2261 * RXEOL before the rxproc has had a chance
2264 if (sc->sc_kickpcu == 0)
2265 ath_hal_intrset(ah, sc->sc_imask);
2268 if (status & HAL_INT_RXORN) {
2269 /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
2270 ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN");
2271 sc->sc_stats.ast_rxorn++;
2273 if (status & HAL_INT_TSFOOR) {
2274 device_printf(sc->sc_dev, "%s: TSFOOR\n", __func__);
2275 sc->sc_syncbeacon = 1;
2283 ath_power_restore_power_state(sc);
2288 ath_fatal_proc(void *arg, int pending)
2290 struct ath_softc *sc = arg;
2291 struct ifnet *ifp = sc->sc_ifp;
2296 if_printf(ifp, "hardware error; resetting\n");
2298 * Fatal errors are unrecoverable. Typically these
2299 * are caused by DMA errors. Collect h/w state from
2300 * the hal so we can diagnose what's going on.
2302 wlan_serialize_enter();
2303 if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
2304 KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
2306 if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
2307 state[0], state[1] , state[2], state[3],
2308 state[4], state[5]);
2310 ath_reset(ifp, ATH_RESET_NOLOSS);
2311 wlan_serialize_exit();
2315 ath_bmiss_vap(struct ieee80211vap *vap)
2317 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2320 * Workaround phantom bmiss interrupts by sanity-checking
2321 * the time of our last rx'd frame. If it is within the
2322 * beacon miss interval then ignore the interrupt. If it's
2323 * truly a bmiss we'll get another interrupt soon and that'll
2324 * be dispatched up for processing. Note this applies only
2325 * for h/w beacon miss events.
2329 * XXX TODO: Just read the TSF during the interrupt path;
2330 * that way we don't have to wake up again just to read it
2334 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2337 if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
2338 struct ifnet *ifp = vap->iv_ic->ic_ifp;
2339 struct ath_softc *sc = ifp->if_softc;
2340 u_int64_t lastrx = sc->sc_lastrx;
2341 u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
2342 /* XXX should take a locked ref to iv_bss */
2343 u_int bmisstimeout =
2344 vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;
2346 DPRINTF(sc, ATH_DEBUG_BEACON,
2347 "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
2348 __func__, (unsigned long long) tsf,
2349 (unsigned long long)(tsf - lastrx),
2350 (unsigned long long) lastrx, bmisstimeout);
2352 if (tsf - lastrx <= bmisstimeout) {
2353 sc->sc_stats.ast_bmiss_phantom++;
2356 ath_power_restore_power_state(sc);
2364 * There's no need to keep the hardware awake during the call
2368 ath_power_restore_power_state(sc);
2372 * Attempt to force a beacon resync.
2374 sc->sc_syncbeacon = 1;
2376 ATH_VAP(vap)->av_bmiss(vap);
2379 /* XXX this needs a force wakeup! */
2381 ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
2386 if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize))
2388 KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
2389 *hangs = *(uint32_t *)sp;
2394 ath_bmiss_proc(void *arg, int pending)
2396 struct ath_softc *sc = arg;
2397 struct ifnet *ifp = sc->sc_ifp;
2400 DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
2403 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2406 ath_beacon_miss(sc);
2409 * Do a reset upon any beacon miss event.
2411 * It may be a non-recognised RX clear hang which needs a reset
2414 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
2415 ath_reset(ifp, ATH_RESET_NOLOSS);
2416 if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
2418 ath_reset(ifp, ATH_RESET_NOLOSS);
2419 ieee80211_beacon_miss(ifp->if_l2com);
2422 /* Force a beacon resync, in case they've drifted */
2423 sc->sc_syncbeacon = 1;
2426 ath_power_restore_power_state(sc);
2431 * Handle TKIP MIC setup to deal hardware that doesn't do MIC
2432 * calcs together with WME. If necessary disable the crypto
2433 * hardware and mark the 802.11 state so keys will be setup
2434 * with the MIC work done in software.
2437 ath_settkipmic(struct ath_softc *sc)
2439 struct ifnet *ifp = sc->sc_ifp;
2440 struct ieee80211com *ic = ifp->if_l2com;
2442 if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
2443 if (ic->ic_flags & IEEE80211_F_WME) {
2444 ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
2445 ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
2447 ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
2448 ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
2456 struct ath_softc *sc = (struct ath_softc *) arg;
2457 struct ifnet *ifp = sc->sc_ifp;
2458 struct ieee80211com *ic = ifp->if_l2com;
2459 struct ath_hal *ah = sc->sc_ah;
2462 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
2463 __func__, ifp->if_flags);
2467 * Force the sleep state awake.
2469 ath_power_setselfgen(sc, HAL_PM_AWAKE);
2470 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2471 ath_power_setpower(sc, HAL_PM_AWAKE);
2474 * Stop anything previously setup. This is safe
2475 * whether this is the first time through or not.
2477 ath_stop_locked(ifp);
2480 * The basic interface to setting the hardware in a good
2481 * state is ``reset''. On return the hardware is known to
2482 * be powered up and with interrupts disabled. This must
2483 * be followed by initialization of the appropriate bits
2484 * and then setup of the interrupt mask.
2487 ath_update_chainmasks(sc, ic->ic_curchan);
2488 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
2489 sc->sc_cur_rxchainmask);
2491 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
2492 if_printf(ifp, "unable to reset hardware; hal status %u\n",
2499 sc->sc_rx_stopped = 1;
2500 sc->sc_rx_resetted = 1;
2503 ath_chan_change(sc, ic->ic_curchan);
2505 /* Let DFS at it in case it's a DFS channel */
2506 ath_dfs_radar_enable(sc, ic->ic_curchan);
2508 /* Let spectral at it in case spectral is enabled */
2509 ath_spectral_enable(sc, ic->ic_curchan);
2512 * Let bluetooth coexistence at it in case it's needed for this channel
2514 ath_btcoex_enable(sc, ic->ic_curchan);
2517 * If we're doing TDMA, enforce the TXOP limitation for chips that
2520 if (sc->sc_hasenforcetxop && sc->sc_tdma)
2521 ath_hal_setenforcetxop(sc->sc_ah, 1);
2523 ath_hal_setenforcetxop(sc->sc_ah, 0);
2526 * Likewise this is set during reset so update
2527 * state cached in the driver.
2529 sc->sc_diversity = ath_hal_getdiversity(ah);
2530 sc->sc_lastlongcal = 0;
2531 sc->sc_resetcal = 1;
2532 sc->sc_lastcalreset = 0;
2534 sc->sc_lastshortcal = 0;
2535 sc->sc_doresetcal = AH_FALSE;
2537 * Beacon timers were cleared here; give ath_newstate()
2538 * a hint that the beacon timers should be poked when
2539 * things transition to the RUN state.
2544 * Setup the hardware after reset: the key cache
2545 * is filled as needed and the receive engine is
2546 * set going. Frame transmit is handled entirely
2547 * in the frame output path; there's nothing to do
2548 * here except setup the interrupt mask.
2550 if (ath_startrecv(sc) != 0) {
2551 if_printf(ifp, "unable to start recv logic\n");
2552 ath_power_restore_power_state(sc);
2558 * Enable interrupts.
2560 sc->sc_imask = HAL_INT_RX | HAL_INT_TX
2561 | HAL_INT_RXORN | HAL_INT_TXURN
2562 | HAL_INT_FATAL | HAL_INT_GLOBAL;
2565 * Enable RX EDMA bits. Note these overlap with
2566 * HAL_INT_RX and HAL_INT_RXDESC respectively.
2569 sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP);
2572 * If we're an EDMA NIC, we don't care about RXEOL.
2573 * Writing a new descriptor in will simply restart
2576 if (! sc->sc_isedma)
2577 sc->sc_imask |= HAL_INT_RXEOL;
2580 * Enable MIB interrupts when there are hardware phy counters.
2581 * Note we only do this (at the moment) for station mode.
2583 if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
2584 sc->sc_imask |= HAL_INT_MIB;
2587 * XXX add capability for this.
2589 * If we're in STA mode (and maybe IBSS?) then register for
2590 * TSFOOR interrupts.
2592 if (ic->ic_opmode == IEEE80211_M_STA)
2593 sc->sc_imask |= HAL_INT_TSFOOR;
2595 /* Enable global TX timeout and carrier sense timeout if available */
2596 if (ath_hal_gtxto_supported(ah))
2597 sc->sc_imask |= HAL_INT_GTT;
2599 DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
2600 __func__, sc->sc_imask);
2602 ifp->if_flags |= IFF_RUNNING;
2603 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
2604 ath_hal_intrset(ah, sc->sc_imask);
2606 ath_power_restore_power_state(sc);
2609 #ifdef ATH_TX99_DIAG
2610 if (sc->sc_tx99 != NULL)
2611 sc->sc_tx99->start(sc->sc_tx99);
2614 ieee80211_start_all(ic); /* start all vap's */
2618 ath_stop_locked(struct ifnet *ifp)
2620 struct ath_softc *sc = ifp->if_softc;
2621 struct ath_hal *ah = sc->sc_ah;
2623 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
2624 __func__, sc->sc_invalid, ifp->if_flags);
2626 ATH_LOCK_ASSERT(sc);
2629 * Wake the hardware up before fiddling with it.
2631 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2633 if (ifp->if_flags & IFF_RUNNING) {
2635 * Shutdown the hardware and driver:
2636 * reset 802.11 state machine
2638 * disable interrupts
2639 * turn off the radio
2640 * clear transmit machinery
2641 * clear receive machinery
2642 * drain and release tx queues
2643 * reclaim beacon resources
2644 * power down hardware
2646 * Note that some of this work is not possible if the
2647 * hardware is gone (invalid).
2649 #ifdef ATH_TX99_DIAG
2650 if (sc->sc_tx99 != NULL)
2651 sc->sc_tx99->stop(sc->sc_tx99);
2653 callout_stop_sync(&sc->sc_wd_ch);
2654 sc->sc_wd_timer = 0;
2655 ifp->if_flags &= ~IFF_RUNNING;
2656 if (!sc->sc_invalid) {
2657 if (sc->sc_softled) {
2658 callout_stop_sync(&sc->sc_ledtimer);
2659 ath_hal_gpioset(ah, sc->sc_ledpin,
2661 sc->sc_blinking = 0;
2663 ath_hal_intrset(ah, 0);
2665 /* XXX we should stop RX regardless of whether it's valid */
2666 if (!sc->sc_invalid) {
2667 ath_stoprecv(sc, 1);
2668 ath_hal_phydisable(ah);
2670 sc->sc_rxlink = NULL;
2671 ath_draintxq(sc, ATH_RESET_DEFAULT);
2672 ath_beacon_free(sc); /* XXX not needed */
2675 /* And now, restore the current power state */
2676 ath_power_restore_power_state(sc);
2680 * Wait until all pending TX/RX has completed.
2682 * This waits until all existing transmit, receive and interrupts
2683 * have completed. It's assumed that the caller has first
2684 * grabbed the reset lock so it doesn't try to do overlapping
2687 #define MAX_TXRX_ITERATIONS 100
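/*
 * With a 10ms sleep per iteration this bounds the wait at roughly one
 * second before we give up and print a warning.
 */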
2689 ath_txrx_stop_locked(struct ath_softc *sc)
2691 int i = MAX_TXRX_ITERATIONS;
2693 ATH_UNLOCK_ASSERT(sc);
2694 ATH_PCU_LOCK_ASSERT(sc);
2697 * Sleep until all the pending operations have completed.
2699 * The caller must ensure that reset has been incremented
2700 * or the pending operations may continue being queued.
2702 while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
2703 sc->sc_txstart_cnt || sc->sc_intr_cnt) {
2706 if (wlan_is_serialized()) {
2707 wlan_serialize_exit();
2708 lksleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
2709 msecs_to_ticks(10));
2710 wlan_serialize_enter();
2712 lksleep(sc, &sc->sc_pcu_mtx, 0, "ath_txrx_stop",
2713 msecs_to_ticks(10));
2719 device_printf(sc->sc_dev,
2720 "%s: didn't finish after %d iterations\n",
2721 __func__, MAX_TXRX_ITERATIONS);
2723 #undef MAX_TXRX_ITERATIONS
2727 ath_txrx_stop(struct ath_softc *sc)
2729 ATH_UNLOCK_ASSERT(sc);
2730 ATH_PCU_UNLOCK_ASSERT(sc);
2733 ath_txrx_stop_locked(sc);
2739 ath_txrx_start(struct ath_softc *sc)
2742 taskqueue_unblock(sc->sc_tq);
2746 * Grab the reset lock, and wait around until no one else
2747 * is trying to do anything with it.
2749 * This is totally horrible but we can't hold this lock for
2750 * long enough to do TX/RX or we end up with net80211/ip stack
2751 * LORs and eventual deadlock.
2753 * "dowait" signals whether to spin, waiting for the reset
2754 * lock count to reach 0. This should (for now) only be used
2755 * during the reset path, as the rest of the code may not
2756 * be locking-reentrant enough to behave correctly.
2758 * Another, cleaner way should be found to serialise all of
2761 #define MAX_RESET_ITERATIONS 25
2763 ath_reset_grablock(struct ath_softc *sc, int dowait)
2766 int i = MAX_RESET_ITERATIONS;
2768 ATH_PCU_LOCK_ASSERT(sc);
2770 if (sc->sc_inreset_cnt == 0) {
2781 * 1 tick is likely not enough time for long calibrations
2782 * to complete. So we should wait quite a while.
2784 #if defined(__DragonFly__)
2785 tsleep(&sc->sc_inreset_cnt, 0,
2786 "ath_reset_grablock", (hz + 99) / 100);
2788 pause("ath_reset_grablock", msecs_to_ticks(100));
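/*
 * NB: the DragonFly path above sleeps ceil(hz/100) ticks (~10ms) per
 * iteration while the FreeBSD path pauses a full 100ms, so the total
 * wait bound differs by an order of magnitude between the two.
 */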
2795 * We always increment the refcounter, regardless
2796 * of whether we succeeded to get it in an exclusive
2799 sc->sc_inreset_cnt++;
2802 device_printf(sc->sc_dev,
2803 "%s: didn't finish after %d iterations\n",
2804 __func__, MAX_RESET_ITERATIONS);
2807 device_printf(sc->sc_dev,
2808 "%s: warning, recursive reset path!\n",
2813 #undef MAX_RESET_ITERATIONS
2816 * XXX TODO: write ath_reset_releaselock
2820 ath_stop(struct ifnet *ifp)
2822 struct ath_softc *sc = ifp->if_softc;
2825 ath_stop_locked(ifp);
2830 * Reset the hardware w/o losing operational state. This is
2831 * basically a more efficient way of doing ath_stop, ath_init,
2832 * followed by state transitions to the current 802.11
2833 * operational state. Used to recover from various errors and
2834 * to reset or reload hardware state.
2837 ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
2839 struct ath_softc *sc = ifp->if_softc;
2840 struct ieee80211com *ic = ifp->if_l2com;
2841 struct ath_hal *ah = sc->sc_ah;
2845 DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
2847 /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
2848 ATH_PCU_UNLOCK_ASSERT(sc);
2849 ATH_UNLOCK_ASSERT(sc);
2851 /* Try to stop any further TX/RX from occurring */
2852 taskqueue_block(sc->sc_tq);
2855 * Wake the hardware up.
2858 ath_power_set_power_state(sc, HAL_PM_AWAKE);
2864 * Grab the reset lock before TX/RX is stopped.
2866 * This is needed to ensure that when the TX/RX actually does finish,
2867 * no further TX/RX/reset runs in parallel with this.
2869 if (ath_reset_grablock(sc, 1) == 0) {
2870 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
2874 /* disable interrupts */
2875 ath_hal_intrset(ah, 0);
2878 * Now, ensure that any in progress TX/RX completes before we
2881 ath_txrx_stop_locked(sc);
2886 * Regardless of whether we're doing a no-loss flush or
2887 * not, stop the PCU and handle what's in the RX queue.
2888 * That way frames aren't dropped which shouldn't be.
2890 ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
2894 * Should now wait for pending TX/RX to complete
2895 * and block future ones from occurring. This needs to be
2896 * done before the TX queue is drained.
2898 ath_draintxq(sc, reset_type); /* stop xmit side */
2900 ath_settkipmic(sc); /* configure TKIP MIC handling */
2901 /* NB: indicate channel change so we do a full reset */
2902 ath_update_chainmasks(sc, ic->ic_curchan);
2903 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
2904 sc->sc_cur_rxchainmask);
2905 if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
2906 if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
2908 sc->sc_diversity = ath_hal_getdiversity(ah);
2911 sc->sc_rx_stopped = 1;
2912 sc->sc_rx_resetted = 1;
2915 /* Let DFS at it in case it's a DFS channel */
2916 ath_dfs_radar_enable(sc, ic->ic_curchan);
2918 /* Let spectral at it in case spectral is enabled */
2919 ath_spectral_enable(sc, ic->ic_curchan);
2922 * Let bluetooth coexistence at it in case it's needed for this channel
2924 ath_btcoex_enable(sc, ic->ic_curchan);
2927 * If we're doing TDMA, enforce the TXOP limitation for chips that
2930 if (sc->sc_hasenforcetxop && sc->sc_tdma)
2931 ath_hal_setenforcetxop(sc->sc_ah, 1);
2933 ath_hal_setenforcetxop(sc->sc_ah, 0);
2935 if (ath_startrecv(sc) != 0) /* restart recv */
2936 if_printf(ifp, "%s: unable to start recv logic\n", __func__);
2938 * We may be doing a reset in response to an ioctl
2939 * that changes the channel so update any state that
2940 * might change as a result.
2942 ath_chan_change(sc, ic->ic_curchan);
2943 if (sc->sc_beacons) { /* restart beacons */
2944 #ifdef IEEE80211_SUPPORT_TDMA
2946 ath_tdma_config(sc, NULL);
2949 ath_beacon_config(sc, NULL);
2953 * Release the reset lock and re-enable interrupts here.
2954 * If an interrupt was being processed in ath_intr(),
2955 * it would disable interrupts at this point. So we have
2956 * to atomically enable interrupts and decrement the
2957 * reset counter - this way ath_intr() doesn't end up
2958 * disabling interrupts without a corresponding enable
2959 * in the rest or channel change path.
2961 * Grab the TX reference in case we need to transmit.
2962 * That way a parallel transmit doesn't.
2965 sc->sc_inreset_cnt--;
2966 sc->sc_txstart_cnt++;
2967 /* XXX only do this if sc_inreset_cnt == 0? */
2968 ath_hal_intrset(ah, sc->sc_imask);
2972 * TX and RX can be started here. If it were started with
2973 * sc_inreset_cnt > 0, the TX and RX path would abort.
2974 * Thus if this is a nested call through the reset or
2975 * channel change code, TX completion will occur but
2976 * RX completion and ath_start / ath_tx_start will not
2980 /* Restart TX/RX as needed */
2983 /* XXX TODO: we need to hold the tx refcount here! */
2985 /* Restart TX completion and pending TX */
2986 if (reset_type == ATH_RESET_NOLOSS) {
2987 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
2988 if (ATH_TXQ_SETUP(sc, i)) {
2989 ATH_TXQ_LOCK(&sc->sc_txq[i]);
2990 ath_txq_restart_dma(sc, &sc->sc_txq[i]);
2991 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
2994 ath_txq_sched(sc, &sc->sc_txq[i]);
3001 * This may have been set during an ath_start() call which
3002 * set this once it detected a concurrent TX was going on.
3005 IF_LOCK(&ifp->if_snd);
3006 #if defined(__DragonFly__)
3007 ifq_clr_oactive(&ifp->if_snd);
3009 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
3011 IF_UNLOCK(&ifp->if_snd);
3014 ath_power_restore_power_state(sc);
3018 sc->sc_txstart_cnt--;
3021 /* Handle any frames in the TX queue */
3023 * XXX should this be done by the caller, rather than
3026 ath_tx_kick(sc); /* restart xmit */
3031 ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
3033 struct ieee80211com *ic = vap->iv_ic;
3034 struct ifnet *ifp = ic->ic_ifp;
3035 struct ath_softc *sc = ifp->if_softc;
3036 struct ath_hal *ah = sc->sc_ah;
3039 case IEEE80211_IOC_TXPOWER:
3041 * If per-packet TPC is enabled, then we have nothing
3042 * to do; otherwise we need to force the global limit.
3043 * All this can happen directly; no need to reset.
3045 if (!ath_hal_gettpc(ah))
3046 ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
3049 /* XXX? Full or NOLOSS? */
3050 return ath_reset(ifp, ATH_RESET_FULL);
3054 _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
3058 ATH_TXBUF_LOCK_ASSERT(sc);
3060 if (btype == ATH_BUFTYPE_MGMT)
3061 bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
3063 bf = TAILQ_FIRST(&sc->sc_txbuf);
3066 sc->sc_stats.ast_tx_getnobuf++;
3068 if (bf->bf_flags & ATH_BUF_BUSY) {
3069 sc->sc_stats.ast_tx_getbusybuf++;
3074 if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
3075 if (btype == ATH_BUFTYPE_MGMT)
3076 TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
3078 TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
3082 * This shouldn't happen; however, just to be
3083 * safe print a warning and fudge the txbuf
3086 if (sc->sc_txbuf_cnt < 0) {
3087 device_printf(sc->sc_dev,
3088 "%s: sc_txbuf_cnt < 0?\n",
3090 sc->sc_txbuf_cnt = 0;
3097 /* XXX should check which list, mgmt or otherwise */
3098 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
3099 TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
3100 "out of xmit buffers" : "xmit buffer busy");
3104 /* XXX TODO: should do this at buffer list initialisation */
3105 /* XXX (then, ensure the buffer has the right flag set) */
3107 if (btype == ATH_BUFTYPE_MGMT)
3108 bf->bf_flags |= ATH_BUF_MGMT;
3110 bf->bf_flags &= (~ATH_BUF_MGMT);
3112 /* Valid bf here; clear some basic fields */
3113 bf->bf_next = NULL; /* XXX just to be sure */
3114 bf->bf_last = NULL; /* XXX again, just to be sure */
3115 bf->bf_comp = NULL; /* XXX again, just to be sure */
3116 bzero(&bf->bf_state, sizeof(bf->bf_state));
3119 * Track the descriptor ID only if doing EDMA
3121 if (sc->sc_isedma) {
3122 bf->bf_descid = sc->sc_txbuf_descid;
3123 sc->sc_txbuf_descid++;
3130 * When retrying a software frame, buffers marked ATH_BUF_BUSY
3131 * can't be thrown back on the queue as they could still be
3132 * in use by the hardware.
3134 * This duplicates the buffer, or returns NULL.
3136 * The descriptor is also copied but the link pointers and
3137 * the DMA segments aren't copied; this frame should thus
3138 * be again passed through the descriptor setup/chain routines
3139 * so the link is correct.
3141 * The caller must free the buffer using ath_freebuf().
3144 ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf)
3146 struct ath_buf *tbf;
3148 tbf = ath_getbuf(sc,
3149 (bf->bf_flags & ATH_BUF_MGMT) ?
3150 ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL);
3152 return NULL; /* XXX failure? Why? */
3155 tbf->bf_next = NULL;
3156 tbf->bf_nseg = bf->bf_nseg;
3157 tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE;
3158 tbf->bf_status = bf->bf_status;
3159 tbf->bf_m = bf->bf_m;
3160 tbf->bf_node = bf->bf_node;
3161 KASSERT((bf->bf_node != NULL), ("%s: bf_node=NULL!", __func__));
3162 /* will be setup by the chain/setup function */
3163 tbf->bf_lastds = NULL;
3164 /* for now, last == self */
3166 tbf->bf_comp = bf->bf_comp;
3168 /* NOTE: DMA segments will be setup by the setup/chain functions */
3170 /* The caller has to re-init the descriptor + links */
3173 * Free the DMA mapping here, before we NULL the mbuf.
3174 * We must only call bus_dmamap_unload() once per mbuf chain
3175 * or behaviour is undefined.
3177 if (bf->bf_m != NULL) {
3179 * XXX is this POSTWRITE call required?
3181 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3182 BUS_DMASYNC_POSTWRITE);
3183 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3190 memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));
3196 ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
3201 bf = _ath_getbuf_locked(sc, btype);
3203 * If a mgmt buffer was requested but we're out of those,
3204 * try requesting a normal one.
3206 if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
3207 bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
3208 ATH_TXBUF_UNLOCK(sc);
3210 struct ifnet *ifp = sc->sc_ifp;
3212 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
3213 sc->sc_stats.ast_tx_qstop++;
3214 IF_LOCK(&ifp->if_snd);
3215 #if defined(__DragonFly__)
3216 ifq_set_oactive(&ifp->if_snd);
3218 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3220 IF_UNLOCK(&ifp->if_snd);
3225 #if !defined(__DragonFly__)
3228 ath_qflush(struct ifnet *ifp)
3237 * Transmit a single frame.
3239 * net80211 will free the node reference if the transmit
3240 * fails, so don't free the node reference here.
3243 ath_transmit(struct ifnet *ifp, struct mbuf *m)
3245 struct ieee80211com *ic = ifp->if_l2com;
3246 struct ath_softc *sc = ic->ic_ifp->if_softc;
3247 struct ieee80211_node *ni;
3254 * Tell the reset path that we're currently transmitting.
3257 if (sc->sc_inreset_cnt > 0) {
3258 DPRINTF(sc, ATH_DEBUG_XMIT,
3259 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
3261 IF_LOCK(&ifp->if_snd);
3262 sc->sc_stats.ast_tx_qstop++;
3263 #if defined(__DragonFly__)
3264 /* removed, DragonFly uses OACTIVE to control if_start calls */
3265 /*ifq_set_oactive(&ifp->if_snd);*/
3267 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3269 IF_UNLOCK(&ifp->if_snd);
3270 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish");
3271 #if defined(__DragonFly__)
3275 return (ENOBUFS); /* XXX should be EINVAL or? */
3277 sc->sc_txstart_cnt++;
3280 /* Wake the hardware up already */
3282 ath_power_set_power_state(sc, HAL_PM_AWAKE);
3285 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start");
3287 * Grab the TX lock - it's ok to do this here; we haven't
3288 * yet started transmitting.
3293 * Node reference, if there's one.
3295 ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
3298 * Enforce how deep a node queue can get.
3300 * XXX it would be nicer if we kept an mbuf queue per
3301 * node and only whacked them into ath_bufs when we
3302 * are ready to schedule some traffic from them.
3303 * .. that may come later.
3305 * XXX we should also track the per-node hardware queue
3306 * depth so it is easy to limit the _SUM_ of the swq and
3307 * hwq frames. Since we only schedule two HWQ frames
3308 * at a time, this should be OK for now.
3310 if ((!(m->m_flags & M_EAPOL)) &&
3311 (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) {
3312 sc->sc_stats.ast_tx_nodeq_overflow++;
3320 * Check how many TX buffers are available.
3322 * If this is for non-EAPOL traffic, just leave some
3323 * space free in order for buffer cloning and raw
3324 * frame transmission to occur.
3326 * If it's for EAPOL traffic, ignore this for now.
3327 * Management traffic will be sent via the raw transmit
3328 * method which bypasses this check.
3330 * This is needed to ensure that EAPOL frames during
3331 * (re) keying have a chance to go out.
3333 * See kern/138379 for more information.
3335 if ((!(m->m_flags & M_EAPOL)) &&
3336 (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) {
3337 sc->sc_stats.ast_tx_nobuf++;
3345 * Grab a TX buffer and associated resources.
3347 * If it's an EAPOL frame, allocate a MGMT ath_buf.
3348 * That way temporary buffer exhaustion in the data path
3349 * doesn't leave us without the ability
3350 * to transmit management frames.
3352 * Otherwise allocate a normal buffer.
3354 if (m->m_flags & M_EAPOL)
3355 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
3357 bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
3361 * If we failed to allocate a buffer, fail.
3363 * We shouldn't fail normally, due to the check
3366 sc->sc_stats.ast_tx_nobuf++;
3367 IF_LOCK(&ifp->if_snd);
3368 #if defined(__DragonFly__)
3369 /* removed, DragonFly uses OACTIVE to control if_start calls */
3370 /*ifq_set_oactive(&ifp->if_snd);*/
3372 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
3374 IF_UNLOCK(&ifp->if_snd);
3382 * At this point we have a buffer; so we need to free it
3383 * if we hit any error conditions.
3387 * Check for fragmentation. If this frame
3388 * has been broken up verify we have enough
3389 * buffers to send all the fragments so all
3393 if ((m->m_flags & M_FRAG) &&
3394 !ath_txfrag_setup(sc, &frags, m, ni)) {
3395 DPRINTF(sc, ATH_DEBUG_XMIT,
3396 "%s: out of txfrag buffers\n", __func__);
3397 sc->sc_stats.ast_tx_nofrag++;
3398 #if defined(__DragonFly__)
3401 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3408 * At this point if we have any TX fragments, then we will
3409 * have bumped the node reference once for each of those.
3413 * XXX Is there anything actually _enforcing_ that the
3414 * fragments are being transmitted in one hit, rather than
3415 * being interleaved with other transmissions on that
3418 * The ATH TX output lock is the only thing serialising this
3423 * Calculate the "next fragment" length field in ath_buf
3424 * in order to let the transmit path know enough about
3425 * what to next write to the hardware.
3427 if (m->m_flags & M_FRAG) {
3428 struct ath_buf *fbf = bf;
3429 struct ath_buf *n_fbf = NULL;
3430 struct mbuf *fm = m->m_nextpkt;
3433 * We need to walk the list of fragments and set
3434 * the next size to the following buffer.
3435 * However, the first buffer isn't in the frag
3436 * list, so we have to do some gymnastics here.
3438 TAILQ_FOREACH(n_fbf, &frags, bf_list) {
3439 fbf->bf_nextfraglen = fm->m_pkthdr.len;
3446 * Bump the ifp output counter.
3448 * XXX should use atomics?
3450 #if defined(__DragonFly__)
3453 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
3457 * Pass the frame to the h/w for transmission.
3458 * Fragmented frames have each frag chained together
3459 * with m_nextpkt. We know there are sufficient ath_buf's
3460 * to send all the frags because of work done by
3461 * ath_txfrag_setup. We leave m_nextpkt set while
3462 * calling ath_tx_start so it can use it to extend the
3463 * tx duration to cover the subsequent frag and
3464 * so it can reclaim all the mbufs in case of an error;
3465 * ath_tx_start clears m_nextpkt once it commits to
3466 * handing the frame to the hardware.
3468 * Note: if this fails, then the mbufs are freed but
3469 * not the node reference.
3471 next = m->m_nextpkt;
3472 if (ath_tx_start(sc, ni, bf, m)) {
3474 #if defined(__DragonFly__)
3477 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
3483 ath_returnbuf_head(sc, bf);
3485 * Free the rest of the node references and
3486 * buffers for the fragment list.
3488 ath_txfrag_cleanup(sc, &frags, ni);
3489 ATH_TXBUF_UNLOCK(sc);
3495 * Check here if the node is in power save state.
3497 ath_tx_update_tim(sc, ni, 1);
3501 * Beware of state changing between frags.
3502 * XXX check sta power-save state?
3504 if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
3505 DPRINTF(sc, ATH_DEBUG_XMIT,
3506 "%s: flush fragmented packet, state %s\n",
3508 ieee80211_state_name[ni->ni_vap->iv_state]);
3514 bf = TAILQ_FIRST(&frags);
3515 KASSERT(bf != NULL, ("no buf for txfrag"));
3516 TAILQ_REMOVE(&frags, bf, bf_list);
3521 * Bump watchdog timer.
3523 sc->sc_wd_timer = 5;
3529 * Finished transmitting!
3532 sc->sc_txstart_cnt--;
3535 /* Sleep the hardware if required */
3537 ath_power_restore_power_state(sc);
3540 ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished");
3546 ath_media_change(struct ifnet *ifp)
3548 int error = ieee80211_media_change(ifp);
3549 /* NB: only the fixed rate can change and that doesn't need a reset */
3550 return (error == ENETRESET ? 0 : error);
3554 * Block/unblock tx+rx processing while a key change is done.
3555 * We assume the caller serializes key management operations
3556 * so we only need to worry about synchronization with other
3557 * uses that originate in the driver.
3560 ath_key_update_begin(struct ieee80211vap *vap)
3562 struct ifnet *ifp = vap->iv_ic->ic_ifp;
3563 struct ath_softc *sc = ifp->if_softc;
3565 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
3566 taskqueue_block(sc->sc_tq);
3570 ath_key_update_end(struct ieee80211vap *vap)
3572 struct ifnet *ifp = vap->iv_ic->ic_ifp;
3573 struct ath_softc *sc = ifp->if_softc;
3575 DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
3576 taskqueue_unblock(sc->sc_tq);
3580 ath_update_promisc(struct ifnet *ifp)
3582 struct ath_softc *sc = ifp->if_softc;
3585 /* configure rx filter */
3587 ath_power_set_power_state(sc, HAL_PM_AWAKE);
3588 rfilt = ath_calcrxfilter(sc);
3589 ath_hal_setrxfilter(sc->sc_ah, rfilt);
3590 ath_power_restore_power_state(sc);
3593 DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
3597 * Driver-internal mcast update call.
3599 * Assumes the hardware is already awake.
3602 ath_update_mcast_hw(struct ath_softc *sc)
3604 struct ifnet *ifp = sc->sc_ifp;
3607 /* calculate and install multicast filter */
3608 if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
3609 struct ifmultiaddr *ifma;
3611 * Merge multicast addresses to form the hardware filter.
3613 mfilt[0] = mfilt[1] = 0;
3614 #if defined(__DragonFly__)
3617 if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */
3619 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3624 /* calculate XOR of eight 6bit values */
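/*
 * I.e. the 48-bit address is folded down to a 6-bit hash (0..63)
 * which selects one bit of the 64-bit multicast filter held in
 * two 32-bit words.
 */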
3625 dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
3626 val = LE_READ_4(dl + 0);
3627 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
3628 val = LE_READ_4(dl + 3);
3629 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
3631 mfilt[pos / 32] |= (1 << (pos % 32));
3633 #if defined(__DragonFly__)
3636 if_maddr_runlock(ifp);
3639 mfilt[0] = mfilt[1] = ~0;
3641 ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
3643 DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
3644 __func__, mfilt[0], mfilt[1]);
3648 * Called from the net80211 layer - force the hardware
3649 * awake before operating.
3652 ath_update_mcast(struct ifnet *ifp)
3654 struct ath_softc *sc = ifp->if_softc;
3657 ath_power_set_power_state(sc, HAL_PM_AWAKE);
3660 ath_update_mcast_hw(sc);
3663 ath_power_restore_power_state(sc);
3668 ath_mode_init(struct ath_softc *sc)
3670 struct ifnet *ifp = sc->sc_ifp;
3671 struct ath_hal *ah = sc->sc_ah;
3674 /* configure rx filter */
3675 rfilt = ath_calcrxfilter(sc);
3676 ath_hal_setrxfilter(ah, rfilt);
3678 /* configure operational mode */
3679 ath_hal_setopmode(ah);
3681 #if !defined(__DragonFly__)
3682 DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE,
3683 "%s: ah=%p, ifp=%p, if_addr=%p\n",
3687 (ifp == NULL) ? NULL : ifp->if_addr);
3690 /* handle any link-level address change */
3691 ath_hal_setmac(ah, IF_LLADDR(ifp));
3693 /* calculate and install multicast filter */
3694 ath_update_mcast_hw(sc);
3698 * Set the slot time based on the current setting.
3701 ath_setslottime(struct ath_softc *sc)
3703 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
3704 struct ath_hal *ah = sc->sc_ah;
3707 if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
3709 else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
3711 else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
3712 /* honor short/long slot time only in 11g */
3713 /* XXX shouldn't honor on pure g or turbo g channel */
3714 if (ic->ic_flags & IEEE80211_F_SHSLOT)
3715 usec = HAL_SLOT_TIME_9;
3717 usec = HAL_SLOT_TIME_20;
3719 usec = HAL_SLOT_TIME_9;
3721 DPRINTF(sc, ATH_DEBUG_RESET,
3722 "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
3723 __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
3724 ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
3726 /* Wake the hardware before updating the slot time */
3728 ath_power_set_power_state(sc, HAL_PM_AWAKE);
3729 ath_hal_setslottime(ah, usec);
3730 ath_power_restore_power_state(sc);
3731 sc->sc_updateslot = OK;
3736 * Callback from the 802.11 layer to update the
3737 * slot time based on the current setting.
3740 ath_updateslot(struct ifnet *ifp)
3742 struct ath_softc *sc = ifp->if_softc;
3743 struct ieee80211com *ic = ifp->if_l2com;
3746 * When not coordinating the BSS, change the hardware
3747 * immediately. For other operating modes we defer the change
3748 * until beacon updates have propagated to the stations.
3750 * XXX sc_updateslot isn't changed behind a lock?
3752 if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
3753 ic->ic_opmode == IEEE80211_M_MBSS)
3754 sc->sc_updateslot = UPDATE;
3756 ath_setslottime(sc);
3760 * Append the contents of src to dst; both queues
3761 * are assumed to be locked.
3764 ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
3767 ATH_TXQ_LOCK_ASSERT(src);
3768 ATH_TXQ_LOCK_ASSERT(dst);
3770 TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
3771 dst->axq_link = src->axq_link;
3772 src->axq_link = NULL;
3773 dst->axq_depth += src->axq_depth;
3774 dst->axq_aggr_depth += src->axq_aggr_depth;
3776 src->axq_aggr_depth = 0;
3780 * Reset the hardware, with no loss.
3782 * This can't be used for a general case reset.
3785 ath_reset_proc(void *arg, int pending)
3787 struct ath_softc *sc = arg;
3788 struct ifnet *ifp = sc->sc_ifp;
3791 if_printf(ifp, "%s: resetting\n", __func__);
3793 wlan_serialize_enter();
3794 ath_reset(ifp, ATH_RESET_NOLOSS);
3795 wlan_serialize_exit();
3799 * Reset the hardware after detecting beacons have stopped.
3802 ath_bstuck_proc(void *arg, int pending)
3804 struct ath_softc *sc = arg;
3805 struct ifnet *ifp = sc->sc_ifp;
3808 wlan_serialize_enter();
3809 if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
3810 if_printf(ifp, "bb hang detected (0x%x)\n", hangs);
3812 #ifdef ATH_DEBUG_ALQ
3813 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON))
3814 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL);
3817 if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3819 sc->sc_stats.ast_bstuck++;
3821 * This assumes that there's no simultaneous channel mode change
3824 ath_reset(ifp, ATH_RESET_NOLOSS);
3825 wlan_serialize_exit();
3829 ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3831 bus_addr_t *paddr = (bus_addr_t*) arg;
3832 KASSERT(error == 0, ("error %u on bus_dma callback", error));
3833 *paddr = segs->ds_addr;
3837 * Allocate the descriptors and appropriate DMA tag/setup.
3839 * For some situations (eg EDMA TX completion), there isn't a requirement
3840 * for the ath_buf entries to be allocated.
3843 ath_descdma_alloc_desc(struct ath_softc *sc,
3844 struct ath_descdma *dd, ath_bufhead *head,
3845 const char *name, int ds_size, int ndesc)
3847 #define DS2PHYS(_dd, _ds) \
3848 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3849 #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
3850 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
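/*
 * DS2PHYS translates a descriptor's kernel virtual address into its bus
 * address by offsetting into the loaded DMA map; ATH_DESC_4KB_BOUND_CHECK
 * is true when a descriptor of _len bytes at bus address _daddr would
 * cross a 4KB page boundary.
 */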
3851 struct ifnet *ifp = sc->sc_ifp;
3854 dd->dd_descsize = ds_size;
3856 DPRINTF(sc, ATH_DEBUG_RESET,
3857 "%s: %s DMA: %u desc, %d bytes per descriptor\n",
3858 __func__, name, ndesc, dd->dd_descsize);
3861 dd->dd_desc_len = dd->dd_descsize * ndesc;
3864 * Merlin work-around:
3865 * Descriptors that cross the 4KB boundary can't be used.
3866 * Assume one skipped descriptor per 4KB page.
3868 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3869 int numpages = dd->dd_desc_len / 4096;
3870 dd->dd_desc_len += ds_size * numpages;
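/*
 * E.g. 512 descriptors of 64 bytes span 8 pages, so space for 8 extra
 * descriptors is reserved to cover the one skipped per page.
 */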
3874 * Setup DMA descriptor area.
3876 * BUS_DMA_ALLOCNOW is not used; we never use bounce
3877 * buffers for the descriptors themselves.
3879 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
3880 PAGE_SIZE, 0, /* alignment, bounds */
3881 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
3882 BUS_SPACE_MAXADDR, /* highaddr */
3883 NULL, NULL, /* filter, filterarg */
3884 dd->dd_desc_len, /* maxsize */
3886 dd->dd_desc_len, /* maxsegsize */
3888 #if !defined(__DragonFly__)
3889 NULL, /* lockfunc */
3894 if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3898 /* allocate descriptors */
3899 error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3900 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3903 if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3904 "error %u\n", ndesc, dd->dd_name, error);
3908 error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3909 dd->dd_desc, dd->dd_desc_len,
3910 ath_load_cb, &dd->dd_desc_paddr,
3913 if_printf(ifp, "unable to map %s descriptors, error %u\n",
3914 dd->dd_name, error);
3918 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3919 __func__, dd->dd_name, (uint8_t *) dd->dd_desc,
3920 (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr,
3921 /*XXX*/ (u_long) dd->dd_desc_len);
3926 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3928 bus_dma_tag_destroy(dd->dd_dmat);
3929 memset(dd, 0, sizeof(*dd));
3932 #undef ATH_DESC_4KB_BOUND_CHECK
3936 ath_descdma_setup(struct ath_softc *sc,
3937 struct ath_descdma *dd, ath_bufhead *head,
3938 const char *name, int ds_size, int nbuf, int ndesc)
3940 #define DS2PHYS(_dd, _ds) \
3941 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3942 #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
3943 ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
3944 struct ifnet *ifp = sc->sc_ifp;
3947 int i, bsize, error;
3949 /* Allocate descriptors */
3950 error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size,
3953 /* Assume any errors during allocation were dealt with */
3958 ds = (uint8_t *) dd->dd_desc;
3960 /* allocate rx buffers */
3961 bsize = sizeof(struct ath_buf) * nbuf;
3962 bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT | M_ZERO);
3964 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3965 dd->dd_name, bsize);
3971 for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) {
3972 bf->bf_desc = (struct ath_desc *) ds;
3973 bf->bf_daddr = DS2PHYS(dd, ds);
3974 if (! ath_hal_split4ktrans(sc->sc_ah)) {
3976 * Merlin WAR: Skip descriptor addresses which
3977 * cause 4KB boundary crossing along any point
3978 * in the descriptor.
3980 if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
3982 /* Start at the next page */
3983 ds += 0x1000 - (bf->bf_daddr & 0xFFF);
3984 bf->bf_desc = (struct ath_desc *) ds;
3985 bf->bf_daddr = DS2PHYS(dd, ds);
3988 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3991 if_printf(ifp, "unable to create dmamap for %s "
3992 "buffer %u, error %u\n", dd->dd_name, i, error);
3993 ath_descdma_cleanup(sc, dd, head);
3996 bf->bf_lastds = bf->bf_desc; /* Just an initial value */
3997 TAILQ_INSERT_TAIL(head, bf, bf_list);
4001 * XXX TODO: ensure that ds doesn't overflow the descriptor
4002 * allocation otherwise weird stuff will occur and crash your
4006 /* XXX this should likely just call ath_descdma_cleanup() */
4008 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
4009 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
4010 bus_dma_tag_destroy(dd->dd_dmat);
4011 memset(dd, 0, sizeof(*dd));
4014 #undef ATH_DESC_4KB_BOUND_CHECK
4018 * Allocate ath_buf entries but no descriptor contents.
4020 * This is for RX EDMA where the descriptors are the header part of
4024 ath_descdma_setup_rx_edma(struct ath_softc *sc,
4025 struct ath_descdma *dd, ath_bufhead *head,
4026 const char *name, int nbuf, int rx_status_len)
4028 struct ifnet *ifp = sc->sc_ifp;
4030 int i, bsize, error;
4032 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n",
4033 __func__, name, nbuf);
4037 * This is (mostly) purely for show. We're not allocating any actual
4038 * descriptors here as EDMA RX has the descriptor be part
4041 * However, dd_desc_len is used by ath_descdma_free() to determine
4042 * whether we have already freed this DMA mapping.
4044 dd->dd_desc_len = rx_status_len * nbuf;
4045 dd->dd_descsize = rx_status_len;
4047 /* allocate rx buffers */
4048 bsize = sizeof(struct ath_buf) * nbuf;
4049 bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT | M_ZERO);
4051 if_printf(ifp, "malloc of %s buffers failed, size %u\n",
4052 dd->dd_name, bsize);
4059 for (i = 0; i < nbuf; i++, bf++) {
4062 bf->bf_lastds = NULL; /* Just an initial value */
4064 error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
4067 if_printf(ifp, "unable to create dmamap for %s "
4068 "buffer %u, error %u\n", dd->dd_name, i, error);
4069 ath_descdma_cleanup(sc, dd, head);
4072 TAILQ_INSERT_TAIL(head, bf, bf_list);
4076 memset(dd, 0, sizeof(*dd));
4081 ath_descdma_cleanup(struct ath_softc *sc,
4082 struct ath_descdma *dd, ath_bufhead *head)
4085 struct ieee80211_node *ni;
4088 if (dd->dd_dmamap != 0) {
4089 bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
4090 bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
4091 bus_dma_tag_destroy(dd->dd_dmat);
4095 TAILQ_FOREACH(bf, head, bf_list) {
4098 * XXX warn if there's buffers here.
4099 * XXX it should have been freed by the
4103 if (do_warning == 0) {
4105 device_printf(sc->sc_dev,
4106 "%s: %s: mbuf should've been"
4107 " unmapped/freed!\n",
4111 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4112 BUS_DMASYNC_POSTREAD);
4113 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4117 if (bf->bf_dmamap != NULL) {
4118 bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
4119 bf->bf_dmamap = NULL;
4125 * Reclaim node reference.
4127 ieee80211_free_node(ni);
4135 if (dd->dd_bufptr != NULL)
4136 kfree(dd->dd_bufptr, M_ATHDEV);
4137 memset(dd, 0, sizeof(*dd));
4141 ath_desc_alloc(struct ath_softc *sc)
4145 error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
4146 "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER);
4150 sc->sc_txbuf_cnt = ath_txbuf;
4152 error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
4153 "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt,
4156 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
4161 * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the
4162 * flag doesn't have to be set in ath_getbuf_locked().
4165 error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
4166 "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1);
4168 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
4169 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
4170 &sc->sc_txbuf_mgmt);
4177 ath_desc_free(struct ath_softc *sc)
4180 if (sc->sc_bdma.dd_desc_len != 0)
4181 ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
4182 if (sc->sc_txdma.dd_desc_len != 0)
4183 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
4184 if (sc->sc_txdma_mgmt.dd_desc_len != 0)
4185 ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
4186 &sc->sc_txbuf_mgmt);
4189 static struct ieee80211_node *
4190 ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4192 struct ieee80211com *ic = vap->iv_ic;
4193 struct ath_softc *sc = ic->ic_ifp->if_softc;
4194 const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
4195 struct ath_node *an;
4197 an = kmalloc(space, M_80211_NODE, M_INTWAIT | M_ZERO);
4202 ath_rate_node_init(sc, an);
4204 /* Setup the mutex - there's no associd yet so set the name to NULL */
4205 ksnprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
4206 device_get_nameunit(sc->sc_dev), an);
4207 lockinit(&an->an_mtx, an->an_name, 0, 0);
4209 /* XXX setup ath_tid */
4210 ath_tx_tid_init(sc, an);
4212 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, mac, ":", an);
4213 return &an->an_node;
4217 ath_node_cleanup(struct ieee80211_node *ni)
4219 struct ieee80211com *ic = ni->ni_ic;
4220 struct ath_softc *sc = ic->ic_ifp->if_softc;
4222 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__,
4223 ni->ni_macaddr, ":", ATH_NODE(ni));
4225 /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */
4226 ath_tx_node_flush(sc, ATH_NODE(ni));
4227 ath_rate_node_cleanup(sc, ATH_NODE(ni));
4228 sc->sc_node_cleanup(ni);
4232 ath_node_free(struct ieee80211_node *ni)
4234 struct ieee80211com *ic = ni->ni_ic;
4235 struct ath_softc *sc = ic->ic_ifp->if_softc;
4237 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__,
4238 ni->ni_macaddr, ":", ATH_NODE(ni));
4239 lockuninit(&ATH_NODE(ni)->an_mtx);
4240 sc->sc_node_free(ni);
4244 ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
4246 struct ieee80211com *ic = ni->ni_ic;
4247 struct ath_softc *sc = ic->ic_ifp->if_softc;
4248 struct ath_hal *ah = sc->sc_ah;
4250 *rssi = ic->ic_node_getrssi(ni);
4251 if (ni->ni_chan != IEEE80211_CHAN_ANYC)
4252 *noise = ath_hal_getchannoise(ah, ni->ni_chan);
4254 *noise = -95; /* nominally correct */
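/*
 * NB: with no channel (IEEE80211_CHAN_ANYC) there is nothing to
 * query, so fall back to a typical noise floor value.
 */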
4258 * Set the default antenna.
4261 ath_setdefantenna(struct ath_softc *sc, u_int antenna)
4263 struct ath_hal *ah = sc->sc_ah;
4265 /* XXX block beacon interrupts */
4266 ath_hal_setdefantenna(ah, antenna);
4267 if (sc->sc_defant != antenna)
4268 sc->sc_stats.ast_ant_defswitch++;
4269 sc->sc_defant = antenna;
4270 sc->sc_rxotherant = 0;
4274 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
4276 txq->axq_qnum = qnum;
4279 txq->axq_aggr_depth = 0;
4280 txq->axq_intrcnt = 0;
4281 txq->axq_link = NULL;
4282 txq->axq_softc = sc;
4283 TAILQ_INIT(&txq->axq_q);
4284 TAILQ_INIT(&txq->axq_tidq);
4285 TAILQ_INIT(&txq->fifo.axq_q);
4286 ATH_TXQ_LOCK_INIT(sc, txq);
4290 * Setup a h/w transmit queue.
4292 static struct ath_txq *
4293 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
4295 #define N(a) (sizeof(a)/sizeof(a[0]))
4296 struct ath_hal *ah = sc->sc_ah;
4300 memset(&qi, 0, sizeof(qi));
4301 qi.tqi_subtype = subtype;
4302 qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
4303 qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
4304 qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
4306 * Enable interrupts only for EOL and DESC conditions.
4307 * We mark tx descriptors to receive a DESC interrupt
4308 * when a tx queue gets deep; otherwise waiting for the
4309 * EOL to reap descriptors. Note that this is done to
4310 * reduce interrupt load and this only defers reaping
4311 * descriptors, never transmitting frames. Aside from
4312 * reducing interrupts this also permits more concurrency.
4313 * The only potential downside is if the tx queue backs
4314 * up, in which case the top half of the kernel may back up
4315 * due to a lack of tx descriptors.
4318 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE |
4319 HAL_TXQ_TXOKINT_ENABLE;
4321 qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE |
4322 HAL_TXQ_TXDESCINT_ENABLE;
4324 qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
4327 * NB: don't print a message, this happens
4328 * normally on parts with too few tx queues
4332 if (qnum >= N(sc->sc_txq)) {
4333 device_printf(sc->sc_dev,
4334 "hal qnum %u out of range, max %zu!\n",
4335 qnum, N(sc->sc_txq));
4336 ath_hal_releasetxqueue(ah, qnum);
4339 if (!ATH_TXQ_SETUP(sc, qnum)) {
4340 ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
4341 sc->sc_txqsetup |= 1<<qnum;
4343 return &sc->sc_txq[qnum];
4348 * Setup a hardware data transmit queue for the specified
4349 * access control. The hal may not support all requested
4350 * queues in which case it will return a reference to a
4351 * previously setup queue. We record the mapping from ac's
4352 * to h/w queues for use by ath_tx_start and also track
4353 * the set of h/w queues being used to optimize work in the
4354 * transmit interrupt handler and related routines.
4357 ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
4359 #define N(a) (sizeof(a)/sizeof(a[0]))
4360 struct ath_txq *txq;
4362 if (ac >= N(sc->sc_ac2q)) {
4363 device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
4364 ac, N(sc->sc_ac2q));
4367 txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
4370 sc->sc_ac2q[ac] = txq;
4378 * Update WME parameters for a transmit queue.
4381 ath_txq_update(struct ath_softc *sc, int ac)
4383 #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1)
4384 #define ATH_TXOP_TO_US(v) (v<<5)
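/*
 * E.g. a logcwmin of 4 yields a cwmin of (1<<4)-1 = 15 slots, and a
 * txopLimit of 94 (in 32us units) becomes 94<<5 = 3008us.
 */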
4385 struct ifnet *ifp = sc->sc_ifp;
4386 struct ieee80211com *ic = ifp->if_l2com;
4387 struct ath_txq *txq = sc->sc_ac2q[ac];
4388 struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
4389 struct ath_hal *ah = sc->sc_ah;
4392 ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
4393 #ifdef IEEE80211_SUPPORT_TDMA
4396 * AIFS is zero so there's no pre-transmit wait. The
4397 * burst time defines the slot duration and is configured
4398 * through net80211. The QCU is setup to not do post-xmit
4399 * back off, lockout all lower-priority QCU's, and fire
4400 * off the DMA beacon alert timer which is setup based
4401 * on the slot configuration.
4403 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4404 | HAL_TXQ_TXERRINT_ENABLE
4405 | HAL_TXQ_TXURNINT_ENABLE
4406 | HAL_TXQ_TXEOLINT_ENABLE
4408 | HAL_TXQ_BACKOFF_DISABLE
4409 | HAL_TXQ_ARB_LOCKOUT_GLOBAL
4413 qi.tqi_readyTime = sc->sc_tdmaslotlen;
4414 qi.tqi_burstTime = qi.tqi_readyTime;
4418 * XXX shouldn't this just use the default flags
4419 * used in the previous queue setup?
4421 qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
4422 | HAL_TXQ_TXERRINT_ENABLE
4423 | HAL_TXQ_TXDESCINT_ENABLE
4424 | HAL_TXQ_TXURNINT_ENABLE
4425 | HAL_TXQ_TXEOLINT_ENABLE
4427 qi.tqi_aifs = wmep->wmep_aifsn;
4428 qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
4429 qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
4430 qi.tqi_readyTime = 0;
4431 qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
4432 #ifdef IEEE80211_SUPPORT_TDMA
4436 DPRINTF(sc, ATH_DEBUG_RESET,
4437 "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
4438 __func__, txq->axq_qnum, qi.tqi_qflags,
4439 qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);
4441 if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
4442 if_printf(ifp, "unable to update hardware queue "
4443 "parameters for %s traffic!\n",
4444 ieee80211_wme_acnames[ac]);
4447 ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
4450 #undef ATH_TXOP_TO_US
4451 #undef ATH_EXPONENT_TO_VALUE
4455 * Callback from the 802.11 layer to update WME parameters.
4458 ath_wme_update(struct ieee80211com *ic)
4460 struct ath_softc *sc = ic->ic_ifp->if_softc;
4462 return !ath_txq_update(sc, WME_AC_BE) ||
4463 !ath_txq_update(sc, WME_AC_BK) ||
4464 !ath_txq_update(sc, WME_AC_VI) ||
4465 !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
4469 * Reclaim resources for a setup queue.
4472 ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
4475 ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
4476 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
4477 ATH_TXQ_LOCK_DESTROY(txq);
4481 * Reclaim all tx queue resources.
4484 ath_tx_cleanup(struct ath_softc *sc)
4488 ATH_TXBUF_LOCK_DESTROY(sc);
4489 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4490 if (ATH_TXQ_SETUP(sc, i))
4491 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
4495 * Return h/w rate index for an IEEE rate (w/o basic rate bit)
4496 * using the current rates in sc_rixmap.
4499 ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
4501 int rix = sc->sc_rixmap[rate];
4502 /* NB: return lowest rix for invalid rate */
4503 return (rix == 0xff ? 0 : rix);
4507 ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts,
4510 struct ieee80211_node *ni = bf->bf_node;
4511 struct ifnet *ifp = sc->sc_ifp;
4512 struct ieee80211com *ic = ifp->if_l2com;
4515 if (ts->ts_status == 0) {
4516 u_int8_t txant = ts->ts_antenna;
4517 sc->sc_stats.ast_ant_tx[txant]++;
4518 sc->sc_ant_tx[txant]++;
4519 if (ts->ts_finaltsi != 0)
4520 sc->sc_stats.ast_tx_altrate++;
4521 pri = M_WME_GETAC(bf->bf_m);
4522 if (pri >= WME_AC_VO)
4523 ic->ic_wme.wme_hipri_traffic++;
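/* NB: a frame ack'd by the peer is proof of life; reload the inactivity timer */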
4524 if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)
4525 ni->ni_inact = ni->ni_inact_reload;
4527 if (ts->ts_status & HAL_TXERR_XRETRY)
4528 sc->sc_stats.ast_tx_xretries++;
4529 if (ts->ts_status & HAL_TXERR_FIFO)
4530 sc->sc_stats.ast_tx_fifoerr++;
4531 if (ts->ts_status & HAL_TXERR_FILT)
4532 sc->sc_stats.ast_tx_filtered++;
4533 if (ts->ts_status & HAL_TXERR_XTXOP)
4534 sc->sc_stats.ast_tx_xtxop++;
4535 if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED)
4536 sc->sc_stats.ast_tx_timerexpired++;
4538 if (bf->bf_m->m_flags & M_FF)
4539 sc->sc_stats.ast_ff_txerr++;
4541 /* XXX when is this valid? */
4542 if (ts->ts_flags & HAL_TX_DESC_CFG_ERR)
4543 sc->sc_stats.ast_tx_desccfgerr++;
4545 * This can be valid for successful frame transmission!
4546 * If there's a TX FIFO underrun during aggregate transmission,
4547 * the MAC will pad the rest of the aggregate with delimiters.
4548 * If a BA is returned, the frame is marked as "OK" and it's up
4549 * to the TX completion code to notice which frames weren't
4550 * successfully transmitted.
4552 if (ts->ts_flags & HAL_TX_DATA_UNDERRUN)
4553 sc->sc_stats.ast_tx_data_underrun++;
4554 if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN)
4555 sc->sc_stats.ast_tx_delim_underrun++;
4557 sr = ts->ts_shortretry;
4558 lr = ts->ts_longretry;
4559 sc->sc_stats.ast_tx_shortretry += sr;
4560 sc->sc_stats.ast_tx_longretry += lr;
4565 * The default completion. If fail is 1, this means
4566 * "please don't retry the frame, and just return -1 status
4567 * to the net80211 stack".
4570 ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4572 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4578 st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ?
4579 ts->ts_status : HAL_TXERR_XRETRY;
4582 if (bf->bf_state.bfs_dobaw)
4583 device_printf(sc->sc_dev,
4584 "%s: bf %p: seqno %d: dobaw should've been cleared!\n",
4587 SEQNO(bf->bf_state.bfs_seqno));
4589 if (bf->bf_next != NULL)
4590 device_printf(sc->sc_dev,
4591 "%s: bf %p: seqno %d: bf_next not NULL!\n",
4594 SEQNO(bf->bf_state.bfs_seqno));
4597 * Check if the node software queue is empty; if so
4598 * then clear the TIM.
4600 * This needs to be done before the buffer is freed as
4601 * otherwise the node reference will have been released
4602 * and the node may not actually exist any longer.
4604 * XXX I don't like this belonging here, but it's cleaner
4605 * to do it here right now than in all the other places
4606 * where ath_tx_default_comp() is called.
4608 * XXX TODO: during drain, ensure that the callback is
4609 * being called so we get a chance to update the TIM.
4613 ath_tx_update_tim(sc, bf->bf_node, 0);
4618 * Do any tx complete callback. Note this must
4619 * be done before releasing the node reference.
4620 * This will free the mbuf, release the net80211
4621 * node and recycle the ath_buf.
4623 ath_tx_freebuf(sc, bf, st);
4627 * Update rate control with the given completion status.
4630 ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
4631 struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen,
4632 int nframes, int nbad)
4634 struct ath_node *an;
4636 /* Only for unicast frames */
4641 ATH_NODE_UNLOCK_ASSERT(an);
4643 if ((ts->ts_status & HAL_TXERR_FILT) == 0) {
4645 ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad);
4646 ATH_NODE_UNLOCK(an);
4651 * Process the completion of the given buffer.
4653 * This calls the rate control update and then the buffer completion.
4654 * This will either free the buffer or requeue it. In any case, the
4655 * bf pointer should be treated as invalid after this function is called.
4658 ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq,
4659 struct ath_tx_status *ts, struct ath_buf *bf)
4661 struct ieee80211_node *ni = bf->bf_node;
4663 ATH_TX_UNLOCK_ASSERT(sc);
4664 ATH_TXQ_UNLOCK_ASSERT(txq);
4666 /* If unicast frame, update general statistics */
4668 /* update statistics */
4669 ath_tx_update_stats(sc, ts, bf);
4673 * Call the completion handler.
4674 * The completion handler is responsible for
4675 * calling the rate control code.
4677 * Frames with no completion handler get the
4678 * rate control code called here.
4680 if (bf->bf_comp == NULL) {
4681 if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
4682 (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) {
4684 * XXX assume this isn't an aggregate
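* (hence nframes = 1 below, and nbad = 1 iff the single frame failed)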
4687 ath_tx_update_ratectrl(sc, ni,
4688 bf->bf_state.bfs_rc, ts,
4689 bf->bf_state.bfs_pktlen, 1,
4690 (ts->ts_status == 0 ? 0 : 1));
4692 ath_tx_default_comp(sc, bf, 0);
4694 bf->bf_comp(sc, bf, 0);
4700 * Process completed xmit descriptors from the specified queue.
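* NB: returns the number of frames that were ack'd, which callers
* use to refresh sc_lastrx.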
4701 * Kick the packet scheduler if needed. This can occur from this
4705 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
4707 struct ath_hal *ah = sc->sc_ah;
4709 struct ath_desc *ds;
4710 struct ath_tx_status *ts;
4711 struct ieee80211_node *ni;
4712 #ifdef IEEE80211_SUPPORT_SUPERG
4713 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
4714 #endif /* IEEE80211_SUPPORT_SUPERG */
4718 DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
4719 __func__, txq->axq_qnum,
4720 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4723 ATH_KTR(sc, ATH_KTR_TXCOMP, 4,
4724 "ath_tx_processq: txq=%u head %p link %p depth %p",
4726 (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4733 txq->axq_intrcnt = 0; /* reset periodic desc intr count */
4734 bf = TAILQ_FIRST(&txq->axq_q);
4736 ATH_TXQ_UNLOCK(txq);
4739 ds = bf->bf_lastds; /* XXX must be setup correctly! */
4740 ts = &bf->bf_status.ds_txstat;
4742 status = ath_hal_txprocdesc(ah, ds, ts);
4744 if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
4745 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4747 else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0))
4748 ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4751 #ifdef ATH_DEBUG_ALQ
4752 if (if_ath_alq_checkdebug(&sc->sc_alq,
4753 ATH_ALQ_EDMA_TXSTATUS)) {
4754 if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
4755 sc->sc_tx_statuslen,
4760 if (status == HAL_EINPROGRESS) {
4761 ATH_KTR(sc, ATH_KTR_TXCOMP, 3,
4762 "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS",
4763 txq->axq_qnum, bf, ds);
4764 ATH_TXQ_UNLOCK(txq);
4767 ATH_TXQ_REMOVE(txq, bf, bf_list);
4772 if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) {
4773 device_printf(sc->sc_dev,
4774 "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n",
4778 bf->bf_state.bfs_tx_queue);
4780 if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) {
4781 device_printf(sc->sc_dev,
4782 "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n",
4786 bf->bf_last->bf_state.bfs_tx_queue);
4790 if (txq->axq_depth > 0) {
4792 * More frames follow. Mark the buffer busy
4793 * so it's not re-used while the hardware may
4794 * still re-read the link field in the descriptor.
4796 * Use the last buffer in an aggregate as that
4797 * is where the hardware may be - intermediate
4798 * descriptors won't be "busy".
4800 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4802 txq->axq_link = NULL;
4804 bf->bf_last->bf_flags |= ATH_BUF_BUSY;
4806 if (bf->bf_state.bfs_aggr)
4807 txq->axq_aggr_depth--;
4811 ATH_KTR(sc, ATH_KTR_TXCOMP, 5,
4812 "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x",
4813 txq->axq_qnum, bf, ds, ni, ts->ts_status);
4815 * If a unicast frame was ack'd, update the RSSI,
4816 * including the last rx time used to
4817 * work around phantom bmiss interrupts.
4819 if (ni != NULL && ts->ts_status == 0 &&
4820 ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
4822 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
4823 ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
4826 ATH_TXQ_UNLOCK(txq);
4829 * Update statistics and call completion
4831 ath_tx_process_buf_completion(sc, txq, ts, bf);
4833 /* XXX at this point, bf and ni may be totally invalid */
4835 #ifdef IEEE80211_SUPPORT_SUPERG
4837 * Flush fast-frame staging queue when traffic slows.
4839 if (txq->axq_depth <= 1)
4840 ieee80211_ff_flush(ic, txq->axq_ac);
4843 /* Kick the software TXQ scheduler */
4846 ath_txq_sched(sc, txq);
4850 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4851 "ath_tx_processq: txq=%u: done",
4857 #define TXQACTIVE(t, q) ( (t) & (1 << (q)))
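/* NB: 't' is a snapshot of sc_txq_active; TXQACTIVE tests whether hw queue 'q' has work pending */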
4860 * Deferred processing of transmit interrupt; special-cased
4861 * for a single hardware transmit queue (e.g. 5210 and 5211).
4864 ath_tx_proc_q0(void *arg, int npending)
4866 struct ath_softc *sc = arg;
4867 struct ifnet *ifp = sc->sc_ifp;
4871 sc->sc_txproc_cnt++;
4872 txqs = sc->sc_txq_active;
4873 sc->sc_txq_active &= ~txqs;
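/*
 * NB: snapshot-and-clear the active queue mask; any queue flagged
 * by the interrupt path after this point schedules another task
 * run rather than being lost.
 */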
4877 ath_power_set_power_state(sc, HAL_PM_AWAKE);
4880 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4881 "ath_tx_proc_q0: txqs=0x%08x", txqs);
4883 if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1))
4884 /* XXX why is lastrx updated in tx code? */
4885 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4886 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4887 ath_tx_processq(sc, sc->sc_cabq, 1);
4888 IF_LOCK(&ifp->if_snd);
4889 #if defined(__DragonFly__)
4890 ifq_clr_oactive(&ifp->if_snd);
4892 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4894 IF_UNLOCK(&ifp->if_snd);
4895 sc->sc_wd_timer = 0;
4898 ath_led_event(sc, sc->sc_txrix);
4901 sc->sc_txproc_cnt--;
4905 ath_power_restore_power_state(sc);
4912 * Deferred processing of transmit interrupt; special-cased
4913 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
4916 ath_tx_proc_q0123(void *arg, int npending)
4918 struct ath_softc *sc = arg;
4919 struct ifnet *ifp = sc->sc_ifp;
4924 sc->sc_txproc_cnt++;
4925 txqs = sc->sc_txq_active;
4926 sc->sc_txq_active &= ~txqs;
4930 ath_power_set_power_state(sc, HAL_PM_AWAKE);
4933 ATH_KTR(sc, ATH_KTR_TXCOMP, 1,
4934 "ath_tx_proc_q0123: txqs=0x%08x", txqs);
4937 * Process each active queue.
4940 if (TXQACTIVE(txqs, 0))
4941 nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1);
4942 if (TXQACTIVE(txqs, 1))
4943 nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1);
4944 if (TXQACTIVE(txqs, 2))
4945 nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1);
4946 if (TXQACTIVE(txqs, 3))
4947 nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1);
4948 if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum))
4949 ath_tx_processq(sc, sc->sc_cabq, 1);
4951 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
4953 IF_LOCK(&ifp->if_snd);
4954 #if defined(__DragonFly__)
4955 ifq_clr_oactive(&ifp->if_snd);
4957 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
4959 IF_UNLOCK(&ifp->if_snd);
4960 sc->sc_wd_timer = 0;
4963 ath_led_event(sc, sc->sc_txrix);
4966 sc->sc_txproc_cnt--;
4970 ath_power_restore_power_state(sc);
4977 * Deferred processing of transmit interrupt.
4980 ath_tx_proc(void *arg, int npending)
4982 struct ath_softc *sc = arg;
4983 struct ifnet *ifp = sc->sc_ifp;
4988 sc->sc_txproc_cnt++;
4989 txqs = sc->sc_txq_active;
4990 sc->sc_txq_active &= ~txqs;
4994 ath_power_set_power_state(sc, HAL_PM_AWAKE);
4997 ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs);
5000 * Process each active queue.
5003 for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5004 if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i))
5005 nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1);
5007 sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5009 /* XXX check this inside of IF_LOCK? */
5010 IF_LOCK(&ifp->if_snd);
5011 #if defined(__DragonFly__)
5012 ifq_clr_oactive(&ifp->if_snd);
5014 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5016 IF_UNLOCK(&ifp->if_snd);
5017 sc->sc_wd_timer = 0;
5020 ath_led_event(sc, sc->sc_txrix);
5023 sc->sc_txproc_cnt--;
5027 ath_power_restore_power_state(sc);
5035 * Deferred processing of TXQ rescheduling.
5038 ath_txq_sched_tasklet(void *arg, int npending)
5040 struct ath_softc *sc = arg;
5043 /* XXX is skipping ok? */
5046 if (sc->sc_inreset_cnt > 0) {
5047 device_printf(sc->sc_dev,
5048 "%s: sc_inreset_cnt > 0; skipping\n", __func__);
5053 sc->sc_txproc_cnt++;
5057 ath_power_set_power_state(sc, HAL_PM_AWAKE);
5061 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
5062 if (ATH_TXQ_SETUP(sc, i)) {
5063 ath_txq_sched(sc, &sc->sc_txq[i]);
5069 ath_power_restore_power_state(sc);
5073 sc->sc_txproc_cnt--;
5078 ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
5081 ATH_TXBUF_LOCK_ASSERT(sc);
5083 if (bf->bf_flags & ATH_BUF_MGMT)
5084 TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
5086 TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
5088 if (sc->sc_txbuf_cnt > ath_txbuf) {
5089 device_printf(sc->sc_dev,
5090 "%s: sc_txbuf_cnt > %d?\n",
5093 sc->sc_txbuf_cnt = ath_txbuf;
5099 ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
5102 ATH_TXBUF_LOCK_ASSERT(sc);
5104 if (bf->bf_flags & ATH_BUF_MGMT)
5105 TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
5107 TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
5109 if (sc->sc_txbuf_cnt > ATH_TXBUF) {
5110 device_printf(sc->sc_dev,
5111 "%s: sc_txbuf_cnt > %d?\n",
5114 sc->sc_txbuf_cnt = ATH_TXBUF;
5120 * Free the holding buffer if it exists
5123 ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq)
5125 ATH_TXBUF_UNLOCK_ASSERT(sc);
5126 ATH_TXQ_LOCK_ASSERT(txq);
5128 if (txq->axq_holdingbf == NULL)
5131 txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY;
5134 ath_returnbuf_tail(sc, txq->axq_holdingbf);
5135 ATH_TXBUF_UNLOCK(sc);
5137 txq->axq_holdingbf = NULL;
5141 * Add this buffer to the holding queue, freeing the previous
5145 ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf)
5147 struct ath_txq *txq;
5149 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
5151 ATH_TXBUF_UNLOCK_ASSERT(sc);
5152 ATH_TXQ_LOCK_ASSERT(txq);
5154 /* XXX assert ATH_BUF_BUSY is set */
5156 /* XXX assert the tx queue is under the max number */
5157 if (bf->bf_state.bfs_tx_queue >= HAL_NUM_TX_QUEUES) {
5158 device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n",
5161 bf->bf_state.bfs_tx_queue);
5162 bf->bf_flags &= ~ATH_BUF_BUSY;
5163 ath_returnbuf_tail(sc, bf);
5166 ath_txq_freeholdingbuf(sc, txq);
5167 txq->axq_holdingbf = bf;
5171 * Return a buffer to the pool and update the 'busy' flag on the
5172 * previous 'tail' entry.
5174 * This _must_ only be called when the buffer is involved in a completed
5175 * TX. The logic is that if it was part of an active TX, the previous
5176 * buffer on the list is now not involved in a halted TX DMA queue, waiting
5177 * for restart (eg for TDMA.)
5179 * The caller must free the mbuf and recycle the node reference.
5181 * XXX This method of handling busy / holding buffers is insanely stupid.
5182 * It requires bf_state.bfs_tx_queue to be correctly assigned. It would
5183 * be much nicer if buffers in the processq() methods would instead be
5184 * always completed there (pushed onto a txq or ath_bufhead) so we knew
5185 * exactly what hardware queue they came from in the first place.
5188 ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
5190 struct ath_txq *txq;
5192 txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
5194 KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
5195 KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
5198 * If this buffer is busy, push it onto the holding queue.
5200 if (bf->bf_flags & ATH_BUF_BUSY) {
5202 ath_txq_addholdingbuf(sc, bf);
5203 ATH_TXQ_UNLOCK(txq);
5208 * Not a busy buffer, so free normally
5211 ath_returnbuf_tail(sc, bf);
5212 ATH_TXBUF_UNLOCK(sc);
5216 * This is currently used by ath_tx_draintxq() and
5217 * ath_tx_tid_free_pkts().
5219 * It recycles a single ath_buf.
5222 ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
5224 struct ieee80211_node *ni = bf->bf_node;
5225 struct mbuf *m0 = bf->bf_m;
5228 * Make sure that we only sync/unload if there's an mbuf.
5229 * If not (eg we cloned a buffer), the unload will have already been done.
5232 if (bf->bf_m != NULL) {
5233 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
5234 BUS_DMASYNC_POSTWRITE);
5235 bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
5241 /* Free the buffer, it's not needed any longer */
5242 ath_freebuf(sc, bf);
5244 /* Pass the buffer back to net80211 - completing it */
5245 ieee80211_tx_complete(ni, m0, status);
5248 static struct ath_buf *
5249 ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq)
5253 ATH_TXQ_LOCK_ASSERT(txq);
5256 * Drain the FIFO queue first, then if it's
5257 * empty, move to the normal frame queue.
5259 bf = TAILQ_FIRST(&txq->fifo.axq_q);
5262 * Is it the last buffer in this set?
5263 * Decrement the FIFO counter.
5265 if (bf->bf_flags & ATH_BUF_FIFOEND) {
5266 if (txq->axq_fifo_depth == 0) {
5267 device_printf(sc->sc_dev,
5268 "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n",
5271 txq->fifo.axq_depth);
5273 txq->axq_fifo_depth--;
5275 ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
5282 if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) {
5283 device_printf(sc->sc_dev,
5284 "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n",
5287 txq->axq_fifo_depth,
5288 txq->fifo.axq_depth);
5292 * Now drain the pending queue.
5294 bf = TAILQ_FIRST(&txq->axq_q);
5296 txq->axq_link = NULL;
5299 ATH_TXQ_REMOVE(txq, bf, bf_list);
5304 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
5307 struct ath_hal *ah = sc->sc_ah;
5313 * NB: this assumes output has been stopped and
5314 * we do not need to block ath_tx_proc
5316 for (ix = 0;; ix++) {
5318 bf = ath_tx_draintxq_get_one(sc, txq);
5320 ATH_TXQ_UNLOCK(txq);
5323 if (bf->bf_state.bfs_aggr)
5324 txq->axq_aggr_depth--;
5326 if (sc->sc_debug & ATH_DEBUG_RESET) {
5327 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
5331 * EDMA operation has a TX completion FIFO
5332 * separate from the TX descriptor, so this
5333 * method of checking the "completion" status
5336 if (! sc->sc_isedma) {
5337 status = (ath_hal_txprocdesc(ah,
5339 &bf->bf_status.ds_txstat) == HAL_OK);
5341 ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status);
5342 ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
5343 bf->bf_m->m_len, 0, -1);
5345 #endif /* ATH_DEBUG */
5347 * Since we're now doing magic in the completion
5348 * functions, we -must- call it for aggregation
5349 * destinations or BAW tracking will get upset.
5352 * Clear ATH_BUF_BUSY; the completion handler
5353 * will free the buffer.
5355 ATH_TXQ_UNLOCK(txq);
5356 bf->bf_flags &= ~ATH_BUF_BUSY;
5358 bf->bf_comp(sc, bf, 1);
5360 ath_tx_default_comp(sc, bf, 1);
5364 * Free the holding buffer if it exists
5367 ath_txq_freeholdingbuf(sc, txq);
5368 ATH_TXQ_UNLOCK(txq);
5371 * Drain software queued frames which are on
5374 ath_tx_txq_drain(sc, txq);
5378 ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
5380 struct ath_hal *ah = sc->sc_ah;
5382 ATH_TXQ_LOCK_ASSERT(txq);
5384 DPRINTF(sc, ATH_DEBUG_RESET,
5385 "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, "
5386 "link %p, holdingbf=%p\n",
5389 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
5390 (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)),
5391 (int) ath_hal_numtxpending(ah, txq->axq_qnum),
5394 txq->axq_holdingbf);
5396 (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
5397 /* We've stopped TX DMA, so mark this as stopped. */
5398 txq->axq_flags &= ~ATH_TXQ_PUTRUNNING;
5401 if ((sc->sc_debug & ATH_DEBUG_RESET)
5402 && (txq->axq_holdingbf != NULL)) {
5403 ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0);
5409 ath_stoptxdma(struct ath_softc *sc)
5411 struct ath_hal *ah = sc->sc_ah;
5414 /* XXX return value */
5418 if (!sc->sc_invalid) {
5419 /* don't touch the hardware if marked invalid */
5420 DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5421 __func__, sc->sc_bhalq,
5422 (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
5425 /* stop the beacon queue */
5426 (void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
5428 /* Stop the data queues */
5429 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
5430 if (ATH_TXQ_SETUP(sc, i)) {
5431 ATH_TXQ_LOCK(&sc->sc_txq[i]);
5432 ath_tx_stopdma(sc, &sc->sc_txq[i]);
5433 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
5443 ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq)
5445 struct ath_hal *ah = sc->sc_ah;
5449 if (! (sc->sc_debug & ATH_DEBUG_RESET))
5452 device_printf(sc->sc_dev, "%s: Q%d: begin\n",
5453 __func__, txq->axq_qnum);
5454 TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
5455 ath_printtxbuf(sc, bf, txq->axq_qnum, i,
5456 ath_hal_txprocdesc(ah, bf->bf_lastds,
5457 &bf->bf_status.ds_txstat) == HAL_OK);
5460 device_printf(sc->sc_dev, "%s: Q%d: end\n",
5461 __func__, txq->axq_qnum);
5463 #endif /* ATH_DEBUG */
5466 * Drain the transmit queues and reclaim resources.
5469 ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
5471 struct ath_hal *ah = sc->sc_ah;
5472 struct ifnet *ifp = sc->sc_ifp;
5474 struct ath_buf *bf_last;
5476 (void) ath_stoptxdma(sc);
5479 * Dump the queue contents
5481 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
5483 * XXX TODO: should we just handle the completed TX frames
5484 * here, whether or not the reset is a full one?
5486 if (ATH_TXQ_SETUP(sc, i)) {
5488 if (sc->sc_debug & ATH_DEBUG_RESET)
5489 ath_tx_dump(sc, &sc->sc_txq[i]);
5490 #endif /* ATH_DEBUG */
5491 if (reset_type == ATH_RESET_NOLOSS) {
5492 ath_tx_processq(sc, &sc->sc_txq[i], 0);
5493 ATH_TXQ_LOCK(&sc->sc_txq[i]);
5495 * Free the holding buffer; DMA is now
5498 ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
5500 * Set up the link pointer to be the
5501 * _last_ buffer/descriptor in the list.
5502 * If there's nothing in the list, set it
5505 bf_last = ATH_TXQ_LAST(&sc->sc_txq[i],
5507 if (bf_last != NULL) {
5508 ath_hal_gettxdesclinkptr(ah,
5510 &sc->sc_txq[i].axq_link);
5512 sc->sc_txq[i].axq_link = NULL;
5514 ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
5516 ath_tx_draintxq(sc, &sc->sc_txq[i]);
5520 if (sc->sc_debug & ATH_DEBUG_RESET) {
5521 struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf);
5522 if (bf != NULL && bf->bf_m != NULL) {
5523 ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
5524 ath_hal_txprocdesc(ah, bf->bf_lastds,
5525 &bf->bf_status.ds_txstat) == HAL_OK);
5526 ieee80211_dump_pkt(ifp->if_l2com,
5527 mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
5531 #endif /* ATH_DEBUG */
5532 IF_LOCK(&ifp->if_snd);
5533 #if defined(__DragonFly__)
5534 ifq_clr_oactive(&ifp->if_snd);
5536 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5538 IF_UNLOCK(&ifp->if_snd);
5539 sc->sc_wd_timer = 0;
5543 * Update internal state after a channel change.
5546 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
5548 enum ieee80211_phymode mode;
5551 * Change channels and update the h/w rate map
5552 * if we're switching; e.g. 11a to 11b/g.
5554 mode = ieee80211_chan2mode(chan);
5555 if (mode != sc->sc_curmode)
5556 ath_setcurmode(sc, mode);
5557 sc->sc_curchan = chan;
5561 * Set/change channels. If the channel is really being changed,
5562 * it's done by resetting the chip. To accomplish this we must
5563 * first clean up any pending DMA, then restart things a la ath_init.
5567 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
5569 struct ifnet *ifp = sc->sc_ifp;
5570 struct ieee80211com *ic = ifp->if_l2com;
5571 struct ath_hal *ah = sc->sc_ah;
5574 /* Treat this as an interface reset */
5575 ATH_PCU_UNLOCK_ASSERT(sc);
5576 ATH_UNLOCK_ASSERT(sc);
5578 /* (Try to) stop TX/RX from occurring */
5579 taskqueue_block(sc->sc_tq);
5583 /* Disable interrupts */
5584 ath_hal_intrset(ah, 0);
5586 /* Stop new RX/TX/interrupt completion */
5587 if (ath_reset_grablock(sc, 1) == 0) {
5588 device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
5592 /* Stop pending RX/TX completion */
5593 ath_txrx_stop_locked(sc);
5597 DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
5598 __func__, ieee80211_chan2ieee(ic, chan),
5599 chan->ic_freq, chan->ic_flags);
5600 if (chan != sc->sc_curchan) {
5603 * To switch channels clear any pending DMA operations;
5604 * wait long enough for the RX fifo to drain, reset the
5605 * hardware at the new frequency, and then re-enable
5606 * the relevant bits of the h/w.
5609 ath_hal_intrset(ah, 0); /* disable interrupts */
5611 ath_stoprecv(sc, 1); /* turn off frame recv */
5613 * First, handle completed TX/RX frames.
5616 ath_draintxq(sc, ATH_RESET_NOLOSS);
5618 * Next, flush the non-scheduled frames.
5620 ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */
5622 ath_update_chainmasks(sc, chan);
5623 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
5624 sc->sc_cur_rxchainmask);
5625 if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
5626 if_printf(ifp, "%s: unable to reset "
5627 "channel %u (%u MHz, flags 0x%x), hal status %u\n",
5628 __func__, ieee80211_chan2ieee(ic, chan),
5629 chan->ic_freq, chan->ic_flags, status);
5633 sc->sc_diversity = ath_hal_getdiversity(ah);
5636 sc->sc_rx_stopped = 1;
5637 sc->sc_rx_resetted = 1;
5640 /* Let DFS at it in case it's a DFS channel */
5641 ath_dfs_radar_enable(sc, chan);
5643 /* Let spectral at it in case spectral is enabled */
5644 ath_spectral_enable(sc, chan);
5647 * Let bluetooth coexistence at it in case it's needed for this
5650 ath_btcoex_enable(sc, ic->ic_curchan);
5653 * If we're doing TDMA, enforce the TXOP limitation for chips
5656 if (sc->sc_hasenforcetxop && sc->sc_tdma)
5657 ath_hal_setenforcetxop(sc->sc_ah, 1);
5659 ath_hal_setenforcetxop(sc->sc_ah, 0);
5662 * Re-enable rx framework.
5664 if (ath_startrecv(sc) != 0) {
5665 if_printf(ifp, "%s: unable to restart recv logic\n",
5672 * Change channels and update the h/w rate map
5673 * if we're switching; e.g. 11a to 11b/g.
5675 ath_chan_change(sc, chan);
5678 * Reset clears the beacon timers; reset them
5681 if (sc->sc_beacons) { /* restart beacons */
5682 #ifdef IEEE80211_SUPPORT_TDMA
5684 ath_tdma_config(sc, NULL);
5687 ath_beacon_config(sc, NULL);
5691 * Re-enable interrupts.
5694 ath_hal_intrset(ah, sc->sc_imask);
5700 sc->sc_inreset_cnt--;
5701 /* XXX only do this if sc_inreset_cnt == 0? */
5702 ath_hal_intrset(ah, sc->sc_imask);
5705 IF_LOCK(&ifp->if_snd);
5706 #if defined(__DragonFly__)
5707 ifq_clr_oactive(&ifp->if_snd);
5709 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5711 IF_UNLOCK(&ifp->if_snd);
5713 /* XXX ath_start? */
5719 * Periodically recalibrate the PHY to account
5720 * for temperature/environment changes.
5723 ath_calibrate(void *arg)
5725 struct ath_softc *sc = arg;
5726 struct ath_hal *ah = sc->sc_ah;
5727 struct ifnet *ifp = sc->sc_ifp;
5728 struct ieee80211com *ic = ifp->if_l2com;
5729 HAL_BOOL longCal, isCalDone = AH_TRUE;
5730 HAL_BOOL aniCal, shortCal = AH_FALSE;
5733 ATH_LOCK_ASSERT(sc);
5736 * Force the hardware awake for ANI work.
5738 ath_power_set_power_state(sc, HAL_PM_AWAKE);
5740 /* Skip trying to do this if we're in reset */
5741 if (sc->sc_inreset_cnt)
5744 if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */
5746 longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
5747 aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000);
5748 if (sc->sc_doresetcal)
5749 shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000);
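/*
 * NB: ath_longcalinterval is in seconds while the ANI and short
 * cal intervals are in milliseconds; each is converted to ticks
 * via hz before comparing against the tick counters above.
 */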
5751 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal);
5753 sc->sc_stats.ast_ani_cal++;
5754 sc->sc_lastani = ticks;
5755 ath_hal_ani_poll(ah, sc->sc_curchan);
5759 sc->sc_stats.ast_per_cal++;
5760 sc->sc_lastlongcal = ticks;
5761 if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
5763 * Rfgain is out of bounds, reset the chip
5764 * to load new gain values.
5766 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5767 "%s: rfgain change\n", __func__);
5768 sc->sc_stats.ast_per_rfgain++;
5769 sc->sc_resetcal = 0;
5770 sc->sc_doresetcal = AH_TRUE;
5771 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
5772 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
5773 ath_power_restore_power_state(sc);
5777 * If this long cal is after an idle period, then
5778 * reset the data collection state so we start fresh.
5780 if (sc->sc_resetcal) {
5781 (void) ath_hal_calreset(ah, sc->sc_curchan);
5782 sc->sc_lastcalreset = ticks;
5783 sc->sc_lastshortcal = ticks;
5784 sc->sc_resetcal = 0;
5785 sc->sc_doresetcal = AH_TRUE;
5789 /* Only call if we're doing a short/long cal, not for ANI calibration */
5790 if (shortCal || longCal) {
5791 isCalDone = AH_FALSE;
5792 if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
5795 * Calibrate noise floor data again in case of change.
5797 ath_hal_process_noisefloor(ah);
5800 DPRINTF(sc, ATH_DEBUG_ANY,
5801 "%s: calibration of channel %u failed\n",
5802 __func__, sc->sc_curchan->ic_freq);
5803 sc->sc_stats.ast_per_calfail++;
5806 sc->sc_lastshortcal = ticks;
5811 * Use a shorter interval to potentially collect multiple
5812 * data samples required to complete calibration. Once
5813 * we're told the work is done we drop back to a longer
5814 * interval between requests. We're more aggressive doing
5815 * work when operating as an AP to improve operation right
5818 sc->sc_lastshortcal = ticks;
5819 nextcal = ath_shortcalinterval*hz/1000;
5820 if (sc->sc_opmode != HAL_M_HOSTAP)
5822 sc->sc_doresetcal = AH_TRUE;
5824 /* nextcal should be the shortest time for next event */
5825 nextcal = ath_longcalinterval*hz;
5826 if (sc->sc_lastcalreset == 0)
5827 sc->sc_lastcalreset = sc->sc_lastlongcal;
5828 else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
5829 sc->sc_resetcal = 1; /* setup reset next trip */
5830 sc->sc_doresetcal = AH_FALSE;
5832 /* ANI calibration may occur more often than short/long/resetcal */
5833 if (ath_anicalinterval > 0)
5834 nextcal = MIN(nextcal, ath_anicalinterval*hz/1000);
5837 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
5838 __func__, nextcal, isCalDone ? "" : "!");
5839 callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
5841 DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
5843 /* NB: don't rearm timer */
5846 * Restore power state now that we're done.
5848 ath_power_restore_power_state(sc);
5852 ath_scan_start(struct ieee80211com *ic)
5854 struct ifnet *ifp = ic->ic_ifp;
5855 struct ath_softc *sc = ifp->if_softc;
5856 struct ath_hal *ah = sc->sc_ah;
5859 /* XXX calibration timer? */
5862 sc->sc_scanning = 1;
5863 sc->sc_syncbeacon = 0;
5864 rfilt = ath_calcrxfilter(sc);
5868 ath_hal_setrxfilter(ah, rfilt);
5869 ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
5872 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
5873 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
5877 ath_scan_end(struct ieee80211com *ic)
5879 struct ifnet *ifp = ic->ic_ifp;
5880 struct ath_softc *sc = ifp->if_softc;
5881 struct ath_hal *ah = sc->sc_ah;
5885 sc->sc_scanning = 0;
5886 rfilt = ath_calcrxfilter(sc);
5890 ath_hal_setrxfilter(ah, rfilt);
5891 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5893 ath_hal_process_noisefloor(ah);
5896 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5897 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
5901 #ifdef ATH_ENABLE_11N
5903 * For now, just do a channel change.
5905 * Later, we'll go through the hard slog of suspending tx/rx, changing rate
5906 * control state and resetting the hardware without dropping frames out
5909 * The unfortunate trouble here is making absolutely sure that the
5910 * channel width change has propagated enough so the hardware
5911 * absolutely isn't handed bogus frames for its current operating
5912 * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and
5913 * do occur in parallel, we need to make certain we've blocked
5914 * any further ongoing TX (and RX, that can cause raw TX)
5915 * before we do this.
5918 ath_update_chw(struct ieee80211com *ic)
5920 struct ifnet *ifp = ic->ic_ifp;
5921 struct ath_softc *sc = ifp->if_softc;
5923 DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__);
5924 ath_set_channel(ic);
5926 #endif /* ATH_ENABLE_11N */
5929 ath_set_channel(struct ieee80211com *ic)
5931 struct ifnet *ifp = ic->ic_ifp;
5932 struct ath_softc *sc = ifp->if_softc;
5935 ath_power_set_power_state(sc, HAL_PM_AWAKE);
5938 (void) ath_chan_set(sc, ic->ic_curchan);
5940 * If we are returning to our bss channel then mark state
5941 * so the next recv'd beacon's tsf will be used to sync the
5942 * beacon timers. Note that since we only hear beacons in
5943 * sta/ibss mode this has no effect in other operating modes.
5946 if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
5947 sc->sc_syncbeacon = 1;
5948 ath_power_restore_power_state(sc);
5953 * Walk the vap list and check if there are any vaps in RUN state.
5956 ath_isanyrunningvaps(struct ieee80211vap *this)
5958 struct ieee80211com *ic = this->iv_ic;
5959 struct ieee80211vap *vap;
5961 IEEE80211_LOCK_ASSERT(ic);
5963 TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
5964 if (vap != this && vap->iv_state >= IEEE80211_S_RUN)
5971 ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
5973 struct ieee80211com *ic = vap->iv_ic;
5974 struct ath_softc *sc = ic->ic_ifp->if_softc;
5975 struct ath_vap *avp = ATH_VAP(vap);
5976 struct ath_hal *ah = sc->sc_ah;
5977 struct ieee80211_node *ni = NULL;
5978 int i, error, stamode;
5980 int csa_run_transition = 0;
5981 enum ieee80211_state ostate = vap->iv_state;
5983 static const HAL_LED_STATE leds[] = {
5984 HAL_LED_INIT, /* IEEE80211_S_INIT */
5985 HAL_LED_SCAN, /* IEEE80211_S_SCAN */
5986 HAL_LED_AUTH, /* IEEE80211_S_AUTH */
5987 HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */
5988 HAL_LED_RUN, /* IEEE80211_S_CAC */
5989 HAL_LED_RUN, /* IEEE80211_S_RUN */
5990 HAL_LED_RUN, /* IEEE80211_S_CSA */
5991 HAL_LED_RUN, /* IEEE80211_S_SLEEP */
5994 DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
5995 ieee80211_state_name[ostate],
5996 ieee80211_state_name[nstate]);
5999 * net80211 _should_ have the comlock asserted at this point.
6000 * There are some comments around the calls to vap->iv_newstate
6001 * which indicate that it (newstate) may end up dropping the
6002 * lock. This and the subsequent lock assert check after newstate
6003 * are an attempt to catch these and figure out how/why.
6005 IEEE80211_LOCK_ASSERT(ic);
6007 /* Before we touch the hardware - wake it up */
6010 * If the NIC is in anything other than SLEEP state,
6011 * we need to ensure that self-generated frames are
6012 * set for PWRMGT=0. Otherwise we may end up with
6013 * strange situations.
6015 * XXX TODO: is this actually the case? :-)
6017 if (nstate != IEEE80211_S_SLEEP)
6018 ath_power_setselfgen(sc, HAL_PM_AWAKE);
6021 * Now, wake the thing up.
6023 ath_power_set_power_state(sc, HAL_PM_AWAKE);
6026 * And stop the calibration callout whilst we have
6029 callout_stop_sync(&sc->sc_cal_ch);
6032 if (ostate == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN)
6033 csa_run_transition = 1;
6035 ath_hal_setledstate(ah, leds[nstate]); /* set LED */
6037 if (nstate == IEEE80211_S_SCAN) {
6039 * Scanning: turn off beacon miss and don't beacon.
6040 * Mark beacon state so when we reach RUN state we'll
6041 * [re]setup beacons. Unblock the task q thread so
6042 * deferred interrupt processing is done.
6045 /* Ensure we stay awake during scan */
6047 ath_power_setselfgen(sc, HAL_PM_AWAKE);
6048 ath_power_setpower(sc, HAL_PM_AWAKE);
6052 sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
6053 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
6055 taskqueue_unblock(sc->sc_tq);
6058 ni = ieee80211_ref_node(vap->iv_bss);
6059 rfilt = ath_calcrxfilter(sc);
6060 stamode = (vap->iv_opmode == IEEE80211_M_STA ||
6061 vap->iv_opmode == IEEE80211_M_AHDEMO ||
6062 vap->iv_opmode == IEEE80211_M_IBSS);
6065 * XXX Don't need to do this (and others) if we've transitioned
6068 if (stamode && nstate == IEEE80211_S_RUN) {
6069 sc->sc_curaid = ni->ni_associd;
6070 IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
6071 ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
6073 DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
6074 __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
6075 ath_hal_setrxfilter(ah, rfilt);
6077 /* XXX is this to restore keycache on resume? */
6078 if (vap->iv_opmode != IEEE80211_M_STA &&
6079 (vap->iv_flags & IEEE80211_F_PRIVACY)) {
6080 for (i = 0; i < IEEE80211_WEP_NKID; i++)
6081 if (ath_hal_keyisvalid(ah, i))
6082 ath_hal_keysetmac(ah, i, ni->ni_bssid);
6086 * Invoke the parent method to do net80211 work.
6088 error = avp->av_newstate(vap, nstate, arg);
6093 * See above: ensure av_newstate() doesn't drop the lock
6096 IEEE80211_LOCK_ASSERT(ic);
6098 if (nstate == IEEE80211_S_RUN) {
6099 /* NB: collect bss node again, it may have changed */
6100 ieee80211_free_node(ni);
6101 ni = ieee80211_ref_node(vap->iv_bss);
6103 DPRINTF(sc, ATH_DEBUG_STATE,
6104 "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
6105 "capinfo 0x%04x chan %d\n", __func__,
6106 vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
6107 ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));
6109 switch (vap->iv_opmode) {
6110 #ifdef IEEE80211_SUPPORT_TDMA
6111 case IEEE80211_M_AHDEMO:
6112 if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
6116 case IEEE80211_M_HOSTAP:
6117 case IEEE80211_M_IBSS:
6118 case IEEE80211_M_MBSS:
6120 * Allocate and setup the beacon frame.
6122 * Stop any previous beacon DMA. This may be
6123 * necessary, for example, when an ibss merge
6124 * causes reconfiguration; there will be a state
6125 * transition from RUN->RUN that means we may
6126 * be called with beacon transmission active.
6128 ath_hal_stoptxdma(ah, sc->sc_bhalq);
6130 error = ath_beacon_alloc(sc, ni);
6134 * If joining an adhoc network, defer beacon timer
6135 * configuration to the next beacon frame so we
6136 * have a current TSF to use. Otherwise we're
6137 * starting an ibss/bss so there's no need to delay;
6138 * if this is the first vap moving to RUN state, then
6139 * beacon state needs to be [re]configured.
6141 if (vap->iv_opmode == IEEE80211_M_IBSS &&
6142 ni->ni_tstamp.tsf != 0) {
6143 sc->sc_syncbeacon = 1;
6144 } else if (!sc->sc_beacons) {
6145 #ifdef IEEE80211_SUPPORT_TDMA
6146 if (vap->iv_caps & IEEE80211_C_TDMA)
6147 ath_tdma_config(sc, vap);
6150 ath_beacon_config(sc, vap);
6154 case IEEE80211_M_STA:
6156 * Defer beacon timer configuration to the next
6157 * beacon frame so we have a current TSF to use
6158 * (any TSF collected when scanning is likely old).
6159 * However if it's due to a CSA -> RUN transition,
6160 * force a beacon update so we pick up a lack of
6161 * beacons from an AP in CAC and thus force a
6164 * And there are also corner cases here where
6165 * after a scan, the AP may have disappeared.
6166 * In that case, we may not receive an actual
6167 * beacon to update the beacon timer and thus we
6168 * won't get notified of the missing beacons.
6170 if (ostate != IEEE80211_S_RUN &&
6171 ostate != IEEE80211_S_SLEEP) {
6172 DPRINTF(sc, ATH_DEBUG_BEACON,
6173 "%s: STA; syncbeacon=1\n", __func__);
6174 sc->sc_syncbeacon = 1;
6176 if (csa_run_transition)
6177 ath_beacon_config(sc, vap);
6182 * Reconfigure beacons during reset, as otherwise
6183 * we won't get the beacon timers reprogrammed
6184 * after a reset and thus we won't pick up a
6185 * beacon miss interrupt.
6187 * Hopefully we'll see a beacon before the BMISS
6188 * timer fires (too often), leading to a STA
6194 case IEEE80211_M_MONITOR:
6196 * Monitor mode vaps have only INIT->RUN and RUN->RUN
6197 * transitions so we must re-enable interrupts here to
6198 * handle the case of a single monitor mode vap.
6200 ath_hal_intrset(ah, sc->sc_imask);
6202 case IEEE80211_M_WDS:
6208 * Let the hal process statistics collected during a
6209 * scan so it can provide calibrated noise floor data.
6211 ath_hal_process_noisefloor(ah);
6213 * Reset rssi stats; maybe not the best place...
6215 sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
6216 sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
6217 sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
6220 * Force awake for RUN mode.
6223 ath_power_setselfgen(sc, HAL_PM_AWAKE);
6224 ath_power_setpower(sc, HAL_PM_AWAKE);
6227 * Finally, start any timers and the task q thread
6228 * (in case we didn't go through SCAN state).
6230 if (ath_longcalinterval != 0) {
6231 /* start periodic recalibration timer */
6232 callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
6234 DPRINTF(sc, ATH_DEBUG_CALIBRATE,
6235 "%s: calibration disabled\n", __func__);
6239 taskqueue_unblock(sc->sc_tq);
6240 } else if (nstate == IEEE80211_S_INIT) {
6242 * If there are no vaps left in RUN state then
6243 * shutdown host/driver operation:
6244 * o disable interrupts
6245 * o disable the task queue thread
6246 * o mark beacon processing as stopped
6248 if (!ath_isanyrunningvaps(vap)) {
6249 sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
6250 /* disable interrupts */
6251 ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
6252 taskqueue_block(sc->sc_tq);
6255 #ifdef IEEE80211_SUPPORT_TDMA
6256 ath_hal_setcca(ah, AH_TRUE);
6258 } else if (nstate == IEEE80211_S_SLEEP) {
6259 /* We're going to sleep, so transition appropriately */
6260 /* For now, only do this if we're a single STA vap */
6261 if (sc->sc_nvaps == 1 &&
6262 vap->iv_opmode == IEEE80211_M_STA) {
6263 DPRINTF(sc, ATH_DEBUG_BEACON, "%s: syncbeacon=%d\n", __func__, sc->sc_syncbeacon);
6266 * Always at least set the self-generated
6267 * frame config to set PWRMGT=1.
6269 ath_power_setselfgen(sc, HAL_PM_NETWORK_SLEEP);
6272 * If we're not syncing beacons, transition
6275 * We stay awake if syncbeacon > 0 in case
6276 * we need to listen for some beacons; otherwise
6277 * our beacon timer config may be wrong.
6279 if (sc->sc_syncbeacon == 0) {
6280 ath_power_setpower(sc, HAL_PM_NETWORK_SLEEP);
6286 ieee80211_free_node(ni);
6289 * Restore the power state - either to what it was, or
6290 * to network_sleep if it's alright.
6293 ath_power_restore_power_state(sc);
6299 * Allocate a key cache slot to the station so we can
6300 * set up a mapping from key index to node. The key cache
6301 * slot is needed for managing antenna state and for
6302 * compression when stations do not use crypto. We do
6303 * it unilaterally here; if crypto is employed this slot
6304 * will be reassigned.
6307 ath_setup_stationkey(struct ieee80211_node *ni)
6309 struct ieee80211vap *vap = ni->ni_vap;
6310 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6311 ieee80211_keyix keyix, rxkeyix;
6313 /* XXX should take a locked ref to vap->iv_bss */
6314 if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
6316 * Key cache is full; we'll fall back to doing
6317 * the more expensive lookup in software. Note
6318 * this also means no h/w compression.
6320 /* XXX msg+statistic */
6323 ni->ni_ucastkey.wk_keyix = keyix;
6324 ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
6325 /* NB: must mark device key to get called back on delete */
6326 ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
6327 IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
6328 /* NB: this will create a pass-thru key entry */
6329 ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss);
6334 * Set up driver-specific state for a newly associated node.
6335 * Note that we're also called on a re-associate; the isnew
6336 * param tells us if this is the first time or not.
6339 ath_newassoc(struct ieee80211_node *ni, int isnew)
6341 struct ath_node *an = ATH_NODE(ni);
6342 struct ieee80211vap *vap = ni->ni_vap;
6343 struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
6344 const struct ieee80211_txparam *tp = ni->ni_txparms;
6346 an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
6347 an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
6349 DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: reassoc; isnew=%d, is_powersave=%d\n",
6354 an->an_is_powersave);
6357 ath_rate_newassoc(sc, an, isnew);
6358 ATH_NODE_UNLOCK(an);
6361 (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
6362 ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
6363 ath_setup_stationkey(ni);
6366 * If we're reassociating, make sure that any paused queues
6369 * Now, we may have frames in the hardware queue for this node.
6370 * So if we are reassociating and there are frames in the queue,
6371 * we need to go through the cleanup path to ensure that they're
6372 * marked as non-aggregate.
6375 DPRINTF(sc, ATH_DEBUG_NODE,
6376 "%s: %6D: reassoc; is_powersave=%d\n",
6380 an->an_is_powersave);
6382 /* XXX for now, we can't hold the lock across assoc */
6383 ath_tx_node_reassoc(sc, an);
6385 /* XXX for now, we can't hold the lock across wakeup */
6386 if (an->an_is_powersave)
6387 ath_tx_node_wakeup(sc, an);
6392 ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
6393 int nchans, struct ieee80211_channel chans[])
6395 struct ath_softc *sc = ic->ic_ifp->if_softc;
6396 struct ath_hal *ah = sc->sc_ah;
6399 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
6400 "%s: rd %u cc %u location %c%s\n",
6401 __func__, reg->regdomain, reg->country, reg->location,
6402 reg->ecm ? " ecm" : "");
6404 status = ath_hal_set_channels(ah, chans, nchans,
6405 reg->country, reg->regdomain);
6406 if (status != HAL_OK) {
6407 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
6409 return EINVAL; /* XXX */
6416 ath_getradiocaps(struct ieee80211com *ic,
6417 int maxchans, int *nchans, struct ieee80211_channel chans[])
6419 struct ath_softc *sc = ic->ic_ifp->if_softc;
6420 struct ath_hal *ah = sc->sc_ah;
6422 DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
6423 __func__, SKU_DEBUG, CTRY_DEFAULT);
6425 /* XXX check return */
6426 (void) ath_hal_getchannels(ah, chans, maxchans, nchans,
6427 HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
6432 ath_getchannels(struct ath_softc *sc)
6434 struct ifnet *ifp = sc->sc_ifp;
6435 struct ieee80211com *ic = ifp->if_l2com;
6436 struct ath_hal *ah = sc->sc_ah;
6440 * Collect channel set based on EEPROM contents.
6442 status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
6443 &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
6444 if (status != HAL_OK) {
6445 if_printf(ifp, "%s: unable to collect channel list from hal, "
6446 "status %d\n", __func__, status);
6449 (void) ath_hal_getregdomain(ah, &sc->sc_eerd);
6450 ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */
6451 /* XXX map Atheros sku's to net80211 SKU's */
6452 /* XXX net80211 types too small */
6453 ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
6454 ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
6455 ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */
6456 ic->ic_regdomain.isocc[1] = ' ';
6458 ic->ic_regdomain.ecm = 1;
6459 ic->ic_regdomain.location = 'I';
6461 DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
6462 "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
6463 __func__, sc->sc_eerd, sc->sc_eecc,
6464 ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
6465 ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
6470 ath_rate_setup(struct ath_softc *sc, u_int mode)
6472 struct ath_hal *ah = sc->sc_ah;
6473 const HAL_RATE_TABLE *rt;
6476 case IEEE80211_MODE_11A:
6477 rt = ath_hal_getratetable(ah, HAL_MODE_11A);
6479 case IEEE80211_MODE_HALF:
6480 rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
6482 case IEEE80211_MODE_QUARTER:
6483 rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
6485 case IEEE80211_MODE_11B:
6486 rt = ath_hal_getratetable(ah, HAL_MODE_11B);
6488 case IEEE80211_MODE_11G:
6489 rt = ath_hal_getratetable(ah, HAL_MODE_11G);
6491 case IEEE80211_MODE_TURBO_A:
6492 rt = ath_hal_getratetable(ah, HAL_MODE_108A);
6494 case IEEE80211_MODE_TURBO_G:
6495 rt = ath_hal_getratetable(ah, HAL_MODE_108G);
6497 case IEEE80211_MODE_STURBO_A:
6498 rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
6500 case IEEE80211_MODE_11NA:
6501 rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
6503 case IEEE80211_MODE_11NG:
6504 rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
6507 DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
6511 sc->sc_rates[mode] = rt;
6512 return (rt != NULL);
6516 ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
6518 #define N(a) (sizeof(a)/sizeof(a[0]))
6519 /* NB: on/off times from the Atheros NDIS driver, w/ permission */
6520 static const struct {
6521 u_int rate; /* tx/rx 802.11 rate */
6522 u_int16_t timeOn; /* LED on time (ms) */
6523 u_int16_t timeOff; /* LED off time (ms) */
6539 /* XXX half/quarter rates */
6541 const HAL_RATE_TABLE *rt;
6544 memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
6545 rt = sc->sc_rates[mode];
6546 KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
6547 for (i = 0; i < rt->rateCount; i++) {
6548 uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6549 if (rt->info[i].phy != IEEE80211_T_HT)
6550 sc->sc_rixmap[ieeerate] = i;
6552 sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
6554 memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
6555 for (i = 0; i < N(sc->sc_hwmap); i++) {
6556 if (i >= rt->rateCount) {
6557 sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
6558 sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
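/* NB: (ms * hz) / 1000 converts millisecond blink times to callout ticks */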
6561 sc->sc_hwmap[i].ieeerate =
6562 rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
6563 if (rt->info[i].phy == IEEE80211_T_HT)
6564 sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
6565 sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
6566 if (rt->info[i].shortPreamble ||
6567 rt->info[i].phy == IEEE80211_T_OFDM)
6568 sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
6569 sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
6570 for (j = 0; j < N(blinkrates)-1; j++)
6571 if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
6573 /* NB: this uses the last entry if the rate isn't found */
6574 /* XXX beware of overflow */
6575 sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
6576 sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
6578 sc->sc_currates = rt;
6579 sc->sc_curmode = mode;
6581 * All protection frames are transmitted at 2Mb/s for
6582 * 11g, otherwise at 1Mb/s.
6584 if (mode == IEEE80211_MODE_11G)
6585 sc->sc_protrix = ath_tx_findrix(sc, 2*2);
6587 sc->sc_protrix = ath_tx_findrix(sc, 2*1);
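/* NB: IEEE rates are in 500kb/s units, so 2*2 is 2Mb/s and 2*1 is 1Mb/s */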
6588 /* NB: caller is responsible for resetting rate control state */
6593 ath_watchdog(void *arg)
6595 struct ath_softc *sc = arg;
6598 ATH_LOCK_ASSERT(sc);
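/*
 * NB: sc_wd_timer is zeroed whenever TX completion makes
 * progress; counting it down to zero here means a pending
 * frame has been stuck for that many watchdog periods.
 */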
6600 if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
6601 struct ifnet *ifp = sc->sc_ifp;
6604 ath_power_set_power_state(sc, HAL_PM_AWAKE);
6606 if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
6608 if_printf(ifp, "%s hang detected (0x%x)\n",
6609 hangs & 0xff ? "bb" : "mac", hangs);
6611 if_printf(ifp, "device timeout\n");
6613 #if defined(__DragonFly__)
6616 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
6618 sc->sc_stats.ast_watchdog++;
6620 ath_power_restore_power_state(sc);
6624 * We can't hold the lock across the ath_reset() call.
6626 * And since this routine can't hold a lock and sleep,
6627 * do the reset deferred.
6630 taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask);
6633 #if defined(__DragonFly__)
6634 callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
6636 callout_schedule(&sc->sc_wd_ch, hz);
6640 #if defined(__DragonFly__)
6643 * (DragonFly network start)
6646 ath_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
6648 struct ath_softc *sc = ifp->if_softc;
6652 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
6653 wst = wlan_serialize_push();
6655 if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) {
6656 ifq_purge(&ifp->if_snd);
6657 wlan_serialize_pop(wst);
6660 ifq_set_oactive(&ifp->if_snd);
6662 m = ifq_dequeue(&ifp->if_snd);
6665 ath_transmit(ifp, m);
6667 ifq_clr_oactive(&ifp->if_snd);
6668 wlan_serialize_pop(wst);
6674 * Fetch the rate control statistics for the given node.
6677 ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs)
6679 struct ath_node *an;
6680 struct ieee80211com *ic = sc->sc_ifp->if_l2com;
6681 struct ieee80211_node *ni;
6684 /* Perform a lookup on the given node */
6685 ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr);
6691 /* Lock the ath_node */
6695 /* Fetch the rate control stats for this node */
6696 error = ath_rate_fetch_node_stats(sc, an, rs);
6698 /* No matter what happens here, just drop through */
6700 /* Unlock the ath_node */
6701 ATH_NODE_UNLOCK(an);
6703 /* Unref the node */
6704 ieee80211_node_decref(ni);
6712 * Diagnostic interface to the HAL. This is used by various
6713 * tools to do things like retrieve register contents for
6714 * debugging. The mechanism is intentionally opaque so that
6715 * it can change frequently w/o concern for compatibility.
6718 ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
6720 struct ath_hal *ah = sc->sc_ah;
6721 u_int id = ad->ad_id & ATH_DIAG_ID;
6722 void *indata = NULL;
6723 void *outdata = NULL;
6724 u_int32_t insize = ad->ad_in_size;
6725 u_int32_t outsize = ad->ad_out_size;
6728 if (ad->ad_id & ATH_DIAG_IN) {
6732 indata = kmalloc(insize, M_TEMP, M_INTWAIT);
6733 if (indata == NULL) {
6737 error = copyin(ad->ad_in_data, indata, insize);
6741 if (ad->ad_id & ATH_DIAG_DYN) {
6743 * Allocate a buffer for the results (otherwise the HAL
6744 * returns a pointer to a buffer where we can read the
6745 * results). Note that we depend on the HAL leaving this
6746 * pointer for us to use below in reclaiming the buffer;
6747 * may want to be more defensive.
6749 outdata = kmalloc(outsize, M_TEMP, M_INTWAIT);
6750 if (outdata == NULL) {
6758 if (id != HAL_DIAG_REGS)
6759 ath_power_set_power_state(sc, HAL_PM_AWAKE);
6762 if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
6763 if (outsize < ad->ad_out_size)
6764 ad->ad_out_size = outsize;
6765 if (outdata != NULL)
6766 error = copyout(outdata, ad->ad_out_data,
6773 if (id != HAL_DIAG_REGS)
6774 ath_power_restore_power_state(sc);
6778 if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
6779 kfree(indata, M_TEMP);
6780 if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
6781 kfree(outdata, M_TEMP);
6784 #endif /* ATH_DIAGAPI */
6786 #if defined(__DragonFly__)
6789 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data,
6790 struct ucred *cred __unused)
6795 ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
6799 #define IS_RUNNING(ifp) \
6800 ((ifp->if_flags & IFF_UP) && (ifp->if_flags & IFF_RUNNING))
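/* NB: administratively up (IFF_UP) and marked running by the driver */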
6801 struct ath_softc *sc = ifp->if_softc;
6802 struct ieee80211com *ic = ifp->if_l2com;
6803 struct ifreq *ifr = (struct ifreq *)data;
6804 const HAL_RATE_TABLE *rt;
6809 if (IS_RUNNING(ifp)) {
6811 * To avoid rescanning another access point,
6812 * do not call ath_init() here. Instead,
6813 * only reflect promisc mode settings.
6816 ath_power_set_power_state(sc, HAL_PM_AWAKE);
6818 ath_power_restore_power_state(sc);
6820 } else if (ifp->if_flags & IFF_UP) {
6822 * Beware of being called during attach/detach
6823 * to reset promiscuous mode. In that case we
6824 * will still be marked UP but not RUNNING.
6825 * However trying to re-init the interface
6826 * is the wrong thing to do as we've already
6827 * torn down much of our state. There's
6828 * probably a better way to deal with this.
6830 if (!sc->sc_invalid)
6831 ath_init(sc); /* XXX lose error */
6834 ath_stop_locked(ifp);
6835 if (!sc->sc_invalid)
6836 ath_power_setpower(sc, HAL_PM_FULL_SLEEP);
6842 error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
6845 /* NB: embed these numbers to get a consistent view */
6846 #if defined(__DragonFly__)
6847 sc->sc_stats.ast_tx_packets = ifp->if_opackets;
6848 sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
6850 sc->sc_stats.ast_tx_packets = ifp->if_get_counter(ifp,
6851 IFCOUNTER_OPACKETS);
6852 sc->sc_stats.ast_rx_packets = ifp->if_get_counter(ifp,
6853 IFCOUNTER_IPACKETS);
6855 sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
6856 sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
6857 #ifdef IEEE80211_SUPPORT_TDMA
6858 sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
6859 sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
6861 rt = sc->sc_currates;
6862 sc->sc_stats.ast_tx_rate =
6863 rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
6864 if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT)
6865 sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS;
6866 error = copyout(&sc->sc_stats,
6867 ifr->ifr_data, sizeof (sc->sc_stats));
6869 case SIOCGATHAGSTATS:
6870 error = copyout(&sc->sc_aggr_stats,
6871 ifr->ifr_data, sizeof (sc->sc_aggr_stats));
6874 error = priv_check(curthread, PRIV_DRIVER);
6876 memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
6877 memset(&sc->sc_aggr_stats, 0,
6878 sizeof(sc->sc_aggr_stats));
6879 memset(&sc->sc_intr_stats, 0,
6880 sizeof(sc->sc_intr_stats));
6885 error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
6887 case SIOCGATHPHYERR:
6888 error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr);
6891 case SIOCGATHSPECTRAL:
6892 error = ath_ioctl_spectral(sc,(struct ath_diag*) ifr);
6894 case SIOCGATHNODERATESTATS:
6895 error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr);
6898 error = ether_ioctl(ifp, cmd, data);
6909 * Announce various information on device/driver attach.
6912 ath_announce(struct ath_softc *sc)
6914 struct ifnet *ifp = sc->sc_ifp;
6915 struct ath_hal *ah = sc->sc_ah;
6917 if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
6918 ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
6919 ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
6920 if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n",
6921 ah->ah_analog2GhzRev, ah->ah_analog5GhzRev);
6924 for (i = 0; i <= WME_AC_VO; i++) {
6925 struct ath_txq *txq = sc->sc_ac2q[i];
6926 if_printf(ifp, "Use hw queue %u for %s traffic\n",
6927 txq->axq_qnum, ieee80211_wme_acnames[i]);
6929 if_printf(ifp, "Use hw queue %u for CAB traffic\n",
6930 sc->sc_cabq->axq_qnum);
6931 if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
6933 if (ath_rxbuf != ATH_RXBUF)
6934 if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
6935 if (ath_txbuf != ATH_TXBUF)
6936 if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
6937 if (sc->sc_mcastkey && bootverbose)
6938 if_printf(ifp, "using multicast key search\n");
6942 ath_dfs_tasklet(void *p, int npending)
6944 struct ath_softc *sc = (struct ath_softc *) p;
6945 struct ifnet *ifp = sc->sc_ifp;
6946 struct ieee80211com *ic = ifp->if_l2com;
6949 * If previous processing has found a radar event,
6950 * signal this to the net80211 layer to begin DFS processing.
6953 if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) {
6954 /* DFS event found, initiate channel change */
6956 * XXX doesn't currently tell us whether the event
6957 * XXX was found in the primary or extension channel.
6961 ieee80211_dfs_notify_radar(ic, sc->sc_curchan);
6962 IEEE80211_UNLOCK(ic);
6967 * Enable/disable power save. This must be called with
6968 * no TX driver locks currently held, so it should only
6969 * be called from the RX path (which doesn't hold any TX driver locks).
6973 ath_node_powersave(struct ieee80211_node *ni, int enable)
6976 struct ath_node *an = ATH_NODE(ni);
6977 struct ieee80211com *ic = ni->ni_ic;
6978 struct ath_softc *sc = ic->ic_ifp->if_softc;
6979 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
6981 /* XXX and no TXQ locks should be held here */
6983 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d\n",
6989 /* Suspend or resume software queue handling */
6991 ath_tx_node_sleep(sc, an);
6993 ath_tx_node_wakeup(sc, an);
6995 /* Update net80211 state */
6996 avp->av_node_ps(ni, enable);
6998 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
7000 /* Update net80211 state */
7001 avp->av_node_ps(ni, enable);
7002 #endif /* ATH_SW_PSQ */
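/*
 * Sketch of how the hook above is wired in (illustration only): at
 * VAP creation the driver saves net80211's iv_node_ps handler in the
 * ath_vap and installs its own, which suspends/resumes the software
 * queues before chaining to the saved handler.
 */
#if 0
	/* in the driver's vap_create path */
	avp->av_node_ps = vap->iv_node_ps;	/* preserve net80211 handler */
	vap->iv_node_ps = ath_node_powersave;	/* install driver hook */
#endif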
7006 * Notification from net80211 that the powersave queue state has
7009 * Since the software queue also may have some frames:
7011 * + if the node software queue has frames and the TID state
7012 * is 0, we set the TIM;
7013 * + if the node and the stack are both empty, we clear the TIM bit.
7014 * + If the stack tries to set the bit, always set it.
7015 * + If the stack tries to clear the bit, only clear it if the
7016 * software queue in question is also cleared.
7018 * TODO: this is called during node teardown, so let's ensure this
7019 * is all correctly handled and that the TIM bit is cleared.
7020 * It may be that the node flush is called _AFTER_ the net80211
7021 * stack clears the TIM.
7023 * Here is the racy part. Since more than one concurrent,
7024 * overlapping TX may appear complete (with the TX completion in
7025 * another thread), the concurrent TIM calls may
7026 * clash. We can't hold the node lock here because setting the
7027 * TIM grabs the net80211 comlock and this may cause a LOR.
7028 * The solution is either to totally serialise _everything_ at
7029 * this point (ie, all TX, completion and any reset/flush go into
7030 * one taskqueue) or a new "ath TIM lock" needs to be created that
7031 * just wraps the driver state change and this call to avp->av_set_tim().
7033 * The same race exists in the net80211 power save queue handling
7034 * as well. Since multiple transmitting threads may queue frames
7035 * into the driver, as well as ps-poll and the driver transmitting
7036 * frames (and thus clearing the psq), it's quite possible that
7037 * a packet entering the PSQ and a ps-poll being handled will
7038 * race, causing the TIM to be cleared and not re-set.
7041 ath_node_set_tim(struct ieee80211_node *ni, int enable)
7044 struct ieee80211com *ic = ni->ni_ic;
7045 struct ath_softc *sc = ic->ic_ifp->if_softc;
7046 struct ath_node *an = ATH_NODE(ni);
7047 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
7051 an->an_stack_psq = enable;
7054 * This will get called for all operating modes,
7055 * even if avp->av_set_tim is unset.
7056 * It's currently set for hostap/ibss modes, but
7057 * the same infrastructure is used for both STA
7058 * and AP/IBSS node power save.
7060 if (avp->av_set_tim == NULL) {
7066 * If setting the bit, always set it here.
7067 * If clearing the bit, only clear it if the
7068 * software queue is also empty.
7070 * If the node has left power save, just clear the TIM
7071 * bit regardless of the state of the power save queue.
7073 * XXX TODO: although atomics are used, it's quite possible
7074 * that a race will occur between this and setting/clearing
7075 * in another thread. TX completion will always occur in
7076 * one thread; however, setting/clearing the TIM bit can come
7077 * from a variety of different process contexts!
7079 if (enable && an->an_tim_set == 1) {
7080 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7081 "%s: %6D: enable=%d, tim_set=1, ignoring\n",
7087 } else if (enable) {
7088 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7089 "%s: %6D: enable=%d, enabling TIM\n",
7096 changed = avp->av_set_tim(ni, enable);
7097 } else if (an->an_swq_depth == 0) {
7099 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7100 "%s: %6D: enable=%d, an_swq_depth == 0, disabling\n",
7107 changed = avp->av_set_tim(ni, enable);
7108 } else if (! an->an_is_powersave) {
7110 * disable regardless; the node isn't in powersave now
7112 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7113 "%s: %6D: enable=%d, an_pwrsave=0, disabling\n",
7120 changed = avp->av_set_tim(ni, enable);
7123 * psq disable, node is currently in powersave, node
7124 * software queue isn't empty, so don't clear the TIM bit
7128 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7129 "%s: %6D: enable=%d, an_swq_depth > 0, ignoring\n",
7139 struct ath_vap *avp = ATH_VAP(ni->ni_vap);
7142 * Some operating modes don't set av_set_tim(), so don't call it here.
7145 if (avp->av_set_tim == NULL)
7148 return (avp->av_set_tim(ni, enable));
7149 #endif /* ATH_SW_PSQ */
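/*
 * The decision logic of ath_node_set_tim() above, distilled into a
 * pure function (illustration only): returns non-zero when
 * avp->av_set_tim() should actually be invoked.
 */
#if 0
static int
example_tim_should_change(int enable, int tim_set, int is_powersave,
    int swq_depth)
{
	if (enable)
		return (tim_set == 0);	/* set, unless already set */
	/*
	 * Clear only once the software queue has drained, or if the
	 * node has left powersave entirely.
	 */
	return (swq_depth == 0 || !is_powersave);
}
#endif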
7153 * Set or update the TIM from the software queue.
7155 * Check the software queue depth before attempting to acquire
7156 * any locks; that avoids unnecessary lock contention. Then
7157 * re-check afterwards to ensure nothing has changed in the meantime.
7160 * set: This is designed to be called from the TX path, after
7161 * a frame has been queued, to see if the swq depth is > 0.
7163 * clear: This is designed to be called from the buffer completion point
7164 * (right now it's ath_tx_default_comp()) where the state of
7165 * a software queue has changed.
7167 * It makes sense to place it at buffer free / completion rather
7168 * than after each software queue operation, as there's no real
7169 * point in churning the TIM bit as the last frames in the software
7170 * queue are transmitted. If they fail and we retry them, we'd
7171 * just be setting the TIM bit again anyway.
7174 ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
7178 struct ath_node *an;
7179 struct ath_vap *avp;
7181 /* Don't do this for broadcast/etc frames */
7186 avp = ATH_VAP(ni->ni_vap);
7189 * And for operating modes without the TIM handler set, let's just skip the update.
7192 if (avp->av_set_tim == NULL)
7195 ATH_TX_LOCK_ASSERT(sc);
7198 if (an->an_is_powersave &&
7199 an->an_tim_set == 0 &&
7200 an->an_swq_depth != 0) {
7201 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7202 "%s: %6D: swq_depth>0, tim_set=0, set!\n",
7207 (void) avp->av_set_tim(ni, 1);
7211 * Don't bother grabbing the lock unless the queue is empty.
7213 if (an->an_swq_depth != 0)
7216 if (an->an_is_powersave &&
7217 an->an_stack_psq == 0 &&
7218 an->an_tim_set == 1 &&
7219 an->an_swq_depth == 0) {
7220 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7221 "%s: %6D: swq_depth=0, tim_set=1, psq_set=0,"
7227 (void) avp->av_set_tim(ni, 0);
7232 #endif /* ATH_SW_PSQ */
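/*
 * Illustrative call sites for ath_tx_update_tim() per the comments
 * above (sketch only): enable=1 from the TX path after a frame has
 * been queued, enable=0 from the completion path
 * (ath_tx_default_comp()) once a buffer has been freed.
 */
#if 0
	/* TX path: a frame was just added to the node's software queue */
	ath_tx_update_tim(sc, ni, 1);

	/* completion path: a buffer completed and was freed */
	ath_tx_update_tim(sc, ni, 0);
#endif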
7236 * A device_printf() equivalent that does not require gcc hacks
7239 athdev_printf(device_t dev, const char *ctl, ...)
7244 retval = device_print_prettyname(dev);
7245 __va_start(va, ctl);
7246 retval += kvprintf(ctl, va);
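/*
 * Example use (illustration only): like device_printf(), the output
 * is prefixed with the device name.
 */
#if 0
	athdev_printf(sc->sc_dev, "hardware error; resetting\n");
#endif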
7254 * Received a ps-poll frame from net80211.
7256 * Here we get a chance to serve out a software-queued frame ourselves
7257 * before we punt to net80211 to transmit one itself, either
7258 * because there's traffic in the net80211 psq, or a NULL frame to
7259 * indicate there's nothing else.
7262 ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m)
7265 struct ath_node *an;
7266 struct ath_vap *avp;
7267 struct ieee80211com *ic = ni->ni_ic;
7268 struct ath_softc *sc = ic->ic_ifp->if_softc;
7276 * Unassociated (temporary node) station.
7278 if (ni->ni_associd == 0)
7282 * We do have an active node, so let's begin looking into it.
7285 avp = ATH_VAP(ni->ni_vap);
7288 * For now, we just call the original ps-poll method.
7289 * Once we're ready to flip this on:
7291 * + Set leak to 1, as no matter what we're going to have to send a frame;
7293 * + Check the software queue and if there's something in it,
7294 * schedule the highest TID that has traffic from this node.
7295 * Then make sure we schedule the software scheduler to
7296 * run so it picks up said frame.
7298 * That way whatever happens, we'll at least send _a_ frame
7299 * to the given node.
7301 * Again, yes, it's crappy QoS if the node has multiple
7302 * TIDs worth of traffic - but let's get it working first
7303 * before we optimise it.
7305 * Also yes, there's definitely latency here - we're not
7306 * direct dispatching to the hardware in this path (and
7307 * we're likely being called from the packet receive path,
7308 * so going back into TX may be a little hairy!) but again
7309 * I'd like to get this working first before optimising
7316 * Legacy - we're called and the node isn't asleep.
7319 if (! an->an_is_powersave) {
7320 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7321 "%s: %6D: not in powersave?\n",
7326 avp->av_recv_pspoll(ni, m);
7331 * We're in powersave.
7335 an->an_leak_count = 1;
7338 * Now, if there are no frames queued to the node, just punt to net80211.
7341 * Don't bother checking if the TIM bit is set, we really
7342 * only care if there are any frames here!
7344 if (an->an_swq_depth == 0) {
7346 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7347 "%s: %6D: SWQ empty; punting to net80211\n",
7351 avp->av_recv_pspoll(ni, m);
7356 * Ok, let's schedule the highest TID that has traffic
7357 * and then schedule something.
7359 for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) {
7360 struct ath_tid *atid = &an->an_tid[tid];
7364 if (atid->axq_depth == 0)
7366 ath_tx_tid_sched(sc, atid);
7368 * XXX we could do a direct call to the TXQ
7369 * scheduler code here to optimise latency
7370 * at the expense of a REALLY deep callstack.
7373 taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask);
7374 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7375 "%s: %6D: leaking frame to TID %d\n",
7386 * XXX nothing in the TIDs at this point? Eek.
7388 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
7389 "%s: %6D: TIDs empty, but ath_node showed traffic?!\n",
7393 avp->av_recv_pspoll(ni, m);
7395 avp->av_recv_pspoll(ni, m);
7396 #endif /* ATH_SW_PSQ */
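/*
 * The TID walk above, distilled (illustration only): scan from the
 * highest TID down and return the first with queued traffic, or -1
 * if every TID is empty.
 */
#if 0
static int
example_highest_active_tid(const struct ath_node *an)
{
	int tid;

	for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--)
		if (an->an_tid[tid].axq_depth != 0)
			return (tid);
	return (-1);
}
#endif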
7399 MODULE_VERSION(if_ath, 1);
7400 MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
7401 #if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) || defined(ATH_DEBUG_ALQ)
7402 MODULE_DEPEND(if_ath, alq, 1, 1, 1);