| 1 | /*- |
| 2 | * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting |
| 3 | * All rights reserved. |
| 4 | * |
| 5 | * Redistribution and use in source and binary forms, with or without |
| 6 | * modification, are permitted provided that the following conditions |
| 7 | * are met: |
| 8 | * 1. Redistributions of source code must retain the above copyright |
| 9 | * notice, this list of conditions and the following disclaimer, |
| 10 | * without modification. |
| 11 | * 2. Redistributions in binary form must reproduce at minimum a disclaimer |
| 12 | * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any |
| 13 | * redistribution must be conditioned upon including a substantially |
| 14 | * similar Disclaimer requirement for further binary redistribution. |
| 15 | * |
| 16 | * NO WARRANTY |
| 17 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 18 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 19 | * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY |
| 20 | * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL |
| 21 | * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, |
| 22 | * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 23 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 24 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER |
| 25 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 26 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF |
| 27 | * THE POSSIBILITY OF SUCH DAMAGES. |
| 28 | */ |
| 29 | |
| 30 | #include <sys/cdefs.h> |
| 31 | |
| 32 | /* |
| 33 | * Driver for the Atheros Wireless LAN controller. |
| 34 | * |
| 35 | * This software is derived from work of Atsushi Onoe; his contribution |
| 36 | * is greatly appreciated. |
| 37 | */ |
| 38 | |
| 39 | #include "opt_inet.h" |
| 40 | #include "opt_ath.h" |
| 41 | /* |
| 42 | * This is needed for register operations which are performed |
| 43 | * by the driver - eg, calls to ath_hal_gettsf32(). |
| 44 | * |
| 45 | * It's also required for any AH_DEBUG checks in here, eg the |
| 46 | * module dependencies. |
| 47 | */ |
| 48 | #include "opt_ah.h" |
| 49 | #include "opt_wlan.h" |
| 50 | |
| 51 | #include <sys/param.h> |
| 52 | #include <sys/systm.h> |
| 53 | #include <sys/sysctl.h> |
| 54 | #include <sys/mbuf.h> |
| 55 | #include <sys/malloc.h> |
| 56 | #include <sys/lock.h> |
| 57 | #include <sys/mutex.h> |
| 58 | #include <sys/kernel.h> |
| 59 | #include <sys/socket.h> |
| 60 | #include <sys/sockio.h> |
| 61 | #include <sys/errno.h> |
| 62 | #include <sys/callout.h> |
| 63 | #include <sys/bus.h> |
| 64 | #include <sys/endian.h> |
| 65 | #include <sys/kthread.h> |
| 66 | #include <sys/taskqueue.h> |
| 67 | #include <sys/priv.h> |
| 68 | #include <sys/module.h> |
| 69 | #include <sys/ktr.h> |
| 70 | |
| 71 | #include <net/if.h> |
| 72 | #include <net/if_var.h> |
| 73 | #include <net/if_dl.h> |
| 74 | #include <net/if_media.h> |
| 75 | #include <net/if_types.h> |
| 76 | #include <net/if_arp.h> |
| 77 | #include <net/ethernet.h> |
| 78 | #include <net/if_llc.h> |
| 79 | #include <net/ifq_var.h> |
| 80 | |
| 81 | #include <netproto/802_11/ieee80211_var.h> |
| 82 | #include <netproto/802_11/ieee80211_regdomain.h> |
| 83 | #ifdef IEEE80211_SUPPORT_SUPERG |
| 84 | #include <netproto/802_11/ieee80211_superg.h> |
| 85 | #endif |
| 86 | #ifdef IEEE80211_SUPPORT_TDMA |
| 87 | #include <netproto/802_11/ieee80211_tdma.h> |
| 88 | #endif |
| 89 | |
| 90 | #include <net/bpf.h> |
| 91 | |
| 92 | #ifdef INET |
| 93 | #include <netinet/in.h> |
| 94 | #include <netinet/if_ether.h> |
| 95 | #endif |
| 96 | |
| 97 | #include <dev/netif/ath/ath/if_athvar.h> |
| 98 | #include <dev/netif/ath/ath_hal/ah_devid.h> /* XXX for softled */ |
| 99 | #include <dev/netif/ath/ath_hal/ah_diagcodes.h> |
| 100 | |
| 101 | #include <dev/netif/ath/ath/if_ath_debug.h> |
| 102 | #include <dev/netif/ath/ath/if_ath_misc.h> |
| 103 | #include <dev/netif/ath/ath/if_ath_tsf.h> |
| 104 | #include <dev/netif/ath/ath/if_ath_tx.h> |
| 105 | #include <dev/netif/ath/ath/if_ath_sysctl.h> |
| 106 | #include <dev/netif/ath/ath/if_ath_led.h> |
| 107 | #include <dev/netif/ath/ath/if_ath_keycache.h> |
| 108 | #include <dev/netif/ath/ath/if_ath_rx.h> |
| 109 | #include <dev/netif/ath/ath/if_ath_rx_edma.h> |
| 110 | #include <dev/netif/ath/ath/if_ath_tx_edma.h> |
| 111 | #include <dev/netif/ath/ath/if_ath_beacon.h> |
| 112 | #include <dev/netif/ath/ath/if_ath_btcoex.h> |
| 113 | #include <dev/netif/ath/ath/if_ath_spectral.h> |
| 114 | #include <dev/netif/ath/ath/if_ath_lna_div.h> |
| 115 | #include <dev/netif/ath/ath/if_athdfs.h> |
| 116 | |
| 117 | #ifdef ATH_TX99_DIAG |
| 118 | #include <dev/netif/ath/ath_tx99/ath_tx99.h> |
| 119 | #endif |
| 120 | |
| 121 | #ifdef ATH_DEBUG_ALQ |
| 122 | #include <dev/netif/ath/ath/if_ath_alq.h> |
| 123 | #endif |
| 124 | |
| 125 | /* |
| 126 | * Only enable this if you're working on PS-POLL support. |
| 127 | */ |
| 128 | #define ATH_SW_PSQ |
| 129 | |
| 130 | #ifdef __DragonFly__ |
| 131 | #define CURVNET_SET(name) |
| 132 | #define CURVNET_RESTORE() |
| 133 | #endif |
| 134 | |
| 135 | /* |
| 136 | * ATH_BCBUF determines the number of vap's that can transmit |
| 137 | * beacons and also (currently) the number of vap's that can |
| 138 | * have unique mac addresses/bssid. When staggering beacons |
| 139 | * 4 is probably a good max as otherwise the beacons become |
| 140 | * very closely spaced and there is limited time for cab q traffic |
| 141 | * to go out. You can burst beacons instead but that is not good |
| 142 | * for stations in power save and at some point you really want |
| 143 | * another radio (and channel). |
| 144 | * |
| 145 | * The limit on the number of mac addresses is tied to our use of |
| 146 | * the U/L bit and tracking addresses in a byte; it would be |
| 147 | * worthwhile to allow more for applications like proxy sta. |
| 148 | */ |
| 149 | CTASSERT(ATH_BCBUF <= 8); |
| 150 | |
| 151 | static struct ieee80211vap *ath_vap_create(struct ieee80211com *, |
| 152 | const char [IFNAMSIZ], int, enum ieee80211_opmode, int, |
| 153 | const uint8_t [IEEE80211_ADDR_LEN], |
| 154 | const uint8_t [IEEE80211_ADDR_LEN]); |
| 155 | static void ath_vap_delete(struct ieee80211vap *); |
| 156 | static void ath_init(void *); |
| 157 | static void ath_stop_locked(struct ifnet *); |
| 158 | static void ath_stop(struct ifnet *); |
| 159 | static int ath_reset_vap(struct ieee80211vap *, u_long); |
| 160 | #if 0 |
| 161 | static int ath_transmit(struct ifnet *ifp, struct mbuf *m); |
| 162 | static void ath_qflush(struct ifnet *ifp); |
| 163 | #endif |
| 164 | static int ath_media_change(struct ifnet *); |
| 165 | static void ath_watchdog(void *); |
| 166 | static int ath_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); |
| 167 | static void ath_fatal_proc(void *, int); |
| 168 | static void ath_bmiss_vap(struct ieee80211vap *); |
| 169 | static void ath_bmiss_proc(void *, int); |
| 170 | static void ath_key_update_begin(struct ieee80211vap *); |
| 171 | static void ath_key_update_end(struct ieee80211vap *); |
| 172 | static void ath_update_mcast(struct ifnet *); |
| 173 | static void ath_update_promisc(struct ifnet *); |
| 174 | static void ath_updateslot(struct ifnet *); |
| 175 | static void ath_bstuck_proc(void *, int); |
| 176 | static void ath_reset_proc(void *, int); |
| 177 | static int ath_desc_alloc(struct ath_softc *); |
| 178 | static void ath_desc_free(struct ath_softc *); |
| 179 | static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, |
| 180 | const uint8_t [IEEE80211_ADDR_LEN]); |
| 181 | static void ath_node_cleanup(struct ieee80211_node *); |
| 182 | static void ath_node_free(struct ieee80211_node *); |
| 183 | static void ath_node_getsignal(const struct ieee80211_node *, |
| 184 | int8_t *, int8_t *); |
| 185 | static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); |
| 186 | static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); |
| 187 | static int ath_tx_setup(struct ath_softc *, int, int); |
| 188 | static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); |
| 189 | static void ath_tx_cleanup(struct ath_softc *); |
| 190 | static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, |
| 191 | int dosched); |
| 192 | static void ath_tx_proc_q0(void *, int); |
| 193 | static void ath_tx_proc_q0123(void *, int); |
| 194 | static void ath_tx_proc(void *, int); |
| 195 | static void ath_txq_sched_tasklet(void *, int); |
| 196 | static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); |
| 197 | static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); |
| 198 | static void ath_scan_start(struct ieee80211com *); |
| 199 | static void ath_scan_end(struct ieee80211com *); |
| 200 | static void ath_set_channel(struct ieee80211com *); |
| 201 | #ifdef ATH_ENABLE_11N |
| 202 | static void ath_update_chw(struct ieee80211com *); |
| 203 | #endif /* ATH_ENABLE_11N */ |
| 204 | static void ath_calibrate(void *); |
| 205 | static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); |
| 206 | static void ath_setup_stationkey(struct ieee80211_node *); |
| 207 | static void ath_newassoc(struct ieee80211_node *, int); |
| 208 | static int ath_setregdomain(struct ieee80211com *, |
| 209 | struct ieee80211_regdomain *, int, |
| 210 | struct ieee80211_channel []); |
| 211 | static void ath_getradiocaps(struct ieee80211com *, int, int *, |
| 212 | struct ieee80211_channel []); |
| 213 | static int ath_getchannels(struct ath_softc *); |
| 214 | |
| 215 | static int ath_rate_setup(struct ath_softc *, u_int mode); |
| 216 | static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); |
| 217 | |
| 218 | static void ath_announce(struct ath_softc *); |
| 219 | |
| 220 | static void ath_dfs_tasklet(void *, int); |
| 221 | #if 0 |
| 222 | static void ath_node_powersave(struct ieee80211_node *, int); |
| 223 | static void ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *); |
| 224 | #endif |
| 225 | static int ath_node_set_tim(struct ieee80211_node *, int); |
| 226 | |
| 227 | #ifdef IEEE80211_SUPPORT_TDMA |
| 228 | #include <dev/netif/ath/ath/if_ath_tdma.h> |
| 229 | #endif |
| 230 | |
| 231 | SYSCTL_DECL(_hw_ath); |
| 232 | |
| 233 | /* XXX validate sysctl values */ |
| 234 | static int ath_longcalinterval = 30; /* long cals every 30 secs */ |
| 235 | SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval, |
| 236 | 0, "long chip calibration interval (secs)"); |
| 237 | static int ath_shortcalinterval = 100; /* short cals every 100 ms */ |
| 238 | SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval, |
| 239 | 0, "short chip calibration interval (msecs)"); |
| 240 | static int ath_resetcalinterval = 20*60; /* reset cal state 20 mins */ |
| 241 | SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval, |
| 242 | 0, "reset chip calibration results (secs)"); |
| 243 | static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */ |
| 244 | SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval, |
| 245 | 0, "ANI calibration (msecs)"); |
| 246 | |
| 247 | int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ |
| 248 | SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf, |
| 249 | 0, "rx buffers allocated"); |
| 250 | TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf); |
| 251 | int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ |
| 252 | SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf, |
| 253 | 0, "tx buffers allocated"); |
| 254 | TUNABLE_INT("hw.ath.txbuf", &ath_txbuf); |
| 255 | int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */ |
| 256 | SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt, |
| 257 | 0, "tx (mgmt) buffers allocated"); |
| 258 | TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt); |
| 259 | |
| 260 | int ath_bstuck_threshold = 4; /* max missed beacons */ |
| 261 | SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold, |
| 262 | 0, "max missed beacon xmits before chip reset"); |
| 263 | |
| 264 | MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); |
| 265 | |
| 266 | void |
| 267 | ath_legacy_attach_comp_func(struct ath_softc *sc) |
| 268 | { |
| 269 | |
| 270 | /* |
| 271 | * Special case certain configurations. Note the |
| 272 | * CAB queue is handled specially by these, so don't |
| 273 | * include it when checking the txq setup mask. |
| 274 | */ |
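|  | /* NB: after the CAB queue bit is masked out, 0x01 means only h/w queue 0 was set up and 0x0f means queues 0-3 were set up. */ |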
| 275 | switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) { |
| 276 | case 0x01: |
| 277 | TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); |
| 278 | break; |
| 279 | case 0x0f: |
| 280 | TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); |
| 281 | break; |
| 282 | default: |
| 283 | TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); |
| 284 | break; |
| 285 | } |
| 286 | } |
| 287 | |
| 288 | #define HAL_MODE_HT20 (HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20) |
| 289 | #define HAL_MODE_HT40 \ |
| 290 | (HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \ |
| 291 | HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS) |
| 292 | int |
| 293 | ath_attach(u_int16_t devid, struct ath_softc *sc) |
| 294 | { |
| 295 | struct ifnet *ifp; |
| 296 | struct ieee80211com *ic; |
| 297 | struct ath_hal *ah = NULL; |
| 298 | HAL_STATUS status; |
| 299 | int error = 0, i; |
| 300 | u_int wmodes; |
| 301 | uint8_t macaddr[IEEE80211_ADDR_LEN]; |
| 302 | int rx_chainmask, tx_chainmask; |
| 303 | |
| 304 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); |
| 305 | |
| 306 | CURVNET_SET(vnet0); |
| 307 | ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); |
| 308 | if (ifp == NULL) { |
| 309 | device_printf(sc->sc_dev, "can not if_alloc()\n"); |
| 310 | error = ENOSPC; |
| 311 | CURVNET_RESTORE(); |
| 312 | goto bad; |
| 313 | } |
| 314 | ic = ifp->if_l2com; |
| 315 | |
| 316 | /* set these up early for if_printf use */ |
| 317 | if_initname(ifp, device_get_name(sc->sc_dev), |
| 318 | device_get_unit(sc->sc_dev)); |
| 319 | CURVNET_RESTORE(); |
| 320 | |
| 321 | /* prepare sysctl tree for use in sub modules */ |
| 322 | sysctl_ctx_init(&sc->sc_sysctl_ctx); |
| 323 | sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx, |
| 324 | SYSCTL_STATIC_CHILDREN(_hw), |
| 325 | OID_AUTO, |
| 326 | device_get_nameunit(sc->sc_dev), |
| 327 | CTLFLAG_RD, 0, ""); |
| 328 | |
| 329 | |
| 330 | ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, |
| 331 | sc->sc_eepromdata, &status); |
| 332 | if (ah == NULL) { |
| 333 | if_printf(ifp, "unable to attach hardware; HAL status %u\n", |
| 334 | status); |
| 335 | error = ENXIO; |
| 336 | goto bad; |
| 337 | } |
| 338 | sc->sc_ah = ah; |
| 339 | sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ |
| 340 | #ifdef ATH_DEBUG |
| 341 | sc->sc_debug = ath_debug; |
| 342 | #endif |
| 343 | |
| 344 | /* |
| 345 | * Setup the DMA/EDMA functions based on the current |
| 346 | * hardware support. |
| 347 | * |
| 348 | * This is required before the descriptors are allocated. |
| 349 | */ |
| 350 | if (ath_hal_hasedma(sc->sc_ah)) { |
| 351 | sc->sc_isedma = 1; |
| 352 | ath_recv_setup_edma(sc); |
| 353 | ath_xmit_setup_edma(sc); |
| 354 | } else { |
| 355 | ath_recv_setup_legacy(sc); |
| 356 | ath_xmit_setup_legacy(sc); |
| 357 | } |
| 358 | |
| 359 | /* |
| 360 | * Check if the MAC has multi-rate retry support. |
| 361 | * We do this by trying to setup a fake extended |
| 362 | * descriptor. MAC's that don't have support will |
| 363 | * return false w/o doing anything. MAC's that do |
| 364 | * support it will return true w/o doing anything. |
| 365 | */ |
| 366 | sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); |
| 367 | |
| 368 | /* |
| 369 | * Check if the device has hardware counters for PHY |
| 370 | * errors. If so we need to enable the MIB interrupt |
| 371 | * so we can act on stat triggers. |
| 372 | */ |
| 373 | if (ath_hal_hwphycounters(ah)) |
| 374 | sc->sc_needmib = 1; |
| 375 | |
| 376 | /* |
| 377 | * Get the hardware key cache size. |
| 378 | */ |
| 379 | sc->sc_keymax = ath_hal_keycachesize(ah); |
| 380 | if (sc->sc_keymax > ATH_KEYMAX) { |
| 381 | if_printf(ifp, "Warning, using only %u of %u key cache slots\n", |
| 382 | ATH_KEYMAX, sc->sc_keymax); |
| 383 | sc->sc_keymax = ATH_KEYMAX; |
| 384 | } |
| 385 | /* |
| 386 | * Reset the key cache since some parts do not |
| 387 | * reset the contents on initial power up. |
| 388 | */ |
| 389 | for (i = 0; i < sc->sc_keymax; i++) |
| 390 | ath_hal_keyreset(ah, i); |
| 391 | |
| 392 | /* |
| 393 | * Collect the default channel list. |
| 394 | */ |
| 395 | error = ath_getchannels(sc); |
| 396 | if (error != 0) |
| 397 | goto bad; |
| 398 | |
| 399 | /* |
| 400 | * Setup rate tables for all potential media types. |
| 401 | */ |
| 402 | ath_rate_setup(sc, IEEE80211_MODE_11A); |
| 403 | ath_rate_setup(sc, IEEE80211_MODE_11B); |
| 404 | ath_rate_setup(sc, IEEE80211_MODE_11G); |
| 405 | ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); |
| 406 | ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); |
| 407 | ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); |
| 408 | ath_rate_setup(sc, IEEE80211_MODE_11NA); |
| 409 | ath_rate_setup(sc, IEEE80211_MODE_11NG); |
| 410 | ath_rate_setup(sc, IEEE80211_MODE_HALF); |
| 411 | ath_rate_setup(sc, IEEE80211_MODE_QUARTER); |
| 412 | |
| 413 | /* NB: setup here so ath_rate_update is happy */ |
| 414 | ath_setcurmode(sc, IEEE80211_MODE_11A); |
| 415 | |
| 416 | /* |
| 417 | * Allocate TX descriptors and populate the lists. |
| 418 | */ |
| 419 | wlan_assert_serialized(); |
| 420 | wlan_serialize_exit(); |
| 421 | error = ath_desc_alloc(sc); |
| 422 | wlan_serialize_enter(); |
| 423 | if (error != 0) { |
| 424 | if_printf(ifp, "failed to allocate TX descriptors: %d\n", |
| 425 | error); |
| 426 | goto bad; |
| 427 | } |
| 428 | error = ath_txdma_setup(sc); |
| 429 | if (error != 0) { |
| 430 | if_printf(ifp, "failed to allocate TX descriptors: %d\n", |
| 431 | error); |
| 432 | goto bad; |
| 433 | } |
| 434 | |
| 435 | /* |
| 436 | * Allocate RX descriptors and populate the lists. |
| 437 | */ |
| 438 | error = ath_rxdma_setup(sc); |
| 439 | if (error != 0) { |
| 440 | if_printf(ifp, "failed to allocate RX descriptors: %d\n", |
| 441 | error); |
| 442 | goto bad; |
| 443 | } |
| 444 | |
| 445 | callout_init_mp(&sc->sc_cal_ch); |
| 446 | callout_init_mp(&sc->sc_wd_ch); |
| 447 | |
| 448 | ATH_TXBUF_LOCK_INIT(sc); |
| 449 | |
| 450 | sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, |
| 451 | taskqueue_thread_enqueue, &sc->sc_tq); |
| 452 | taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1, |
| 453 | "%s taskq", ifp->if_xname); |
| 454 | |
| 455 | TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc); |
| 456 | TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); |
| 457 | TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); |
| 458 | TASK_INIT(&sc->sc_resettask,0, ath_reset_proc, sc); |
| 459 | TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc); |
| 460 | TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc); |
| 461 | |
| 462 | /* |
| 463 | * Allocate hardware transmit queues: one queue for |
| 464 | * beacon frames and one data queue for each QoS |
| 465 | * priority. Note that the hal handles resetting |
| 466 | * these queues at the needed time. |
| 467 | * |
| 468 | * XXX PS-Poll |
| 469 | */ |
| 470 | sc->sc_bhalq = ath_beaconq_setup(sc); |
| 471 | if (sc->sc_bhalq == (u_int) -1) { |
| 472 | if_printf(ifp, "unable to setup a beacon xmit queue!\n"); |
| 473 | error = EIO; |
| 474 | goto bad2; |
| 475 | } |
| 476 | sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); |
| 477 | if (sc->sc_cabq == NULL) { |
| 478 | if_printf(ifp, "unable to setup CAB xmit queue!\n"); |
| 479 | error = EIO; |
| 480 | goto bad2; |
| 481 | } |
| 482 | /* NB: ensure BK queue is the lowest priority h/w queue */ |
| 483 | if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { |
| 484 | if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", |
| 485 | ieee80211_wme_acnames[WME_AC_BK]); |
| 486 | error = EIO; |
| 487 | goto bad2; |
| 488 | } |
| 489 | if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || |
| 490 | !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || |
| 491 | !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { |
| 492 | /* |
| 493 | * Not enough hardware tx queues to properly do WME; |
| 494 | * just punt and assign them all to the same h/w queue. |
| 495 | * We could do a better job of this if, for example, |
| 496 | * we allocate queues when we switch from station to |
| 497 | * AP mode. |
| 498 | */ |
| 499 | if (sc->sc_ac2q[WME_AC_VI] != NULL) |
| 500 | ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); |
| 501 | if (sc->sc_ac2q[WME_AC_BE] != NULL) |
| 502 | ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); |
| 503 | sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; |
| 504 | sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; |
| 505 | sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; |
| 506 | } |
| 507 | |
| 508 | /* |
| 509 | * Attach the TX completion function. |
| 510 | * |
| 511 | * The non-EDMA chips may have some special case optimisations; |
| 512 | * this method gives everyone a chance to attach cleanly. |
| 513 | */ |
| 514 | sc->sc_tx.xmit_attach_comp_func(sc); |
| 515 | |
| 516 | /* |
| 517 | * Setup rate control. Some rate control modules |
| 518 | * call back to change the antenna state so expose |
| 519 | * the necessary entry points. |
| 520 | * XXX maybe belongs in struct ath_ratectrl? |
| 521 | */ |
| 522 | sc->sc_setdefantenna = ath_setdefantenna; |
| 523 | sc->sc_rc = ath_rate_attach(sc); |
| 524 | if (sc->sc_rc == NULL) { |
| 525 | error = EIO; |
| 526 | goto bad2; |
| 527 | } |
| 528 | |
| 529 | /* Attach DFS module */ |
| 530 | if (! ath_dfs_attach(sc)) { |
| 531 | device_printf(sc->sc_dev, |
| 532 | "%s: unable to attach DFS\n", __func__); |
| 533 | error = EIO; |
| 534 | goto bad2; |
| 535 | } |
| 536 | |
| 537 | /* Attach spectral module */ |
| 538 | if (ath_spectral_attach(sc) < 0) { |
| 539 | device_printf(sc->sc_dev, |
| 540 | "%s: unable to attach spectral\n", __func__); |
| 541 | error = EIO; |
| 542 | goto bad2; |
| 543 | } |
| 544 | |
| 545 | /* Attach bluetooth coexistence module */ |
| 546 | if (ath_btcoex_attach(sc) < 0) { |
| 547 | device_printf(sc->sc_dev, |
| 548 | "%s: unable to attach bluetooth coexistence\n", __func__); |
| 549 | error = EIO; |
| 550 | goto bad2; |
| 551 | } |
| 552 | |
| 553 | /* Attach LNA diversity module */ |
| 554 | if (ath_lna_div_attach(sc) < 0) { |
| 555 | device_printf(sc->sc_dev, |
| 556 | "%s: unable to attach LNA diversity\n", __func__); |
| 557 | error = EIO; |
| 558 | goto bad2; |
| 559 | } |
| 560 | |
| 561 | /* Start DFS processing tasklet */ |
| 562 | TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc); |
| 563 | |
| 564 | /* Configure LED state */ |
| 565 | sc->sc_blinking = 0; |
| 566 | sc->sc_ledstate = 1; |
| 567 | sc->sc_ledon = 0; /* low true */ |
| 568 | sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ |
| 569 | callout_init_mp(&sc->sc_ledtimer); |
| 570 | |
| 571 | /* |
| 572 | * Don't setup hardware-based blinking. |
| 573 | * |
| 574 | * Although some NICs may have this configured in the |
| 575 | * default reset register values, the user may wish |
| 576 | * to alter which pins have which function. |
| 577 | * |
| 578 | * The reference driver attaches the MAC network LED to GPIO1 and |
| 579 | * the MAC power LED to GPIO2. However, the DWA-552 cardbus |
| 580 | * NIC has these reversed. |
| 581 | */ |
| 582 | sc->sc_hardled = (1 == 0); |
| 583 | sc->sc_led_net_pin = -1; |
| 584 | sc->sc_led_pwr_pin = -1; |
| 585 | /* |
| 586 | * Auto-enable soft led processing for IBM cards and for |
| 587 | * 5211 minipci cards. Users can also manually enable/disable |
| 588 | * support with a sysctl. |
| 589 | */ |
| 590 | sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); |
| 591 | ath_led_config(sc); |
| 592 | ath_hal_setledstate(ah, HAL_LED_INIT); |
| 593 | |
| 594 | ifp->if_softc = sc; |
| 595 | ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; |
| 596 | #if 0 |
| 597 | ifp->if_transmit = ath_transmit; |
| 598 | ifp->if_qflush = ath_qflush; |
| 599 | #endif |
| 600 | ifp->if_ioctl = ath_ioctl; |
| 601 | ifp->if_init = ath_init; |
| 602 | ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN); |
| 603 | #if 0 |
| 604 | ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; |
| 605 | IFQ_SET_READY(&ifp->if_snd); |
| 606 | #endif |
| 607 | |
| 608 | ic->ic_ifp = ifp; |
| 609 | /* XXX not right but it's not used anywhere important */ |
| 610 | ic->ic_phytype = IEEE80211_T_OFDM; |
| 611 | ic->ic_opmode = IEEE80211_M_STA; |
| 612 | ic->ic_caps = |
| 613 | IEEE80211_C_STA /* station mode */ |
| 614 | | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ |
| 615 | | IEEE80211_C_HOSTAP /* hostap mode */ |
| 616 | | IEEE80211_C_MONITOR /* monitor mode */ |
| 617 | | IEEE80211_C_AHDEMO /* adhoc demo mode */ |
| 618 | | IEEE80211_C_WDS /* 4-address traffic works */ |
| 619 | | IEEE80211_C_MBSS /* mesh point link mode */ |
| 620 | | IEEE80211_C_SHPREAMBLE /* short preamble supported */ |
| 621 | | IEEE80211_C_SHSLOT /* short slot time supported */ |
| 622 | | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ |
| 623 | #ifndef ATH_ENABLE_11N |
| 624 | | IEEE80211_C_BGSCAN /* capable of bg scanning */ |
| 625 | #endif |
| 626 | | IEEE80211_C_TXFRAG /* handle tx frags */ |
| 627 | #ifdef ATH_ENABLE_DFS |
| 628 | | IEEE80211_C_DFS /* Enable radar detection */ |
| 629 | #endif |
| 630 | ; |
| 631 | /* |
| 632 | * Query the hal to figure out h/w crypto support. |
| 633 | */ |
| 634 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) |
| 635 | ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; |
| 636 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) |
| 637 | ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; |
| 638 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) |
| 639 | ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; |
| 640 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) |
| 641 | ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; |
| 642 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { |
| 643 | ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; |
| 644 | /* |
| 645 | * Check if h/w does the MIC and/or whether the |
| 646 | * separate key cache entries are required to |
| 647 | * handle both tx+rx MIC keys. |
| 648 | */ |
| 649 | if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) |
| 650 | ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; |
| 651 | /* |
| 652 | * If the h/w supports storing tx+rx MIC keys |
| 653 | * in one cache slot automatically enable use. |
| 654 | */ |
| 655 | if (ath_hal_hastkipsplit(ah) || |
| 656 | !ath_hal_settkipsplit(ah, AH_FALSE)) |
| 657 | sc->sc_splitmic = 1; |
| 658 | /* |
| 659 | * If the h/w can do TKIP MIC together with WME then |
| 660 | * we use it; otherwise we force the MIC to be done |
| 661 | * in software by the net80211 layer. |
| 662 | */ |
| 663 | if (ath_hal_haswmetkipmic(ah)) |
| 664 | sc->sc_wmetkipmic = 1; |
| 665 | } |
| 666 | sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); |
| 667 | /* |
| 668 | * Check for multicast key search support. |
| 669 | */ |
| 670 | if (ath_hal_hasmcastkeysearch(sc->sc_ah) && |
| 671 | !ath_hal_getmcastkeysearch(sc->sc_ah)) { |
| 672 | ath_hal_setmcastkeysearch(sc->sc_ah, 1); |
| 673 | } |
| 674 | sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); |
| 675 | /* |
| 676 | * Mark key cache slots associated with global keys |
| 677 | * as in use. If we knew TKIP was not to be used we |
| 678 | * could leave the +32, +64, and +32+64 slots free. |
| 679 | */ |
| 680 | for (i = 0; i < IEEE80211_WEP_NKID; i++) { |
| 681 | setbit(sc->sc_keymap, i); |
| 682 | setbit(sc->sc_keymap, i+64); |
| 683 | if (sc->sc_splitmic) { |
| 684 | setbit(sc->sc_keymap, i+32); |
| 685 | setbit(sc->sc_keymap, i+32+64); |
| 686 | } |
| 687 | } |
| 688 | /* |
| 689 | * TPC support can be done either with a global cap or |
| 690 | * per-packet support. The latter is not available on |
| 691 | * all parts. We're a bit pedantic here as all parts |
| 692 | * support a global cap. |
| 693 | */ |
| 694 | if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) |
| 695 | ic->ic_caps |= IEEE80211_C_TXPMGT; |
| 696 | |
| 697 | /* |
| 698 | * Mark WME capability only if we have sufficient |
| 699 | * hardware queues to do proper priority scheduling. |
| 700 | */ |
| 701 | if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) |
| 702 | ic->ic_caps |= IEEE80211_C_WME; |
| 703 | /* |
| 704 | * Check for misc other capabilities. |
| 705 | */ |
| 706 | if (ath_hal_hasbursting(ah)) |
| 707 | ic->ic_caps |= IEEE80211_C_BURST; |
| 708 | sc->sc_hasbmask = ath_hal_hasbssidmask(ah); |
| 709 | sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah); |
| 710 | sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); |
| 711 | sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah); |
| 712 | sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah); |
| 713 | sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah); |
| 714 | sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah); |
| 715 | sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah); |
| 716 | |
| 717 | if (ath_hal_hasfastframes(ah)) |
| 718 | ic->ic_caps |= IEEE80211_C_FF; |
| 719 | wmodes = ath_hal_getwirelessmodes(ah); |
| 720 | if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO)) |
| 721 | ic->ic_caps |= IEEE80211_C_TURBOP; |
| 722 | #ifdef IEEE80211_SUPPORT_TDMA |
| 723 | if (ath_hal_macversion(ah) > 0x78) { |
| 724 | ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */ |
| 725 | ic->ic_tdma_update = ath_tdma_update; |
| 726 | } |
| 727 | #endif |
| 728 | |
| 729 | /* |
| 730 | * TODO: enforce that at least this many frames are available |
| 731 | * in the txbuf list before allowing data frames (raw or |
| 732 | * otherwise) to be transmitted. |
| 733 | */ |
| 734 | sc->sc_txq_data_minfree = 10; |
| 735 | /* |
| 736 | * Leave this as default to maintain legacy behaviour. |
| 737 | * Shortening the cabq/mcastq may end up causing some |
| 738 | * undesirable behaviour. |
| 739 | */ |
| 740 | sc->sc_txq_mcastq_maxdepth = ath_txbuf; |
| 741 | |
| 742 | /* |
| 743 | * How deep can the node software TX queue get whilst it's asleep. |
| 744 | */ |
| 745 | sc->sc_txq_node_psq_maxdepth = 16; |
| 746 | |
| 747 | /* |
| 748 | * Default the maximum queue depth for a given node |
| 749 | * to 1/4'th the TX buffers, or 64, whichever |
| 750 | * is larger. |
| 751 | */ |
| 752 | sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4); |
| 753 | |
| 754 | /* Enable CABQ by default */ |
| 755 | sc->sc_cabq_enable = 1; |
| 756 | |
| 757 | /* |
| 758 | * Allow the TX and RX chainmasks to be overridden by |
| 759 | * environment variables and/or device.hints. |
| 760 | * |
| 761 | * This must be done early - before the hardware is |
| 762 | * calibrated or before the 802.11n stream calculation |
| 763 | * is done. |
| 764 | */ |
| 765 | if (resource_int_value(device_get_name(sc->sc_dev), |
| 766 | device_get_unit(sc->sc_dev), "rx_chainmask", |
| 767 | &rx_chainmask) == 0) { |
| 768 | device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n", |
| 769 | rx_chainmask); |
| 770 | (void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask); |
| 771 | } |
| 772 | if (resource_int_value(device_get_name(sc->sc_dev), |
| 773 | device_get_unit(sc->sc_dev), "tx_chainmask", |
| 774 | &tx_chainmask) == 0) { |
| 775 | device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n", |
| 776 | tx_chainmask); |
| 777 | (void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask); |
| 778 | } |
| 779 | |
| 780 | /* |
| 781 | * Query the TX/RX chainmask configuration. |
| 782 | * |
| 783 | * This is only relevant for 11n devices. |
| 784 | */ |
| 785 | ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask); |
| 786 | ath_hal_gettxchainmask(ah, &sc->sc_txchainmask); |
| 787 | |
| 788 | /* |
| 789 | * Disable MRR with protected frames by default. |
| 790 | * Only 802.11n series NICs can handle this. |
| 791 | */ |
| 792 | sc->sc_mrrprot = 0; /* XXX should be a capability */ |
| 793 | |
| 794 | /* |
| 795 | * Query the enterprise mode information from the HAL. |
| 796 | */ |
| 797 | if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0, |
| 798 | &sc->sc_ent_cfg) == HAL_OK) |
| 799 | sc->sc_use_ent = 1; |
| 800 | |
| 801 | #ifdef ATH_ENABLE_11N |
| 802 | /* |
| 803 | * Query HT capabilities |
| 804 | */ |
| 805 | if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK && |
| 806 | (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) { |
| 807 | uint32_t rxs, txs; |
| 808 | |
| 809 | device_printf(sc->sc_dev, "[HT] enabling HT modes\n"); |
| 810 | |
| 811 | sc->sc_mrrprot = 1; /* XXX should be a capability */ |
| 812 | |
| 813 | ic->ic_htcaps = IEEE80211_HTC_HT /* HT operation */ |
| 814 | | IEEE80211_HTC_AMPDU /* A-MPDU tx/rx */ |
| 815 | | IEEE80211_HTC_AMSDU /* A-MSDU tx/rx */ |
| 816 | | IEEE80211_HTCAP_MAXAMSDU_3839 |
| 817 | /* max A-MSDU length */ |
| 818 | | IEEE80211_HTCAP_SMPS_OFF; /* SM power save off */ |
| 820 | |
| 821 | /* |
| 822 | * Enable short-GI for HT20 only if the hardware |
| 823 | * advertises support. |
| 824 | * Notably, anything earlier than the AR9287 doesn't. |
| 825 | */ |
| 826 | if ((ath_hal_getcapability(ah, |
| 827 | HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) && |
| 828 | (wmodes & HAL_MODE_HT20)) { |
| 829 | device_printf(sc->sc_dev, |
| 830 | "[HT] enabling short-GI in 20MHz mode\n"); |
| 831 | ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20; |
| 832 | } |
| 833 | |
| 834 | if (wmodes & HAL_MODE_HT40) |
| 835 | ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40 |
| 836 | | IEEE80211_HTCAP_SHORTGI40; |
| 837 | |
| 838 | /* |
| 839 | * TX/RX streams need to be taken into account when |
| 840 | * negotiating which MCS rates it'll receive and |
| 841 | * what MCS rates are available for TX. |
| 842 | */ |
| 843 | (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs); |
| 844 | (void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs); |
| 845 | ic->ic_txstream = txs; |
| 846 | ic->ic_rxstream = rxs; |
| 847 | |
| 848 | /* |
| 849 | * Setup TX and RX STBC based on what the HAL allows and |
| 850 | * the currently configured chainmask set. |
| 851 | * Ie - don't enable STBC TX if only one chain is enabled. |
| 852 | * STBC RX is fine on a single RX chain; it just won't |
| 853 | * provide any real benefit. |
| 854 | */ |
| 855 | if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0, |
| 856 | NULL) == HAL_OK) { |
| 857 | sc->sc_rx_stbc = 1; |
| 858 | device_printf(sc->sc_dev, |
| 859 | "[HT] 1 stream STBC receive enabled\n"); |
| 860 | ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM; |
| 861 | } |
| 862 | if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0, |
| 863 | NULL) == HAL_OK) { |
| 864 | sc->sc_tx_stbc = 1; |
| 865 | device_printf(sc->sc_dev, |
| 866 | "[HT] 1 stream STBC transmit enabled\n"); |
| 867 | ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC; |
| 868 | } |
| 869 | |
| 870 | (void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1, |
| 871 | &sc->sc_rts_aggr_limit); |
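|  | /* NB: 64 KiB is treated as the unlimited/default case; only note when the h/w caps RTS-protected aggregates below that. */ |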
| 872 | if (sc->sc_rts_aggr_limit != (64 * 1024)) |
| 873 | device_printf(sc->sc_dev, |
| 874 | "[HT] RTS aggregates limited to %d KiB\n", |
| 875 | sc->sc_rts_aggr_limit / 1024); |
| 876 | |
| 877 | device_printf(sc->sc_dev, |
| 878 | "[HT] %d RX streams; %d TX streams\n", rxs, txs); |
| 879 | } |
| 880 | #endif |
| 881 | |
| 882 | /* |
| 883 | * Initial aggregation settings. |
| 884 | */ |
| 885 | sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH; |
| 886 | sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH; |
| 887 | sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW; |
| 888 | sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH; |
| 889 | sc->sc_aggr_limit = ATH_AGGR_MAXSIZE; |
| 890 | sc->sc_delim_min_pad = 0; |
| 891 | |
| 892 | /* |
| 893 | * Check if the hardware requires PCI register serialisation. |
| 894 | * Some of the Owl based MACs require this. |
| 895 | */ |
| 896 | if (ncpus > 1 && |
| 897 | ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR, |
| 898 | 0, NULL) == HAL_OK) { |
| 899 | sc->sc_ah->ah_config.ah_serialise_reg_war = 1; |
| 900 | device_printf(sc->sc_dev, |
| 901 | "Enabling register serialisation\n"); |
| 902 | } |
| 903 | |
| 904 | /* |
| 905 | * Initialise the deferred completed RX buffer list. |
| 906 | */ |
| 907 | TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]); |
| 908 | TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]); |
| 909 | |
| 910 | /* |
| 911 | * Indicate we need the 802.11 header padded to a |
| 912 | * 32-bit boundary for 4-address and QoS frames. |
| 913 | */ |
| 914 | ic->ic_flags |= IEEE80211_F_DATAPAD; |
| 915 | |
| 916 | /* |
| 917 | * Query the hal about antenna support. |
| 918 | */ |
| 919 | sc->sc_defant = ath_hal_getdefantenna(ah); |
| 920 | |
| 921 | /* |
| 922 | * Not all chips have the VEOL support we want to |
| 923 | * use with IBSS beacons; check here for it. |
| 924 | */ |
| 925 | sc->sc_hasveol = ath_hal_hasveol(ah); |
| 926 | |
| 927 | /* get mac address from hardware */ |
| 928 | ath_hal_getmac(ah, macaddr); |
| 929 | if (sc->sc_hasbmask) |
| 930 | ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); |
| 931 | |
| 932 | /* NB: used to size node table key mapping array */ |
| 933 | ic->ic_max_keyix = sc->sc_keymax; |
| 934 | /* call MI attach routine. */ |
| 935 | ieee80211_ifattach(ic, macaddr); |
| 936 | ic->ic_setregdomain = ath_setregdomain; |
| 937 | ic->ic_getradiocaps = ath_getradiocaps; |
| 938 | sc->sc_opmode = HAL_M_STA; |
| 939 | |
| 940 | /* override default methods */ |
| 941 | ic->ic_newassoc = ath_newassoc; |
| 942 | ic->ic_updateslot = ath_updateslot; |
| 943 | ic->ic_wme.wme_update = ath_wme_update; |
| 944 | ic->ic_vap_create = ath_vap_create; |
| 945 | ic->ic_vap_delete = ath_vap_delete; |
| 946 | ic->ic_raw_xmit = ath_raw_xmit; |
| 947 | ic->ic_update_mcast = ath_update_mcast; |
| 948 | ic->ic_update_promisc = ath_update_promisc; |
| 949 | ic->ic_node_alloc = ath_node_alloc; |
| 950 | sc->sc_node_free = ic->ic_node_free; |
| 951 | ic->ic_node_free = ath_node_free; |
| 952 | sc->sc_node_cleanup = ic->ic_node_cleanup; |
| 953 | ic->ic_node_cleanup = ath_node_cleanup; |
| 954 | ic->ic_node_getsignal = ath_node_getsignal; |
| 955 | ic->ic_scan_start = ath_scan_start; |
| 956 | ic->ic_scan_end = ath_scan_end; |
| 957 | ic->ic_set_channel = ath_set_channel; |
| 958 | #ifdef ATH_ENABLE_11N |
| 959 | /* 802.11n specific - but just override anyway */ |
| 960 | sc->sc_addba_request = ic->ic_addba_request; |
| 961 | sc->sc_addba_response = ic->ic_addba_response; |
| 962 | sc->sc_addba_stop = ic->ic_addba_stop; |
| 963 | sc->sc_bar_response = ic->ic_bar_response; |
| 964 | sc->sc_addba_response_timeout = ic->ic_addba_response_timeout; |
| 965 | |
| 966 | ic->ic_addba_request = ath_addba_request; |
| 967 | ic->ic_addba_response = ath_addba_response; |
| 968 | ic->ic_addba_response_timeout = ath_addba_response_timeout; |
| 969 | ic->ic_addba_stop = ath_addba_stop; |
| 970 | ic->ic_bar_response = ath_bar_response; |
| 971 | |
| 972 | ic->ic_update_chw = ath_update_chw; |
| 973 | #endif /* ATH_ENABLE_11N */ |
| 974 | |
| 975 | #ifdef ATH_ENABLE_RADIOTAP_VENDOR_EXT |
| 976 | /* |
| 977 | * There's one vendor bitmap entry in the RX radiotap |
| 978 | * header; make sure that's taken into account. |
| 979 | */ |
| 980 | ieee80211_radiotap_attachv(ic, |
| 981 | &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0, |
| 982 | ATH_TX_RADIOTAP_PRESENT, |
| 983 | &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1, |
| 984 | ATH_RX_RADIOTAP_PRESENT); |
| 985 | #else |
| 986 | /* |
| 987 | * No vendor bitmap/extensions are present. |
| 988 | */ |
| 989 | ieee80211_radiotap_attach(ic, |
| 990 | &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), |
| 991 | ATH_TX_RADIOTAP_PRESENT, |
| 992 | &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), |
| 993 | ATH_RX_RADIOTAP_PRESENT); |
| 994 | #endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */ |
| 995 | |
| 996 | /* |
| 997 | * Setup the ALQ logging if required |
| 998 | */ |
| 999 | #ifdef ATH_DEBUG_ALQ |
| 1000 | if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev)); |
| 1001 | if_ath_alq_setcfg(&sc->sc_alq, |
| 1002 | sc->sc_ah->ah_macVersion, |
| 1003 | sc->sc_ah->ah_macRev, |
| 1004 | sc->sc_ah->ah_phyRev, |
| 1005 | sc->sc_ah->ah_magic); |
| 1006 | #endif |
| 1007 | |
| 1008 | /* |
| 1009 | * Setup dynamic sysctl's now that country code and |
| 1010 | * regdomain are available from the hal. |
| 1011 | */ |
| 1012 | ath_sysctlattach(sc); |
| 1013 | ath_sysctl_stats_attach(sc); |
| 1014 | ath_sysctl_hal_attach(sc); |
| 1015 | |
| 1016 | if (bootverbose) |
| 1017 | ieee80211_announce(ic); |
| 1018 | ath_announce(sc); |
| 1019 | return 0; |
| 1020 | bad2: |
| 1021 | ath_tx_cleanup(sc); |
| 1022 | ath_desc_free(sc); |
| 1023 | ath_txdma_teardown(sc); |
| 1024 | ath_rxdma_teardown(sc); |
| 1025 | bad: |
| 1026 | if (ah) |
| 1027 | ath_hal_detach(ah); |
| 1028 | |
| 1029 | /* |
| 1030 | * To work around scoping issues with CURVNET_SET/CURVNET_RESTORE.. |
| 1031 | */ |
| 1032 | #if !defined(__DragonFly__) |
| 1033 | if (ifp != NULL && ifp->if_vnet) { |
| 1034 | CURVNET_SET(ifp->if_vnet); |
| 1035 | if_free(ifp); |
| 1036 | CURVNET_RESTORE(); |
| 1037 | } else |
| 1038 | #endif |
| 1039 | if (ifp != NULL) |
| 1040 | if_free(ifp); |
| 1041 | sc->sc_invalid = 1; |
| 1042 | return error; |
| 1043 | } |
| 1044 | |
| 1045 | int |
| 1046 | ath_detach(struct ath_softc *sc) |
| 1047 | { |
| 1048 | struct ifnet *ifp = sc->sc_ifp; |
| 1049 | |
| 1050 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", |
| 1051 | __func__, ifp->if_flags); |
| 1052 | |
| 1053 | /* |
| 1054 | * NB: the order of these is important: |
| 1055 | * o stop the chip so no more interrupts will fire |
| 1056 | * o call the 802.11 layer before detaching the hal to |
| 1057 | * ensure callbacks into the driver to delete global |
| 1058 | * key cache entries can be handled |
| 1059 | * o free the taskqueue which drains any pending tasks |
| 1060 | * o reclaim the tx queue data structures after calling |
| 1061 | * the 802.11 layer as we'll get called back to reclaim |
| 1062 | * node state and potentially want to use them |
| 1063 | * o to cleanup the tx queues the hal is called, so detach |
| 1064 | * it last |
| 1065 | * Other than that, it's straightforward... |
| 1066 | */ |
| 1067 | ath_stop(ifp); |
| 1068 | ieee80211_ifdetach(ifp->if_l2com); |
| 1069 | taskqueue_free(sc->sc_tq); |
| 1070 | #ifdef ATH_TX99_DIAG |
| 1071 | if (sc->sc_tx99 != NULL) |
| 1072 | sc->sc_tx99->detach(sc->sc_tx99); |
| 1073 | #endif |
| 1074 | ath_rate_detach(sc->sc_rc); |
| 1075 | #ifdef ATH_DEBUG_ALQ |
| 1076 | if_ath_alq_tidyup(&sc->sc_alq); |
| 1077 | #endif |
| 1078 | ath_lna_div_detach(sc); |
| 1079 | ath_btcoex_detach(sc); |
| 1080 | ath_spectral_detach(sc); |
| 1081 | ath_dfs_detach(sc); |
| 1082 | ath_desc_free(sc); |
| 1083 | ath_txdma_teardown(sc); |
| 1084 | ath_rxdma_teardown(sc); |
| 1085 | ath_tx_cleanup(sc); |
| 1086 | ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */ |
| 1087 | |
| 1088 | CURVNET_SET(ifp->if_vnet); |
| 1089 | if_free(ifp); |
| 1090 | CURVNET_RESTORE(); |
| 1091 | |
| 1092 | if (sc->sc_sysctl_tree) { |
| 1093 | sysctl_ctx_free(&sc->sc_sysctl_ctx); |
| 1094 | sc->sc_sysctl_tree = NULL; |
| 1095 | } |
| 1096 | |
| 1097 | return 0; |
| 1098 | } |
| 1099 | |
| 1100 | /* |
| 1101 | * MAC address handling for multiple BSS on the same radio. |
| 1102 | * The first vap uses the MAC address from the EEPROM. For |
| 1103 | * subsequent vap's we set the U/L bit (bit 1) in the MAC |
| 1104 | * address and use the next six bits as an index. |
| 1105 | */ |
| 1106 | static void |
| 1107 | assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) |
| 1108 | { |
| 1109 | int i; |
| 1110 | |
| 1111 | if (clone && sc->sc_hasbmask) { |
| 1112 | /* NB: we only do this if h/w supports multiple bssid */ |
| 1113 | for (i = 0; i < 8; i++) |
| 1114 | if ((sc->sc_bssidmask & (1<<i)) == 0) |
| 1115 | break; |
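|  | /* NB: set the U/L bit (0x2) and encode the slot index in the bits above it. */ |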
| 1116 | if (i != 0) |
| 1117 | mac[0] |= (i << 2)|0x2; |
| 1118 | } else |
| 1119 | i = 0; |
| 1120 | sc->sc_bssidmask |= 1<<i; |
| 1121 | sc->sc_hwbssidmask[0] &= ~mac[0]; |
| 1122 | if (i == 0) |
| 1123 | sc->sc_nbssid0++; |
| 1124 | } |
| 1125 | |
| 1126 | static void |
| 1127 | reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) |
| 1128 | { |
| 1129 | int i = mac[0] >> 2; |
| 1130 | uint8_t mask; |
| 1131 | |
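|  | /* NB: slot 0 is shared by vaps using the EEPROM address; sc_nbssid0 counts them so the slot is only released when the last one goes away. */ |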
| 1132 | if (i != 0 || --sc->sc_nbssid0 == 0) { |
| 1133 | sc->sc_bssidmask &= ~(1<<i); |
| 1134 | /* recalculate bssid mask from remaining addresses */ |
| 1135 | mask = 0xff; |
| 1136 | for (i = 1; i < 8; i++) |
| 1137 | if (sc->sc_bssidmask & (1<<i)) |
| 1138 | mask &= ~((i<<2)|0x2); |
| 1139 | sc->sc_hwbssidmask[0] |= mask; |
| 1140 | } |
| 1141 | } |
| 1142 | |
| 1143 | /* |
| 1144 | * Assign a beacon xmit slot. We try to space out |
| 1145 | * assignments so when beacons are staggered the |
| 1146 | * traffic coming out of the cab q has maximal time |
| 1147 | * to go out before the next beacon is scheduled. |
| 1148 | */ |
| 1149 | static int |
| 1150 | assign_bslot(struct ath_softc *sc) |
| 1151 | { |
| 1152 | u_int slot, free; |
| 1153 | |
| 1154 | free = 0; |
| 1155 | for (slot = 0; slot < ATH_BCBUF; slot++) |
| 1156 | if (sc->sc_bslot[slot] == NULL) { |
| 1157 | if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && |
| 1158 | sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) |
| 1159 | return slot; |
| 1160 | free = slot; |
| 1161 | /* NB: keep looking for a double slot */ |
| 1162 | } |
| 1163 | return free; |
| 1164 | } |
| 1165 | |
| 1166 | static struct ieee80211vap * |
| 1167 | ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, |
| 1168 | enum ieee80211_opmode opmode, int flags, |
| 1169 | const uint8_t bssid[IEEE80211_ADDR_LEN], |
| 1170 | const uint8_t mac0[IEEE80211_ADDR_LEN]) |
| 1171 | { |
| 1172 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 1173 | struct ath_vap *avp; |
| 1174 | struct ieee80211vap *vap; |
| 1175 | uint8_t mac[IEEE80211_ADDR_LEN]; |
| 1176 | int needbeacon, error; |
| 1177 | enum ieee80211_opmode ic_opmode; |
| 1178 | |
| 1179 | avp = (struct ath_vap *) kmalloc(sizeof(struct ath_vap), |
| 1180 | M_80211_VAP, M_WAITOK | M_ZERO); |
| 1181 | needbeacon = 0; |
| 1182 | IEEE80211_ADDR_COPY(mac, mac0); |
| 1183 | |
| 1184 | ATH_LOCK(sc); |
| 1185 | ic_opmode = opmode; /* default to opmode of new vap */ |
| 1186 | switch (opmode) { |
| 1187 | case IEEE80211_M_STA: |
| 1188 | if (sc->sc_nstavaps != 0) { /* XXX only 1 for now */ |
| 1189 | device_printf(sc->sc_dev, "only 1 sta vap supported\n"); |
| 1190 | goto bad; |
| 1191 | } |
| 1192 | if (sc->sc_nvaps) { |
| 1193 | /* |
| 1194 | * With multiple vaps we must fall back |
| 1195 | * to s/w beacon miss handling. |
| 1196 | */ |
| 1197 | flags |= IEEE80211_CLONE_NOBEACONS; |
| 1198 | } |
| 1199 | if (flags & IEEE80211_CLONE_NOBEACONS) { |
| 1200 | /* |
| 1201 | * Station mode w/o beacons is implemented w/ AP mode. |
| 1202 | */ |
| 1203 | ic_opmode = IEEE80211_M_HOSTAP; |
| 1204 | } |
| 1205 | break; |
| 1206 | case IEEE80211_M_IBSS: |
| 1207 | if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ |
| 1208 | device_printf(sc->sc_dev, |
| 1209 | "only 1 ibss vap supported\n"); |
| 1210 | goto bad; |
| 1211 | } |
| 1212 | needbeacon = 1; |
| 1213 | break; |
| 1214 | case IEEE80211_M_AHDEMO: |
| 1215 | #ifdef IEEE80211_SUPPORT_TDMA |
| 1216 | if (flags & IEEE80211_CLONE_TDMA) { |
| 1217 | if (sc->sc_nvaps != 0) { |
| 1218 | device_printf(sc->sc_dev, |
| 1219 | "only 1 tdma vap supported\n"); |
| 1220 | goto bad; |
| 1221 | } |
| 1222 | needbeacon = 1; |
| 1223 | flags |= IEEE80211_CLONE_NOBEACONS; |
| 1224 | } |
| 1225 | /* fall thru... */ |
| 1226 | #endif |
| 1227 | case IEEE80211_M_MONITOR: |
| 1228 | if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { |
| 1229 | /* |
| 1230 | * Adopt existing mode. Adding a monitor or ahdemo |
| 1231 | * vap to an existing configuration is of dubious |
| 1232 | * value but should be ok. |
| 1233 | */ |
| 1234 | /* XXX not right for monitor mode */ |
| 1235 | ic_opmode = ic->ic_opmode; |
| 1236 | } |
| 1237 | break; |
| 1238 | case IEEE80211_M_HOSTAP: |
| 1239 | case IEEE80211_M_MBSS: |
| 1240 | needbeacon = 1; |
| 1241 | break; |
| 1242 | case IEEE80211_M_WDS: |
| 1243 | if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) { |
| 1244 | device_printf(sc->sc_dev, |
| 1245 | "wds not supported in sta mode\n"); |
| 1246 | goto bad; |
| 1247 | } |
| 1248 | /* |
| 1249 | * Silently remove any request for a unique |
| 1250 | * bssid; WDS vap's always share the local |
| 1251 | * mac address. |
| 1252 | */ |
| 1253 | flags &= ~IEEE80211_CLONE_BSSID; |
| 1254 | if (sc->sc_nvaps == 0) |
| 1255 | ic_opmode = IEEE80211_M_HOSTAP; |
| 1256 | else |
| 1257 | ic_opmode = ic->ic_opmode; |
| 1258 | break; |
| 1259 | default: |
| 1260 | device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); |
| 1261 | goto bad; |
| 1262 | } |
| 1263 | /* |
| 1264 | * Check that a beacon buffer is available; the code below assumes it. |
| 1265 | */ |
| 1266 | if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) { |
| 1267 | device_printf(sc->sc_dev, "no beacon buffer available\n"); |
| 1268 | goto bad; |
| 1269 | } |
| 1270 | |
| 1271 | /* STA, AHDEMO? */ |
| 1272 | if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { |
| 1273 | assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); |
| 1274 | ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); |
| 1275 | } |
| 1276 | |
| 1277 | vap = &avp->av_vap; |
| 1278 | /* XXX can't hold mutex across if_alloc */ |
| 1279 | ATH_UNLOCK(sc); |
| 1280 | error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, |
| 1281 | bssid, mac); |
| 1282 | ATH_LOCK(sc); |
| 1283 | if (error != 0) { |
| 1284 | device_printf(sc->sc_dev, "%s: error %d creating vap\n", |
| 1285 | __func__, error); |
| 1286 | goto bad2; |
| 1287 | } |
| 1288 | |
| 1289 | /* h/w crypto support */ |
| 1290 | vap->iv_key_alloc = ath_key_alloc; |
| 1291 | vap->iv_key_delete = ath_key_delete; |
| 1292 | vap->iv_key_set = ath_key_set; |
| 1293 | vap->iv_key_update_begin = ath_key_update_begin; |
| 1294 | vap->iv_key_update_end = ath_key_update_end; |
| 1295 | |
| 1296 | /* override various methods */ |
| 1297 | avp->av_recv_mgmt = vap->iv_recv_mgmt; |
| 1298 | vap->iv_recv_mgmt = ath_recv_mgmt; |
| 1299 | vap->iv_reset = ath_reset_vap; |
| 1300 | vap->iv_update_beacon = ath_beacon_update; |
| 1301 | avp->av_newstate = vap->iv_newstate; |
| 1302 | vap->iv_newstate = ath_newstate; |
| 1303 | avp->av_bmiss = vap->iv_bmiss; |
| 1304 | vap->iv_bmiss = ath_bmiss_vap; |
| 1305 | |
| 1306 | #if 0 |
| 1307 | avp->av_node_ps = vap->iv_node_ps; |
| 1308 | vap->iv_node_ps = ath_node_powersave; |
| 1309 | #endif |
| 1310 | |
| 1311 | avp->av_set_tim = vap->iv_set_tim; |
| 1312 | vap->iv_set_tim = ath_node_set_tim; |
| 1313 | |
| 1314 | #if 0 |
| 1315 | avp->av_recv_pspoll = vap->iv_recv_pspoll; |
| 1316 | vap->iv_recv_pspoll = ath_node_recv_pspoll; |
| 1317 | #endif |
| 1318 | |
| 1319 | /* Set default parameters */ |
| 1320 | |
| 1321 | /* |
| 1322 | * Anything earlier than some AR9300 series MACs doesn't |
| 1323 | * support a smaller MPDU density. |
| 1324 | */ |
| 1325 | vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8; |
| 1326 | /* |
| 1327 | * All NICs can handle the maximum size; however, |
| 1328 | * AR5416 based MACs can only TX aggregates w/ RTS |
| 1329 | * protection when the total aggregate size is <= 8k. |
| 1330 | * However, for now that's enforced by the TX path. |
| 1331 | */ |
| 1332 | vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; |
| 1333 | |
| 1334 | avp->av_bslot = -1; |
| 1335 | if (needbeacon) { |
| 1336 | /* |
| 1337 | * Allocate beacon state and setup the q for buffered |
| 1338 | * multicast frames. We know a beacon buffer is |
| 1339 | * available because we checked above. |
| 1340 | */ |
| 1341 | avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf); |
| 1342 | TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list); |
| 1343 | if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { |
| 1344 | /* |
| 1345 | * Assign the vap to a beacon xmit slot. As above |
| 1346 | * this cannot fail to find a free one. |
| 1347 | */ |
| 1348 | avp->av_bslot = assign_bslot(sc); |
| 1349 | KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, |
| 1350 | ("beacon slot %u not empty", avp->av_bslot)); |
| 1351 | sc->sc_bslot[avp->av_bslot] = vap; |
| 1352 | sc->sc_nbcnvaps++; |
| 1353 | } |
| 1354 | if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { |
| 1355 | /* |
| 1356 | * Multiple vaps are to transmit beacons and we |
| 1357 | * have h/w support for TSF adjusting; enable |
| 1358 | * use of staggered beacons. |
| 1359 | */ |
| 1360 | sc->sc_stagbeacons = 1; |
| 1361 | } |
| 1362 | ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); |
| 1363 | } |
| 1364 | |
| 1365 | ic->ic_opmode = ic_opmode; |
| 1366 | if (opmode != IEEE80211_M_WDS) { |
| 1367 | sc->sc_nvaps++; |
| 1368 | if (opmode == IEEE80211_M_STA) |
| 1369 | sc->sc_nstavaps++; |
| 1370 | if (opmode == IEEE80211_M_MBSS) |
| 1371 | sc->sc_nmeshvaps++; |
| 1372 | } |
| 1373 | switch (ic_opmode) { |
| 1374 | case IEEE80211_M_IBSS: |
| 1375 | sc->sc_opmode = HAL_M_IBSS; |
| 1376 | break; |
| 1377 | case IEEE80211_M_STA: |
| 1378 | sc->sc_opmode = HAL_M_STA; |
| 1379 | break; |
| 1380 | case IEEE80211_M_AHDEMO: |
| 1381 | #ifdef IEEE80211_SUPPORT_TDMA |
| 1382 | if (vap->iv_caps & IEEE80211_C_TDMA) { |
| 1383 | sc->sc_tdma = 1; |
| 1384 | /* NB: disable tsf adjust */ |
| 1385 | sc->sc_stagbeacons = 0; |
| 1386 | } |
| 1387 | /* |
| 1388 | * NB: adhoc demo mode is a pseudo mode; to the hal it's |
| 1389 | * just ap mode. |
| 1390 | */ |
| 1391 | /* fall thru... */ |
| 1392 | #endif |
| 1393 | case IEEE80211_M_HOSTAP: |
| 1394 | case IEEE80211_M_MBSS: |
| 1395 | sc->sc_opmode = HAL_M_HOSTAP; |
| 1396 | break; |
| 1397 | case IEEE80211_M_MONITOR: |
| 1398 | sc->sc_opmode = HAL_M_MONITOR; |
| 1399 | break; |
| 1400 | default: |
| 1401 | /* XXX should not happen */ |
| 1402 | break; |
| 1403 | } |
| 1404 | if (sc->sc_hastsfadd) { |
| 1405 | /* |
| 1406 | * Configure whether or not TSF adjust should be done. |
| 1407 | */ |
| 1408 | ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); |
| 1409 | } |
| 1410 | if (flags & IEEE80211_CLONE_NOBEACONS) { |
| 1411 | /* |
| 1412 | * Enable s/w beacon miss handling. |
| 1413 | */ |
| 1414 | sc->sc_swbmiss = 1; |
| 1415 | } |
| 1416 | ATH_UNLOCK(sc); |
| 1417 | |
| 1418 | /* complete setup */ |
| 1419 | ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status); |
| 1420 | return vap; |
| 1421 | bad2: |
| 1422 | reclaim_address(sc, mac); |
| 1423 | ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); |
| 1424 | bad: |
| 1425 | kfree(avp, M_80211_VAP); |
| 1426 | ATH_UNLOCK(sc); |
| 1427 | return NULL; |
| 1428 | } |
| 1429 | |
| 1430 | static void |
| 1431 | ath_vap_delete(struct ieee80211vap *vap) |
| 1432 | { |
| 1433 | struct ieee80211com *ic = vap->iv_ic; |
| 1434 | struct ifnet *ifp = ic->ic_ifp; |
| 1435 | struct ath_softc *sc = ifp->if_softc; |
| 1436 | struct ath_hal *ah = sc->sc_ah; |
| 1437 | struct ath_vap *avp = ATH_VAP(vap); |
| 1438 | |
| 1439 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); |
| 1440 | if (ifp->if_flags & IFF_RUNNING) { |
| 1441 | /* |
| 1442 | * Quiesce the hardware while we remove the vap. In |
| 1443 | * particular we need to reclaim all references to |
| 1444 | * the vap state by any frames pending on the tx queues. |
| 1445 | */ |
| 1446 | ath_hal_intrset(ah, 0); /* disable interrupts */ |
| 1447 | ath_draintxq(sc, ATH_RESET_DEFAULT); /* stop hw xmit side */ |
| 1448 | /* XXX Do all frames from all vaps/nodes need draining here? */ |
| 1449 | ath_stoprecv(sc, 1); /* stop recv side */ |
| 1450 | } |
| 1451 | |
| 1452 | ieee80211_vap_detach(vap); |
| 1453 | |
| 1454 | /* |
| 1455 | * XXX Danger Will Robinson! Danger! |
| 1456 | * |
| 1457 | * Because ieee80211_vap_detach() can queue a frame (the station |
| 1458 | * disassociate message?) after we've drained the TXQ and |
| 1459 | * flushed the software TXQ, we will end up with a frame queued |
| 1460 | * to a node whose vap is about to be freed. |
| 1461 | * |
| 1462 | * To work around this, flush the hardware/software again. |
| 1463 | * This may be racy - the ath task may be running and the packet |
| 1464 | * may be being scheduled between sw->hw txq. Tsk. |
| 1465 | * |
| 1466 | * TODO: figure out why a new node gets allocated somewhere around |
| 1467 | * here (after the ath_tx_swq() call; and after an ath_stop_locked() |
| 1468 | * call!) |
| 1469 | */ |
| 1470 | |
| 1471 | ath_draintxq(sc, ATH_RESET_DEFAULT); |
| 1472 | |
| 1473 | ATH_LOCK(sc); |
| 1474 | /* |
| 1475 | * Reclaim beacon state. Note this must be done before |
| 1476 | * the vap instance is reclaimed as we may have a reference |
| 1477 | * to it in the buffer for the beacon frame. |
| 1478 | */ |
| 1479 | if (avp->av_bcbuf != NULL) { |
| 1480 | if (avp->av_bslot != -1) { |
| 1481 | sc->sc_bslot[avp->av_bslot] = NULL; |
| 1482 | sc->sc_nbcnvaps--; |
| 1483 | } |
| 1484 | ath_beacon_return(sc, avp->av_bcbuf); |
| 1485 | avp->av_bcbuf = NULL; |
| 1486 | if (sc->sc_nbcnvaps == 0) { |
| 1487 | sc->sc_stagbeacons = 0; |
| 1488 | if (sc->sc_hastsfadd) |
| 1489 | ath_hal_settsfadjust(sc->sc_ah, 0); |
| 1490 | } |
| 1491 | /* |
| 1492 | * Reclaim any pending mcast frames for the vap. |
| 1493 | */ |
| 1494 | ath_tx_draintxq(sc, &avp->av_mcastq); |
| 1495 | } |
| 1496 | /* |
| 1497 | * Update bookkeeping. |
| 1498 | */ |
| 1499 | if (vap->iv_opmode == IEEE80211_M_STA) { |
| 1500 | sc->sc_nstavaps--; |
| 1501 | if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) |
| 1502 | sc->sc_swbmiss = 0; |
| 1503 | } else if (vap->iv_opmode == IEEE80211_M_HOSTAP || |
| 1504 | vap->iv_opmode == IEEE80211_M_MBSS) { |
| 1505 | reclaim_address(sc, vap->iv_myaddr); |
| 1506 | ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); |
| 1507 | if (vap->iv_opmode == IEEE80211_M_MBSS) |
| 1508 | sc->sc_nmeshvaps--; |
| 1509 | } |
| 1510 | if (vap->iv_opmode != IEEE80211_M_WDS) |
| 1511 | sc->sc_nvaps--; |
| 1512 | #ifdef IEEE80211_SUPPORT_TDMA |
| 1513 | /* TDMA operation ceases when the last vap is destroyed */ |
| 1514 | if (sc->sc_tdma && sc->sc_nvaps == 0) { |
| 1515 | sc->sc_tdma = 0; |
| 1516 | sc->sc_swbmiss = 0; |
| 1517 | } |
| 1518 | #endif |
| 1519 | kfree(avp, M_80211_VAP); |
| 1520 | |
| 1521 | if (ifp->if_flags & IFF_RUNNING) { |
| 1522 | /* |
| 1523 | * Restart rx+tx machines if still running (RUNNING will |
| 1524 | * be reset if we just destroyed the last vap). |
| 1525 | */ |
| 1526 | if (ath_startrecv(sc) != 0) |
| 1527 | if_printf(ifp, "%s: unable to restart recv logic\n", |
| 1528 | __func__); |
| 1529 | if (sc->sc_beacons) { /* restart beacons */ |
| 1530 | #ifdef IEEE80211_SUPPORT_TDMA |
| 1531 | if (sc->sc_tdma) |
| 1532 | ath_tdma_config(sc, NULL); |
| 1533 | else |
| 1534 | #endif |
| 1535 | ath_beacon_config(sc, NULL); |
| 1536 | } |
| 1537 | ath_hal_intrset(ah, sc->sc_imask); |
| 1538 | } |
| 1539 | ATH_UNLOCK(sc); |
| 1540 | } |
| 1541 | |
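| | /* |
| | * Device suspend path: remember whether the interface was up so |
| | * resume can bring it back, suspend all vaps and disable the |
| | * PCIe PHY. |
| | */ |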
| 1542 | void |
| 1543 | ath_suspend(struct ath_softc *sc) |
| 1544 | { |
| 1545 | struct ifnet *ifp = sc->sc_ifp; |
| 1546 | struct ieee80211com *ic = ifp->if_l2com; |
| 1547 | |
| 1548 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", |
| 1549 | __func__, ifp->if_flags); |
| 1550 | |
| 1551 | sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; |
| 1552 | |
| 1553 | ieee80211_suspend_all(ic); |
| 1554 | /* |
| 1555 | * NB: don't worry about putting the chip in low power |
| 1556 | * mode; pci will power off our socket on suspend and |
| 1557 | * CardBus detaches the device. |
| 1558 | */ |
| 1559 | |
| 1560 | /* |
| 1561 | * XXX ensure none of the taskqueues are running |
| 1562 | * XXX ensure sc_invalid is 1 |
| 1563 | * XXX ensure the calibration callout is disabled |
| 1564 | */ |
| 1565 | |
| 1566 | /* Disable the PCIe PHY, complete with workarounds */ |
| 1567 | ath_hal_enablepcie(sc->sc_ah, 1, 1); |
| 1568 | } |
| 1569 | |
| 1570 | /* |
| 1571 | * Reset the key cache since some parts do not reset the |
| 1572 | * contents on resume. First we clear all entries, then |
| 1573 | * re-load keys that the 802.11 layer assumes are setup |
| 1574 | * in h/w. |
| 1575 | */ |
| 1576 | static void |
| 1577 | ath_reset_keycache(struct ath_softc *sc) |
| 1578 | { |
| 1579 | struct ifnet *ifp = sc->sc_ifp; |
| 1580 | struct ieee80211com *ic = ifp->if_l2com; |
| 1581 | struct ath_hal *ah = sc->sc_ah; |
| 1582 | int i; |
| 1583 | |
| 1584 | for (i = 0; i < sc->sc_keymax; i++) |
| 1585 | ath_hal_keyreset(ah, i); |
| 1586 | ieee80211_crypto_reload_keys(ic); |
| 1587 | } |
| 1588 | |
| 1589 | /* |
| 1590 | * Fetch the current chainmask configuration based on the current |
| 1591 | * operating channel and options. |
| 1592 | */ |
| 1593 | static void |
| 1594 | ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan) |
| 1595 | { |
| 1596 | |
| 1597 | /* |
| 1598 | * Set TX chainmask to the currently configured chainmask; |
| 1599 | * the TX chainmask depends upon the current operating mode. |
| 1600 | */ |
| 1601 | sc->sc_cur_rxchainmask = sc->sc_rxchainmask; |
| 1602 | if (IEEE80211_IS_CHAN_HT(chan)) { |
| 1603 | sc->sc_cur_txchainmask = sc->sc_txchainmask; |
| 1604 | } else { |
| 1605 | sc->sc_cur_txchainmask = 1; |
| 1606 | } |
| 1607 | |
| 1608 | DPRINTF(sc, ATH_DEBUG_RESET, |
| 1609 | "%s: TX chainmask is now 0x%x, RX is now 0x%x\n", |
| 1610 | __func__, |
| 1611 | sc->sc_cur_txchainmask, |
| 1612 | sc->sc_cur_rxchainmask); |
| 1613 | } |
| 1614 | |
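| | /* |
| | * Device resume path: re-enable the PCIe PHY, reset the chip |
| | * (it was powered down across suspend), reload the key cache |
| | * and re-apply per-channel state before bringing the vaps back. |
| | */ |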
| 1615 | void |
| 1616 | ath_resume(struct ath_softc *sc) |
| 1617 | { |
| 1618 | struct ifnet *ifp = sc->sc_ifp; |
| 1619 | struct ieee80211com *ic = ifp->if_l2com; |
| 1620 | struct ath_hal *ah = sc->sc_ah; |
| 1621 | HAL_STATUS status; |
| 1622 | |
| 1623 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", |
| 1624 | __func__, ifp->if_flags); |
| 1625 | |
| 1626 | /* Re-enable PCIe, re-enable the PCIe bus */ |
| 1627 | ath_hal_enablepcie(ah, 0, 0); |
| 1628 | |
| 1629 | /* |
| 1630 | * Must reset the chip before we reload the |
| 1631 | * keycache as we were powered down on suspend. |
| 1632 | */ |
| 1633 | ath_update_chainmasks(sc, |
| 1634 | sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan); |
| 1635 | ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, |
| 1636 | sc->sc_cur_rxchainmask); |
| 1637 | ath_hal_reset(ah, sc->sc_opmode, |
| 1638 | sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan, |
| 1639 | AH_FALSE, &status); |
| 1640 | ath_reset_keycache(sc); |
| 1641 | |
| 1642 | /* Let DFS at it in case it's a DFS channel */ |
| 1643 | ath_dfs_radar_enable(sc, ic->ic_curchan); |
| 1644 | |
| 1645 | /* Let spectral at it in case spectral is enabled */ |
| 1646 | ath_spectral_enable(sc, ic->ic_curchan); |
| 1647 | |
| 1648 | /* |
| 1649 | * Let bluetooth coexistence at it in case it's needed for this channel |
| 1650 | */ |
| 1651 | ath_btcoex_enable(sc, ic->ic_curchan); |
| 1652 | |
| 1653 | /* |
| 1654 | * If we're doing TDMA, enforce the TXOP limitation for chips that |
| 1655 | * support it. |
| 1656 | */ |
| 1657 | if (sc->sc_hasenforcetxop && sc->sc_tdma) |
| 1658 | ath_hal_setenforcetxop(sc->sc_ah, 1); |
| 1659 | else |
| 1660 | ath_hal_setenforcetxop(sc->sc_ah, 0); |
| 1661 | |
| 1662 | /* Restore the LED configuration */ |
| 1663 | ath_led_config(sc); |
| 1664 | ath_hal_setledstate(ah, HAL_LED_INIT); |
| 1665 | |
| 1666 | if (sc->sc_resume_up) |
| 1667 | ieee80211_resume_all(ic); |
| 1668 | |
| 1669 | /* XXX beacons ? */ |
| 1670 | } |
| 1671 | |
| 1672 | void |
| 1673 | ath_shutdown(struct ath_softc *sc) |
| 1674 | { |
| 1675 | struct ifnet *ifp = sc->sc_ifp; |
| 1676 | |
| 1677 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", |
| 1678 | __func__, ifp->if_flags); |
| 1679 | |
| 1680 | ath_stop(ifp); |
| 1681 | /* NB: no point powering down chip as we're about to reboot */ |
| 1682 | } |
| 1683 | |
| 1684 | /* |
| 1685 | * Interrupt handler. Most of the actual processing is deferred. |
| 1686 | */ |
| 1687 | void |
| 1688 | ath_intr(void *arg) |
| 1689 | { |
| 1690 | struct ath_softc *sc = arg; |
| 1691 | struct ifnet *ifp = sc->sc_ifp; |
| 1692 | struct ath_hal *ah = sc->sc_ah; |
| 1693 | HAL_INT status = 0; |
| 1694 | uint32_t txqs; |
| 1695 | |
| 1696 | /* |
| 1697 | * If we're inside a reset path, just print a warning and |
| 1698 | * clear the ISR. The reset routine will finish it for us. |
| 1699 | */ |
| 1700 | ATH_PCU_LOCK(sc); |
| 1701 | if (sc->sc_inreset_cnt) { |
| 1702 | HAL_INT status; |
| 1703 | ath_hal_getisr(ah, &status); /* clear ISR */ |
| 1704 | ath_hal_intrset(ah, 0); /* disable further intr's */ |
| 1705 | DPRINTF(sc, ATH_DEBUG_ANY, |
| 1706 | "%s: in reset, ignoring: status=0x%x\n", |
| 1707 | __func__, status); |
| 1708 | ATH_PCU_UNLOCK(sc); |
| 1709 | return; |
| 1710 | } |
| 1711 | |
| 1712 | if (sc->sc_invalid) { |
| 1713 | /* |
| 1714 | * The hardware is not ready/present, don't touch anything. |
| 1715 | * Note this can happen early on if the IRQ is shared. |
| 1716 | */ |
| 1717 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); |
| 1718 | ATH_PCU_UNLOCK(sc); |
| 1719 | return; |
| 1720 | } |
| 1721 | if (!ath_hal_intrpend(ah)) { /* shared irq, not for us */ |
| 1722 | ATH_PCU_UNLOCK(sc); |
| 1723 | return; |
| 1724 | } |
| 1725 | |
| 1726 | if ((ifp->if_flags & IFF_UP) == 0 || |
| 1727 | (ifp->if_flags & IFF_RUNNING) == 0) { |
| 1728 | HAL_INT status; |
| 1729 | |
| 1730 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", |
| 1731 | __func__, ifp->if_flags); |
| 1732 | ath_hal_getisr(ah, &status); /* clear ISR */ |
| 1733 | ath_hal_intrset(ah, 0); /* disable further intr's */ |
| 1734 | ATH_PCU_UNLOCK(sc); |
| 1735 | return; |
| 1736 | } |
| 1737 | |
| 1738 | /* |
| 1739 | * Figure out the reason(s) for the interrupt. Note |
| 1740 | * that the hal returns a pseudo-ISR that may include |
| 1741 | * bits we haven't explicitly enabled so we mask the |
| 1742 | * value to ensure we only process bits we requested. |
| 1743 | */ |
| 1744 | ath_hal_getisr(ah, &status); /* NB: clears ISR too */ |
| 1745 | DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); |
| 1746 | ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status); |
| 1747 | #ifdef ATH_DEBUG_ALQ |
| 1748 | if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate, |
| 1749 | ah->ah_syncstate); |
| 1750 | #endif /* ATH_DEBUG_ALQ */ |
| 1751 | #ifdef ATH_KTR_INTR_DEBUG |
| 1752 | ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5, |
| 1753 | "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x", |
| 1754 | ah->ah_intrstate[0], |
| 1755 | ah->ah_intrstate[1], |
| 1756 | ah->ah_intrstate[2], |
| 1757 | ah->ah_intrstate[3], |
| 1758 | ah->ah_intrstate[6]); |
| 1759 | #endif |
| 1760 | |
| 1761 | /* Squirrel away SYNC interrupt debugging */ |
| 1762 | if (ah->ah_syncstate != 0) { |
| 1763 | int i; |
| 1764 | for (i = 0; i < 32; i++) |
| 1765 | if (ah->ah_syncstate & (1 << i)) |
| 1766 | sc->sc_intr_stats.sync_intr[i]++; |
| 1767 | } |
| 1768 | |
| 1769 | status &= sc->sc_imask; /* discard unasked for bits */ |
| 1770 | |
| 1771 | /* Short-circuit un-handled interrupts */ |
| 1772 | if (status == 0x0) { |
| 1773 | ATH_PCU_UNLOCK(sc); |
| 1774 | return; |
| 1775 | } |
| 1776 | |
| 1777 | /* |
| 1778 | * Take a note that we're inside the interrupt handler, so |
| 1779 | * the reset routines know to wait. |
| 1780 | */ |
| 1781 | sc->sc_intr_cnt++; |
| 1782 | ATH_PCU_UNLOCK(sc); |
| 1783 | |
| 1784 | /* |
| 1785 | * Handle the interrupt. We won't run concurrent with the reset |
| 1786 | * or channel change routines as they'll wait for sc_intr_cnt |
| 1787 | * to be 0 before continuing. |
| 1788 | */ |
| 1789 | if (status & HAL_INT_FATAL) { |
| 1790 | sc->sc_stats.ast_hardware++; |
| 1791 | ath_hal_intrset(ah, 0); /* disable intr's until reset */ |
| 1792 | taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask); |
| 1793 | } else { |
| 1794 | if (status & HAL_INT_SWBA) { |
| 1795 | /* |
| 1796 | * Software beacon alert--time to send a beacon. |
| 1797 | * Handle beacon transmission directly; deferring |
| 1798 | * this is too slow to meet timing constraints |
| 1799 | * under load. |
| 1800 | */ |
| 1801 | #ifdef IEEE80211_SUPPORT_TDMA |
| 1802 | if (sc->sc_tdma) { |
| 1803 | if (sc->sc_tdmaswba == 0) { |
| 1804 | struct ieee80211com *ic = ifp->if_l2com; |
| 1805 | struct ieee80211vap *vap = |
| 1806 | TAILQ_FIRST(&ic->ic_vaps); |
| 1807 | ath_tdma_beacon_send(sc, vap); |
| 1808 | sc->sc_tdmaswba = |
| 1809 | vap->iv_tdma->tdma_bintval; |
| 1810 | } else |
| 1811 | sc->sc_tdmaswba--; |
| 1812 | } else |
| 1813 | #endif |
| 1814 | { |
| 1815 | ath_beacon_proc(sc, 0); |
| 1816 | #ifdef IEEE80211_SUPPORT_SUPERG |
| 1817 | /* |
| 1818 | * Schedule the rx taskq in case there's no |
| 1819 | * traffic so any frames held on the staging |
| 1820 | * queue are aged and potentially flushed. |
| 1821 | */ |
| 1822 | sc->sc_rx.recv_sched(sc, 1); |
| 1823 | #endif |
| 1824 | } |
| 1825 | } |
| 1826 | if (status & HAL_INT_RXEOL) { |
| 1827 | int imask; |
| 1828 | ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL"); |
| 1829 | ATH_PCU_LOCK(sc); |
| 1830 | /* |
| 1831 | * NB: the hardware should re-read the link when |
| 1832 | * RXE bit is written, but it doesn't work at |
| 1833 | * least on older hardware revs. |
| 1834 | */ |
| 1835 | sc->sc_stats.ast_rxeol++; |
| 1836 | /* |
| 1837 | * Disable RXEOL/RXORN - prevent an interrupt |
| 1838 | * storm until the PCU logic can be reset. |
| 1839 | * In case the interface is reset some other |
| 1840 | * way before "sc_kickpcu" is called, don't |
| 1841 | * modify sc_imask - that way if it is reset |
| 1842 | * by a call to ath_reset() somehow, the |
| 1843 | * interrupt mask will be correctly reprogrammed. |
| 1844 | */ |
| 1845 | imask = sc->sc_imask; |
| 1846 | imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN); |
| 1847 | ath_hal_intrset(ah, imask); |
| 1848 | /* |
| 1849 | * Only blank sc_rxlink if we've not yet kicked |
| 1850 | * the PCU. |
| 1851 | * |
| 1852 | * This isn't entirely correct - the correct solution |
| 1853 | * would be to have a PCU lock and engage that for |
| 1854 | * the duration of the PCU fiddling; which would include |
| 1855 | * running the RX process. Otherwise we could end up |
| 1856 | * messing up the RX descriptor chain and making the |
| 1857 | * RX desc list much shorter. |
| 1858 | */ |
| 1859 | if (! sc->sc_kickpcu) |
| 1860 | sc->sc_rxlink = NULL; |
| 1861 | sc->sc_kickpcu = 1; |
| 1862 | ATH_PCU_UNLOCK(sc); |
| 1863 | /* |
| 1864 | * Enqueue an RX proc to handle whatever |
| 1865 | * is in the RX queue. |
| 1866 | * This will then kick the PCU. |
| 1867 | */ |
| 1868 | sc->sc_rx.recv_sched(sc, 1); |
| 1869 | } |
| 1870 | if (status & HAL_INT_TXURN) { |
| 1871 | sc->sc_stats.ast_txurn++; |
| 1872 | /* bump tx trigger level */ |
| 1873 | ath_hal_updatetxtriglevel(ah, AH_TRUE); |
| 1874 | } |
| 1875 | /* |
| 1876 | * Handle both the legacy and RX EDMA interrupt bits. |
| 1877 | * Note that HAL_INT_RXLP is also HAL_INT_RXDESC. |
| 1878 | */ |
| 1879 | if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) { |
| 1880 | sc->sc_stats.ast_rx_intr++; |
| 1881 | sc->sc_rx.recv_sched(sc, 1); |
| 1882 | } |
| 1883 | if (status & HAL_INT_TX) { |
| 1884 | sc->sc_stats.ast_tx_intr++; |
| 1885 | /* |
| 1886 | * Grab all the currently set bits in the HAL txq bitmap |
| 1887 | * and blank them. This is the only place we should be |
| 1888 | * doing this. |
| 1889 | */ |
| 1890 | if (! sc->sc_isedma) { |
| 1891 | ATH_PCU_LOCK(sc); |
| 1892 | txqs = 0xffffffff; |
| 1893 | ath_hal_gettxintrtxqs(sc->sc_ah, &txqs); |
| 1894 | ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3, |
| 1895 | "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x", |
| 1896 | txqs, |
| 1897 | sc->sc_txq_active, |
| 1898 | sc->sc_txq_active | txqs); |
| 1899 | sc->sc_txq_active |= txqs; |
| 1900 | ATH_PCU_UNLOCK(sc); |
| 1901 | } |
| 1902 | taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); |
| 1903 | } |
| 1904 | if (status & HAL_INT_BMISS) { |
| 1905 | sc->sc_stats.ast_bmiss++; |
| 1906 | taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); |
| 1907 | } |
| 1908 | if (status & HAL_INT_GTT) |
| 1909 | sc->sc_stats.ast_tx_timeout++; |
| 1910 | if (status & HAL_INT_CST) |
| 1911 | sc->sc_stats.ast_tx_cst++; |
| 1912 | if (status & HAL_INT_MIB) { |
| 1913 | sc->sc_stats.ast_mib++; |
| 1914 | ATH_PCU_LOCK(sc); |
| 1915 | /* |
| 1916 | * Disable interrupts until we service the MIB |
| 1917 | * interrupt; otherwise it will continue to fire. |
| 1918 | */ |
| 1919 | ath_hal_intrset(ah, 0); |
| 1920 | /* |
| 1921 | * Let the hal handle the event. We assume it will |
| 1922 | * clear whatever condition caused the interrupt. |
| 1923 | */ |
| 1924 | ath_hal_mibevent(ah, &sc->sc_halstats); |
| 1925 | /* |
| 1926 | * Don't reset the interrupt if we've just |
| 1927 | * kicked the PCU, or we may get a nested |
| 1928 | * RXEOL before the rxproc has had a chance |
| 1929 | * to run. |
| 1930 | */ |
| 1931 | if (sc->sc_kickpcu == 0) |
| 1932 | ath_hal_intrset(ah, sc->sc_imask); |
| 1933 | ATH_PCU_UNLOCK(sc); |
| 1934 | } |
| 1935 | if (status & HAL_INT_RXORN) { |
| 1936 | /* NB: hal marks HAL_INT_FATAL when RXORN is fatal */ |
| 1937 | ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN"); |
| 1938 | sc->sc_stats.ast_rxorn++; |
| 1939 | } |
| 1940 | } |
| 1941 | ATH_PCU_LOCK(sc); |
| 1942 | sc->sc_intr_cnt--; |
| 1943 | ATH_PCU_UNLOCK(sc); |
| 1944 | } |
| 1945 | |
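| | /* |
| | * Deferred handler for HAL_INT_FATAL interrupts: dump the fatal |
| | * state collected by the hal and reset the chip. |
| | */ |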
| 1946 | static void |
| 1947 | ath_fatal_proc(void *arg, int pending) |
| 1948 | { |
| 1949 | struct ath_softc *sc = arg; |
| 1950 | struct ifnet *ifp = sc->sc_ifp; |
| 1951 | u_int32_t *state; |
| 1952 | u_int32_t len; |
| 1953 | void *sp; |
| 1954 | |
| 1955 | if_printf(ifp, "hardware error; resetting\n"); |
| 1956 | /* |
| 1957 | * Fatal errors are unrecoverable. Typically these |
| 1958 | * are caused by DMA errors. Collect h/w state from |
| 1959 | * the hal so we can diagnose what's going on. |
| 1960 | */ |
| 1961 | wlan_serialize_enter(); |
| 1962 | if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { |
| 1963 | KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); |
| 1964 | state = sp; |
| 1965 | if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", |
| 1966 | state[0], state[1] , state[2], state[3], |
| 1967 | state[4], state[5]); |
| 1968 | } |
| 1969 | ath_reset(ifp, ATH_RESET_NOLOSS); |
| 1970 | wlan_serialize_exit(); |
| 1971 | } |
| 1972 | |
| 1973 | static void |
| 1974 | ath_bmiss_vap(struct ieee80211vap *vap) |
| 1975 | { |
| 1976 | /* |
| 1977 | * Work around phantom bmiss interrupts by sanity-checking |
| 1978 | * the time of our last rx'd frame. If it is within the |
| 1979 | * beacon miss interval then ignore the interrupt. If it's |
| 1980 | * truly a bmiss we'll get another interrupt soon and that'll |
| 1981 | * be dispatched up for processing. Note this applies only |
| 1982 | * for h/w beacon miss events. |
| 1983 | */ |
| 1984 | if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) { |
| 1985 | struct ifnet *ifp = vap->iv_ic->ic_ifp; |
| 1986 | struct ath_softc *sc = ifp->if_softc; |
| 1987 | u_int64_t lastrx = sc->sc_lastrx; |
| 1988 | u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); |
| 1989 | /* XXX should take a locked ref to iv_bss */ |
| 1990 | u_int bmisstimeout = |
| 1991 | vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; |
| 1992 | |
| 1993 | DPRINTF(sc, ATH_DEBUG_BEACON, |
| 1994 | "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", |
| 1995 | __func__, (unsigned long long) tsf, |
| 1996 | (unsigned long long)(tsf - lastrx), |
| 1997 | (unsigned long long) lastrx, bmisstimeout); |
| 1998 | |
| 1999 | if (tsf - lastrx <= bmisstimeout) { |
| 2000 | sc->sc_stats.ast_bmiss_phantom++; |
| 2001 | return; |
| 2002 | } |
| 2003 | } |
| 2004 | ATH_VAP(vap)->av_bmiss(vap); |
| 2005 | } |
| 2006 | |
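| | /* |
| | * Query the hal diagnostic interface for baseband/MAC hang state. |
| | * Returns 1 and fills in *hangs on success, 0 if the query is |
| | * not supported. |
| | */ |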
| 2007 | int |
| 2008 | ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs) |
| 2009 | { |
| 2010 | uint32_t rsize; |
| 2011 | void *sp; |
| 2012 | |
| 2013 | if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS, &mask, sizeof(mask), &sp, &rsize)) |
| 2014 | return 0; |
| 2015 | KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize)); |
| 2016 | *hangs = *(uint32_t *)sp; |
| 2017 | return 1; |
| 2018 | } |
| 2019 | |
| 2020 | static void |
| 2021 | ath_bmiss_proc(void *arg, int pending) |
| 2022 | { |
| 2023 | struct ath_softc *sc = arg; |
| 2024 | struct ifnet *ifp = sc->sc_ifp; |
| 2025 | uint32_t hangs; |
| 2026 | |
| 2027 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); |
| 2028 | |
| 2029 | /* |
| 2030 | * Do a reset upon any beacon miss event. |
| 2031 | * |
| 2032 | * It may be a non-recognised RX clear hang which needs a reset |
| 2033 | * to clear. |
| 2034 | */ |
| 2035 | wlan_serialize_enter(); |
| 2036 | if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) { |
| 2037 | ath_reset(ifp, ATH_RESET_NOLOSS); |
| 2038 | if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs); |
| 2039 | } else { |
| 2040 | ath_reset(ifp, ATH_RESET_NOLOSS); |
| 2041 | ieee80211_beacon_miss(ifp->if_l2com); |
| 2042 | } |
| 2043 | wlan_serialize_exit(); |
| 2044 | } |
| 2045 | |
| 2046 | /* |
| 2047 | * Handle TKIP MIC setup to deal with hardware that doesn't do MIC |
| 2048 | * calcs together with WME. If necessary disable the crypto |
| 2049 | * hardware and mark the 802.11 state so keys will be setup |
| 2050 | * with the MIC work done in software. |
| 2051 | */ |
| 2052 | static void |
| 2053 | ath_settkipmic(struct ath_softc *sc) |
| 2054 | { |
| 2055 | struct ifnet *ifp = sc->sc_ifp; |
| 2056 | struct ieee80211com *ic = ifp->if_l2com; |
| 2057 | |
| 2058 | if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { |
| 2059 | if (ic->ic_flags & IEEE80211_F_WME) { |
| 2060 | ath_hal_settkipmic(sc->sc_ah, AH_FALSE); |
| 2061 | ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; |
| 2062 | } else { |
| 2063 | ath_hal_settkipmic(sc->sc_ah, AH_TRUE); |
| 2064 | ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; |
| 2065 | } |
| 2066 | } |
| 2067 | } |
| 2068 | |
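| | /* |
| | * Bring the interface up: stop any previous activity, reset the |
| | * chip, re-apply per-channel state, restart the receive engine |
| | * and enable interrupts before starting the vaps. |
| | */ |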
| 2069 | static void |
| 2070 | ath_init(void *arg) |
| 2071 | { |
| 2072 | struct ath_softc *sc = (struct ath_softc *) arg; |
| 2073 | struct ifnet *ifp = sc->sc_ifp; |
| 2074 | struct ieee80211com *ic = ifp->if_l2com; |
| 2075 | struct ath_hal *ah = sc->sc_ah; |
| 2076 | HAL_STATUS status; |
| 2077 | |
| 2078 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", |
| 2079 | __func__, ifp->if_flags); |
| 2080 | |
| 2081 | ATH_LOCK(sc); |
| 2082 | /* |
| 2083 | * Stop anything previously setup. This is safe |
| 2084 | * whether this is the first time through or not. |
| 2085 | */ |
| 2086 | ath_stop_locked(ifp); |
| 2087 | |
| 2088 | /* |
| 2089 | * The basic interface to setting the hardware in a good |
| 2090 | * state is ``reset''. On return the hardware is known to |
| 2091 | * be powered up and with interrupts disabled. This must |
| 2092 | * be followed by initialization of the appropriate bits |
| 2093 | * and then setup of the interrupt mask. |
| 2094 | */ |
| 2095 | ath_settkipmic(sc); |
| 2096 | ath_update_chainmasks(sc, ic->ic_curchan); |
| 2097 | ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, |
| 2098 | sc->sc_cur_rxchainmask); |
| 2099 | if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) { |
| 2100 | if_printf(ifp, "unable to reset hardware; hal status %u\n", |
| 2101 | status); |
| 2102 | ATH_UNLOCK(sc); |
| 2103 | return; |
| 2104 | } |
| 2105 | ath_chan_change(sc, ic->ic_curchan); |
| 2106 | |
| 2107 | /* Let DFS at it in case it's a DFS channel */ |
| 2108 | ath_dfs_radar_enable(sc, ic->ic_curchan); |
| 2109 | |
| 2110 | /* Let spectral at it in case spectral is enabled */ |
| 2111 | ath_spectral_enable(sc, ic->ic_curchan); |
| 2112 | |
| 2113 | /* |
| 2114 | * Let bluetooth coexistence at it in case it's needed for this channel |
| 2115 | */ |
| 2116 | ath_btcoex_enable(sc, ic->ic_curchan); |
| 2117 | |
| 2118 | /* |
| 2119 | * If we're doing TDMA, enforce the TXOP limitation for chips that |
| 2120 | * support it. |
| 2121 | */ |
| 2122 | if (sc->sc_hasenforcetxop && sc->sc_tdma) |
| 2123 | ath_hal_setenforcetxop(sc->sc_ah, 1); |
| 2124 | else |
| 2125 | ath_hal_setenforcetxop(sc->sc_ah, 0); |
| 2126 | |
| 2127 | /* |
| 2128 | * Likewise this is set during reset so update |
| 2129 | * state cached in the driver. |
| 2130 | */ |
| 2131 | sc->sc_diversity = ath_hal_getdiversity(ah); |
| 2132 | sc->sc_lastlongcal = 0; |
| 2133 | sc->sc_resetcal = 1; |
| 2134 | sc->sc_lastcalreset = 0; |
| 2135 | sc->sc_lastani = 0; |
| 2136 | sc->sc_lastshortcal = 0; |
| 2137 | sc->sc_doresetcal = AH_FALSE; |
| 2138 | /* |
| 2139 | * Beacon timers were cleared here; give ath_newstate() |
| 2140 | * a hint that the beacon timers should be poked when |
| 2141 | * things transition to the RUN state. |
| 2142 | */ |
| 2143 | sc->sc_beacons = 0; |
| 2144 | |
| 2145 | /* |
| 2146 | * Setup the hardware after reset: the key cache |
| 2147 | * is filled as needed and the receive engine is |
| 2148 | * set going. Frame transmit is handled entirely |
| 2149 | * in the frame output path; there's nothing to do |
| 2150 | * here except setup the interrupt mask. |
| 2151 | */ |
| 2152 | if (ath_startrecv(sc) != 0) { |
| 2153 | if_printf(ifp, "unable to start recv logic\n"); |
| 2154 | ATH_UNLOCK(sc); |
| 2155 | return; |
| 2156 | } |
| 2157 | |
| 2158 | /* |
| 2159 | * Enable interrupts. |
| 2160 | */ |
| 2161 | sc->sc_imask = HAL_INT_RX | HAL_INT_TX |
| 2162 | | HAL_INT_RXEOL | HAL_INT_RXORN |
| 2163 | | HAL_INT_TXURN |
| 2164 | | HAL_INT_FATAL | HAL_INT_GLOBAL; |
| 2165 | |
| 2166 | /* |
| 2167 | * Enable RX EDMA bits. Note these overlap with |
| 2168 | * HAL_INT_RX and HAL_INT_RXDESC respectively. |
| 2169 | */ |
| 2170 | if (sc->sc_isedma) |
| 2171 | sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP); |
| 2172 | |
| 2173 | /* |
| 2174 | * Enable MIB interrupts when there are hardware phy counters. |
| 2175 | * Note we only do this (at the moment) for station mode. |
| 2176 | */ |
| 2177 | if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) |
| 2178 | sc->sc_imask |= HAL_INT_MIB; |
| 2179 | |
| 2180 | /* Enable global TX timeout and carrier sense timeout if available */ |
| 2181 | if (ath_hal_gtxto_supported(ah)) |
| 2182 | sc->sc_imask |= HAL_INT_GTT; |
| 2183 | |
| 2184 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n", |
| 2185 | __func__, sc->sc_imask); |
| 2186 | |
| 2187 | ifp->if_flags |= IFF_RUNNING; |
| 2188 | callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); |
| 2189 | ath_hal_intrset(ah, sc->sc_imask); |
| 2190 | |
| 2191 | ATH_UNLOCK(sc); |
| 2192 | |
| 2193 | #ifdef ATH_TX99_DIAG |
| 2194 | if (sc->sc_tx99 != NULL) |
| 2195 | sc->sc_tx99->start(sc->sc_tx99); |
| 2196 | else |
| 2197 | #endif |
| 2198 | ieee80211_start_all(ic); /* start all vap's */ |
| 2199 | } |
| 2200 | |
| 2201 | static void |
| 2202 | ath_stop_locked(struct ifnet *ifp) |
| 2203 | { |
| 2204 | struct ath_softc *sc = ifp->if_softc; |
| 2205 | struct ath_hal *ah = sc->sc_ah; |
| 2206 | |
| 2207 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", |
| 2208 | __func__, sc->sc_invalid, ifp->if_flags); |
| 2209 | |
| 2210 | ATH_LOCK_ASSERT(sc); |
| 2211 | if (ifp->if_flags & IFF_RUNNING) { |
| 2212 | /* |
| 2213 | * Shutdown the hardware and driver: |
| 2214 | * reset 802.11 state machine |
| 2215 | * turn off timers |
| 2216 | * disable interrupts |
| 2217 | * turn off the radio |
| 2218 | * clear transmit machinery |
| 2219 | * clear receive machinery |
| 2220 | * drain and release tx queues |
| 2221 | * reclaim beacon resources |
| 2222 | * power down hardware |
| 2223 | * |
| 2224 | * Note that some of this work is not possible if the |
| 2225 | * hardware is gone (invalid). |
| 2226 | */ |
| 2227 | #ifdef ATH_TX99_DIAG |
| 2228 | if (sc->sc_tx99 != NULL) |
| 2229 | sc->sc_tx99->stop(sc->sc_tx99); |
| 2230 | #endif |
| 2231 | callout_stop(&sc->sc_wd_ch); |
| 2232 | sc->sc_wd_timer = 0; |
| 2233 | ifp->if_flags &= ~IFF_RUNNING; |
| 2234 | if (!sc->sc_invalid) { |
| 2235 | if (sc->sc_softled) { |
| 2236 | callout_stop(&sc->sc_ledtimer); |
| 2237 | ath_hal_gpioset(ah, sc->sc_ledpin, |
| 2238 | !sc->sc_ledon); |
| 2239 | sc->sc_blinking = 0; |
| 2240 | } |
| 2241 | ath_hal_intrset(ah, 0); |
| 2242 | } |
| 2243 | ath_draintxq(sc, ATH_RESET_DEFAULT); |
| 2244 | if (!sc->sc_invalid) { |
| 2245 | ath_stoprecv(sc, 1); |
| 2246 | ath_hal_phydisable(ah); |
| 2247 | } else |
| 2248 | sc->sc_rxlink = NULL; |
| 2249 | ath_beacon_free(sc); /* XXX not needed */ |
| 2250 | } |
| 2251 | } |
| 2252 | |
| 2253 | #define MAX_TXRX_ITERATIONS 1000 |
| 2254 | static void |
| 2255 | ath_txrx_stop_locked(struct ath_softc *sc) |
| 2256 | { |
| 2257 | int i = MAX_TXRX_ITERATIONS; |
| 2258 | |
| 2259 | ATH_UNLOCK_ASSERT(sc); |
| 2260 | ATH_PCU_LOCK_ASSERT(sc); |
| 2261 | |
| 2262 | /* |
| 2263 | * Sleep until all the pending operations have completed. |
| 2264 | * |
| 2265 | * The caller must ensure that reset has been incremented |
| 2266 | * or the pending operations may continue being queued. |
| 2267 | */ |
| 2268 | while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt || |
| 2269 | sc->sc_txstart_cnt || sc->sc_intr_cnt) { |
| 2270 | if (i <= 0) |
| 2271 | break; |
| 2272 | wlan_serialize_sleep(sc, 0, "ath_txrx_stop", 1); |
| 2273 | i--; |
| 2274 | } |
| 2275 | |
| 2276 | if (i <= 0) |
| 2277 | device_printf(sc->sc_dev, |
| 2278 | "%s: didn't finish after %d iterations\n", |
| 2279 | __func__, MAX_TXRX_ITERATIONS); |
| 2280 | } |
| 2281 | #undef MAX_TXRX_ITERATIONS |
| 2282 | |
| 2283 | #if 0 |
| 2284 | static void |
| 2285 | ath_txrx_stop(struct ath_softc *sc) |
| 2286 | { |
| 2287 | ATH_UNLOCK_ASSERT(sc); |
| 2288 | ATH_PCU_UNLOCK_ASSERT(sc); |
| 2289 | |
| 2290 | ATH_PCU_LOCK(sc); |
| 2291 | ath_txrx_stop_locked(sc); |
| 2292 | ATH_PCU_UNLOCK(sc); |
| 2293 | } |
| 2294 | #endif |
| 2295 | |
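| | /* |
| | * Re-enable deferred TX/RX processing after a reset by |
| | * unblocking the driver taskqueue. |
| | */ |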
| 2296 | static void |
| 2297 | ath_txrx_start(struct ath_softc *sc) |
| 2298 | { |
| 2299 | |
| 2300 | taskqueue_unblock(sc->sc_tq); |
| 2301 | } |
| 2302 | |
| 2303 | /* |
| 2304 | * Grab the reset lock, and wait around until no one else |
| 2305 | * is trying to do anything with it. |
| 2306 | * |
| 2307 | * This is totally horrible but we can't hold this lock for |
| 2308 | * long enough to do TX/RX or we end up with net80211/ip stack |
| 2309 | * LORs and eventual deadlock. |
| 2310 | * |
| 2311 | * "dowait" signals whether to spin, waiting for the reset |
| 2312 | * lock count to reach 0. This should (for now) only be used |
| 2313 | * during the reset path, as the rest of the code may not |
| 2314 | * be locking-reentrant enough to behave correctly. |
| 2315 | * |
| 2316 | * Another, cleaner way should be found to serialise all of |
| 2317 | * these operations. |
| 2318 | */ |
| 2319 | #define MAX_RESET_ITERATIONS 10 |
| 2320 | static int |
| 2321 | ath_reset_grablock(struct ath_softc *sc, int dowait) |
| 2322 | { |
| 2323 | int w = 0; |
| 2324 | int i = MAX_RESET_ITERATIONS; |
| 2325 | |
| 2326 | ATH_PCU_LOCK_ASSERT(sc); |
| 2327 | do { |
| 2328 | if (sc->sc_inreset_cnt == 0) { |
| 2329 | w = 1; |
| 2330 | break; |
| 2331 | } |
| 2332 | if (dowait == 0) { |
| 2333 | w = 0; |
| 2334 | break; |
| 2335 | } |
| 2336 | ATH_PCU_UNLOCK(sc); |
| 2337 | wlan_serialize_sleep(sc, 0, "ath_reset_grablock", 1); |
| 2338 | i--; |
| 2339 | ATH_PCU_LOCK(sc); |
| 2340 | } while (i > 0); |
| 2341 | |
| 2342 | /* |
| 2343 | * We always increment the refcounter, regardless |
| 2344 | * of whether we succeeded in acquiring it |
| 2345 | * exclusively. |
| 2346 | */ |
| 2347 | sc->sc_inreset_cnt++; |
| 2348 | |
| 2349 | if (i <= 0) |
| 2350 | device_printf(sc->sc_dev, |
| 2351 | "%s: didn't finish after %d iterations\n", |
| 2352 | __func__, MAX_RESET_ITERATIONS); |
| 2353 | |
| 2354 | if (w == 0) |
| 2355 | device_printf(sc->sc_dev, |
| 2356 | "%s: warning, recursive reset path!\n", |
| 2357 | __func__); |
| 2358 | |
| 2359 | return w; |
| 2360 | } |
| 2361 | #undef MAX_RESET_ITERATIONS |
| 2362 | |
| 2363 | /* |
| 2364 | * XXX TODO: write ath_reset_releaselock |
| 2365 | */ |
| 2366 | |
| 2367 | static void |
| 2368 | ath_stop(struct ifnet *ifp) |
| 2369 | { |
| 2370 | struct ath_softc *sc __unused = ifp->if_softc; |
| 2371 | |
| 2372 | ATH_LOCK(sc); |
| 2373 | ath_stop_locked(ifp); |
| 2374 | ATH_UNLOCK(sc); |
| 2375 | } |
| 2376 | |
| 2377 | /* |
| 2378 | * Reset the hardware w/o losing operational state. This is |
| 2379 | * basically a more efficient way of doing ath_stop, ath_init, |
| 2380 | * followed by state transitions to the current 802.11 |
| 2381 | * operational state. Used to recover from various errors and |
| 2382 | * to reset or reload hardware state. |
| 2383 | */ |
| 2384 | int |
| 2385 | ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type) |
| 2386 | { |
| 2387 | struct ath_softc *sc = ifp->if_softc; |
| 2388 | struct ieee80211com *ic = ifp->if_l2com; |
| 2389 | struct ath_hal *ah = sc->sc_ah; |
| 2390 | HAL_STATUS status; |
| 2391 | int i; |
| 2392 | |
| 2393 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__); |
| 2394 | |
| 2395 | /* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */ |
| 2396 | ATH_PCU_UNLOCK_ASSERT(sc); |
| 2397 | ATH_UNLOCK_ASSERT(sc); |
| 2398 | |
| 2399 | /* Try to stop any further TX/RX from occurring */ |
| 2400 | taskqueue_block(sc->sc_tq); |
| 2401 | |
| 2402 | ATH_PCU_LOCK(sc); |
| 2403 | |
| 2404 | /* |
| 2405 | * Grab the reset lock before TX/RX is stopped. |
| 2406 | * |
| 2407 | * This is needed to ensure that when the TX/RX actually does finish, |
| 2408 | * no further TX/RX/reset runs in parallel with this. |
| 2409 | */ |
| 2410 | if (ath_reset_grablock(sc, 1) == 0) { |
| 2411 | device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", |
| 2412 | __func__); |
| 2413 | } |
| 2414 | |
| 2415 | /* disable interrupts */ |
| 2416 | ath_hal_intrset(ah, 0); |
| 2417 | |
| 2418 | /* |
| 2419 | * Now, ensure that any in progress TX/RX completes before we |
| 2420 | * continue. |
| 2421 | */ |
| 2422 | ath_txrx_stop_locked(sc); |
| 2423 | |
| 2424 | ATH_PCU_UNLOCK(sc); |
| 2425 | |
| 2426 | /* |
| 2427 | * Should now wait for pending TX/RX to complete |
| 2428 | * and block future ones from occurring. This needs to be |
| 2429 | * done before the TX queue is drained. |
| 2430 | */ |
| 2431 | ath_draintxq(sc, reset_type); /* stop xmit side */ |
| 2432 | |
| 2433 | /* |
| 2434 | * Regardless of whether we're doing a no-loss flush or |
| 2435 | * not, stop the PCU and handle what's in the RX queue. |
| 2436 | * That way frames which shouldn't be dropped aren't. |
| 2437 | */ |
| 2438 | ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS)); |
| 2439 | ath_rx_flush(sc); |
| 2440 | |
| 2441 | ath_settkipmic(sc); /* configure TKIP MIC handling */ |
| 2442 | /* NB: indicate channel change so we do a full reset */ |
| 2443 | ath_update_chainmasks(sc, ic->ic_curchan); |
| 2444 | ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, |
| 2445 | sc->sc_cur_rxchainmask); |
| 2446 | if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status)) |
| 2447 | if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", |
| 2448 | __func__, status); |
| 2449 | sc->sc_diversity = ath_hal_getdiversity(ah); |
| 2450 | |
| 2451 | /* Let DFS at it in case it's a DFS channel */ |
| 2452 | ath_dfs_radar_enable(sc, ic->ic_curchan); |
| 2453 | |
| 2454 | /* Let spectral at it in case spectral is enabled */ |
| 2455 | ath_spectral_enable(sc, ic->ic_curchan); |
| 2456 | |
| 2457 | /* |
| 2458 | * Let bluetooth coexistence at it in case it's needed for this channel |
| 2459 | */ |
| 2460 | ath_btcoex_enable(sc, ic->ic_curchan); |
| 2461 | |
| 2462 | /* |
| 2463 | * If we're doing TDMA, enforce the TXOP limitation for chips that |
| 2464 | * support it. |
| 2465 | */ |
| 2466 | if (sc->sc_hasenforcetxop && sc->sc_tdma) |
| 2467 | ath_hal_setenforcetxop(sc->sc_ah, 1); |
| 2468 | else |
| 2469 | ath_hal_setenforcetxop(sc->sc_ah, 0); |
| 2470 | |
| 2471 | if (ath_startrecv(sc) != 0) /* restart recv */ |
| 2472 | if_printf(ifp, "%s: unable to start recv logic\n", __func__); |
| 2473 | /* |
| 2474 | * We may be doing a reset in response to an ioctl |
| 2475 | * that changes the channel so update any state that |
| 2476 | * might change as a result. |
| 2477 | */ |
| 2478 | ath_chan_change(sc, ic->ic_curchan); |
| 2479 | if (sc->sc_beacons) { /* restart beacons */ |
| 2480 | #ifdef IEEE80211_SUPPORT_TDMA |
| 2481 | if (sc->sc_tdma) |
| 2482 | ath_tdma_config(sc, NULL); |
| 2483 | else |
| 2484 | #endif |
| 2485 | ath_beacon_config(sc, NULL); |
| 2486 | } |
| 2487 | |
| 2488 | /* |
| 2489 | * Release the reset lock and re-enable interrupts here. |
| 2490 | * If an interrupt was being processed in ath_intr(), |
| 2491 | * it would disable interrupts at this point. So we have |
| 2492 | * to atomically enable interrupts and decrement the |
| 2493 | * reset counter - this way ath_intr() doesn't end up |
| 2494 | * disabling interrupts without a corresponding enable |
| 2495 | * in the reset or channel change path. |
| 2496 | */ |
| 2497 | ATH_PCU_LOCK(sc); |
| 2498 | sc->sc_inreset_cnt--; |
| 2499 | /* XXX only do this if sc_inreset_cnt == 0? */ |
| 2500 | ath_hal_intrset(ah, sc->sc_imask); |
| 2501 | ATH_PCU_UNLOCK(sc); |
| 2502 | |
| 2503 | /* |
| 2504 | * TX and RX can be started here. If it were started with |
| 2505 | * sc_inreset_cnt > 0, the TX and RX path would abort. |
| 2506 | * Thus if this is a nested call through the reset or |
| 2507 | * channel change code, TX completion will occur but |
| 2508 | * RX completion and ath_start / ath_tx_start will not |
| 2509 | * run. |
| 2510 | */ |
| 2511 | |
| 2512 | /* Restart TX/RX as needed */ |
| 2513 | ath_txrx_start(sc); |
| 2514 | |
| 2515 | /* Restart TX completion and pending TX */ |
| 2516 | if (reset_type == ATH_RESET_NOLOSS) { |
| 2517 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { |
| 2518 | if (ATH_TXQ_SETUP(sc, i)) { |
| 2519 | ATH_TXQ_LOCK(&sc->sc_txq[i]); |
| 2520 | ath_txq_restart_dma(sc, &sc->sc_txq[i]); |
| 2521 | ATH_TXQ_UNLOCK(&sc->sc_txq[i]); |
| 2522 | |
| 2523 | ATH_TX_LOCK(sc); |
| 2524 | ath_txq_sched(sc, &sc->sc_txq[i]); |
| 2525 | ATH_TX_UNLOCK(sc); |
| 2526 | } |
| 2527 | } |
| 2528 | } |
| 2529 | |
| 2530 | /* |
| 2531 | * This may have been set during an ath_start() call which |
| 2532 | * set this once it detected a concurrent TX was going on. |
| 2533 | * So, clear it. |
| 2534 | */ |
| 2535 | IF_LOCK(&ifp->if_snd); |
| 2536 | ifq_clr_oactive(&ifp->if_snd); |
| 2537 | IF_UNLOCK(&ifp->if_snd); |
| 2538 | |
| 2539 | /* Handle any frames in the TX queue */ |
| 2540 | /* |
| 2541 | * XXX should this be done by the caller, rather than |
| 2542 | * ath_reset() ? |
| 2543 | */ |
| 2544 | ath_tx_kick(sc); /* restart xmit */ |
| 2545 | return 0; |
| 2546 | } |
| 2547 | |
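| | /* |
| | * Per-vap reset callback from net80211. TX power changes are |
| | * handled directly; anything else triggers a full chip reset. |
| | */ |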
| 2548 | static int |
| 2549 | ath_reset_vap(struct ieee80211vap *vap, u_long cmd) |
| 2550 | { |
| 2551 | struct ieee80211com *ic = vap->iv_ic; |
| 2552 | struct ifnet *ifp = ic->ic_ifp; |
| 2553 | struct ath_softc *sc = ifp->if_softc; |
| 2554 | struct ath_hal *ah = sc->sc_ah; |
| 2555 | |
| 2556 | switch (cmd) { |
| 2557 | case IEEE80211_IOC_TXPOWER: |
| 2558 | /* |
| 2559 | * If per-packet TPC is enabled, then we have nothing |
| 2560 | * to do; otherwise we need to force the global limit. |
| 2561 | * All this can happen directly; no need to reset. |
| 2562 | */ |
| 2563 | if (!ath_hal_gettpc(ah)) |
| 2564 | ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); |
| 2565 | return 0; |
| 2566 | } |
| 2567 | /* XXX? Full or NOLOSS? */ |
| 2568 | return ath_reset(ifp, ATH_RESET_FULL); |
| 2569 | } |
| 2570 | |
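| | /* |
| | * Fetch a TX buffer from the free list. The caller must hold the |
| | * TX buffer lock; management allocations come from a separate |
| | * pool so data traffic can't starve management frames. |
| | */ |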
| 2571 | struct ath_buf * |
| 2572 | _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype) |
| 2573 | { |
| 2574 | struct ath_buf *bf; |
| 2575 | |
| 2576 | ATH_TXBUF_LOCK_ASSERT(sc); |
| 2577 | |
| 2578 | if (btype == ATH_BUFTYPE_MGMT) |
| 2579 | bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt); |
| 2580 | else |
| 2581 | bf = TAILQ_FIRST(&sc->sc_txbuf); |
| 2582 | |
| 2583 | if (bf == NULL) { |
| 2584 | sc->sc_stats.ast_tx_getnobuf++; |
| 2585 | } else { |
| 2586 | if (bf->bf_flags & ATH_BUF_BUSY) { |
| 2587 | sc->sc_stats.ast_tx_getbusybuf++; |
| 2588 | bf = NULL; |
| 2589 | } |
| 2590 | } |
| 2591 | |
| 2592 | if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) { |
| 2593 | if (btype == ATH_BUFTYPE_MGMT) |
| 2594 | TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list); |
| 2595 | else { |
| 2596 | TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list); |
| 2597 | sc->sc_txbuf_cnt--; |
| 2598 | |
| 2599 | /* |
| 2600 | * This shouldn't happen; however, just to be |
| 2601 | * safe print a warning and fudge the txbuf |
| 2602 | * count. |
| 2603 | */ |
| 2604 | if (sc->sc_txbuf_cnt < 0) { |
| 2605 | device_printf(sc->sc_dev, |
| 2606 | "%s: sc_txbuf_cnt < 0?\n", |
| 2607 | __func__); |
| 2608 | sc->sc_txbuf_cnt = 0; |
| 2609 | } |
| 2610 | } |
| 2611 | } else |
| 2612 | bf = NULL; |
| 2613 | |
| 2614 | if (bf == NULL) { |
| 2615 | /* XXX should check which list, mgmt or otherwise */ |
| 2616 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__, |
| 2617 | TAILQ_FIRST(&sc->sc_txbuf) == NULL ? |
| 2618 | "out of xmit buffers" : "xmit buffer busy"); |
| 2619 | return NULL; |
| 2620 | } |
| 2621 | |
| 2622 | /* XXX TODO: should do this at buffer list initialisation */ |
| 2623 | /* XXX (then, ensure the buffer has the right flag set) */ |
| 2624 | bf->bf_flags = 0; |
| 2625 | if (btype == ATH_BUFTYPE_MGMT) |
| 2626 | bf->bf_flags |= ATH_BUF_MGMT; |
| 2627 | else |
| 2628 | bf->bf_flags &= (~ATH_BUF_MGMT); |
| 2629 | |
| 2630 | /* Valid bf here; clear some basic fields */ |
| 2631 | bf->bf_next = NULL; /* XXX just to be sure */ |
| 2632 | bf->bf_last = NULL; /* XXX again, just to be sure */ |
| 2633 | bf->bf_comp = NULL; /* XXX again, just to be sure */ |
| 2634 | bzero(&bf->bf_state, sizeof(bf->bf_state)); |
| 2635 | |
| 2636 | /* |
| 2637 | * Track the descriptor ID only if doing EDMA |
| 2638 | */ |
| 2639 | if (sc->sc_isedma) { |
| 2640 | bf->bf_descid = sc->sc_txbuf_descid; |
| 2641 | sc->sc_txbuf_descid++; |
| 2642 | } |
| 2643 | |
| 2644 | return bf; |
| 2645 | } |
| 2646 | |
| 2647 | /* |
| 2648 | * When retrying a software frame, buffers marked ATH_BUF_BUSY |
| 2649 | * can't be thrown back on the queue as they could still be |
| 2650 | * in use by the hardware. |
| 2651 | * |
| 2652 | * This duplicates the buffer, or returns NULL. |
| 2653 | * |
| 2654 | * The descriptor is also copied but the link pointers and |
| 2655 | * the DMA segments aren't copied; this frame should thus |
| 2656 | * be again passed through the descriptor setup/chain routines |
| 2657 | * so the link is correct. |
| 2658 | * |
| 2659 | * The caller must free the buffer using ath_freebuf(). |
| 2660 | */ |
| 2661 | struct ath_buf * |
| 2662 | ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf) |
| 2663 | { |
| 2664 | struct ath_buf *tbf; |
| 2665 | |
| 2666 | tbf = ath_getbuf(sc, |
| 2667 | (bf->bf_flags & ATH_BUF_MGMT) ? |
| 2668 | ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL); |
| 2669 | if (tbf == NULL) |
| 2670 | return NULL; /* XXX failure? Why? */ |
| 2671 | |
| 2672 | /* Copy basics */ |
| 2673 | tbf->bf_next = NULL; |
| 2674 | tbf->bf_nseg = bf->bf_nseg; |
| 2675 | tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE; |
| 2676 | tbf->bf_status = bf->bf_status; |
| 2677 | tbf->bf_m = bf->bf_m; |
| 2678 | tbf->bf_node = bf->bf_node; |
| 2679 | /* will be setup by the chain/setup function */ |
| 2680 | tbf->bf_lastds = NULL; |
| 2681 | /* for now, last == self */ |
| 2682 | tbf->bf_last = tbf; |
| 2683 | tbf->bf_comp = bf->bf_comp; |
| 2684 | |
| 2685 | /* NOTE: DMA segments will be setup by the setup/chain functions */ |
| 2686 | |
| 2687 | /* The caller has to re-init the descriptor + links */ |
| 2688 | |
| 2689 | /* |
| 2690 | * Free the DMA mapping here, before we NULL the mbuf. |
| 2691 | * We must only call bus_dmamap_unload() once per mbuf chain |
| 2692 | * or behaviour is undefined. |
| 2693 | */ |
| 2694 | if (bf->bf_m != NULL) { |
| 2695 | /* |
| 2696 | * XXX is this POSTWRITE call required? |
| 2697 | */ |
| 2698 | bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, |
| 2699 | BUS_DMASYNC_POSTWRITE); |
| 2700 | bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); |
| 2701 | } |
| 2702 | |
| 2703 | bf->bf_m = NULL; |
| 2704 | bf->bf_node = NULL; |
| 2705 | |
| 2706 | /* Copy state */ |
| 2707 | memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state)); |
| 2708 | |
| 2709 | return tbf; |
| 2710 | } |
| 2711 | |
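| | /* |
| | * Locked wrapper around _ath_getbuf_locked(); marks the output |
| | * queue as stalled (oactive) when no buffers are available. |
| | */ |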
| 2712 | struct ath_buf * |
| 2713 | ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype) |
| 2714 | { |
| 2715 | struct ath_buf *bf; |
| 2716 | |
| 2717 | ATH_TXBUF_LOCK(sc); |
| 2718 | bf = _ath_getbuf_locked(sc, btype); |
| 2719 | /* |
| 2720 | * If a mgmt buffer was requested but we're out of those, |
| 2721 | * try requesting a normal one. |
| 2722 | */ |
| 2723 | if (bf == NULL && btype == ATH_BUFTYPE_MGMT) |
| 2724 | bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL); |
| 2725 | ATH_TXBUF_UNLOCK(sc); |
| 2726 | if (bf == NULL) { |
| 2727 | struct ifnet *ifp = sc->sc_ifp; |
| 2728 | |
| 2729 | DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__); |
| 2730 | sc->sc_stats.ast_tx_qstop++; |
| 2731 | IF_LOCK(&ifp->if_snd); |
| 2732 | ifq_set_oactive(&ifp->if_snd); |
| 2733 | IF_UNLOCK(&ifp->if_snd); |
| 2734 | } |
| 2735 | return bf; |
| 2736 | } |
| 2737 | |
| 2738 | #if 0 |
| 2739 | static void |
| 2740 | ath_qflush(struct ifnet *ifp) |
| 2741 | { |
| 2742 | |
| 2743 | /* XXX TODO */ |
| 2744 | } |
| 2745 | |
| 2746 | /* |
| 2747 | * Transmit a single frame. |
| 2748 | * |
| 2749 | * net80211 will free the node reference if the transmit |
| 2750 | * fails, so don't free the node reference here. |
| 2751 | */ |
| 2752 | static int |
| 2753 | ath_transmit(struct ifnet *ifp, struct mbuf *m) |
| 2754 | { |
| 2755 | struct ieee80211com *ic = ifp->if_l2com; |
| 2756 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 2757 | struct ieee80211_node *ni; |
| 2758 | struct mbuf *next; |
| 2759 | struct ath_buf *bf; |
| 2760 | ath_bufhead frags; |
| 2761 | int retval = 0; |
| 2762 | |
| 2763 | /* |
| 2764 | * Tell the reset path that we're currently transmitting. |
| 2765 | */ |
| 2766 | ATH_PCU_LOCK(sc); |
| 2767 | if (sc->sc_inreset_cnt > 0) { |
| 2768 | DPRINTF(sc, ATH_DEBUG_XMIT, |
| 2769 | "%s: sc_inreset_cnt > 0; bailing\n", __func__); |
| 2770 | ATH_PCU_UNLOCK(sc); |
| 2771 | IF_LOCK(&ifp->if_snd); |
| 2772 | sc->sc_stats.ast_tx_qstop++; |
| 2773 | ifq_set_oactive(&ifp->if_snd); |
| 2774 | IF_UNLOCK(&ifp->if_snd); |
| 2775 | ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish"); |
| 2776 | return (ENOBUFS); /* XXX should be EINVAL or? */ |
| 2777 | } |
| 2778 | sc->sc_txstart_cnt++; |
| 2779 | ATH_PCU_UNLOCK(sc); |
| 2780 | |
| 2781 | ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start"); |
| 2782 | /* |
| 2783 | * Grab the TX lock - it's ok to do this here; we haven't |
| 2784 | * yet started transmitting. |
| 2785 | */ |
| 2786 | ATH_TX_LOCK(sc); |
| 2787 | |
| 2788 | /* |
| 2789 | * Node reference, if there's one. |
| 2790 | */ |
| 2791 | ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; |
| 2792 | |
| 2793 | /* |
| 2794 | * Enforce how deep a node queue can get. |
| 2795 | * |
| 2796 | * XXX it would be nicer if we kept an mbuf queue per |
| 2797 | * node and only whacked them into ath_bufs when we |
| 2798 | * are ready to schedule some traffic from them. |
| 2799 | * .. that may come later. |
| 2800 | * |
| 2801 | * XXX we should also track the per-node hardware queue |
| 2802 | * depth so it is easy to limit the _SUM_ of the swq and |
| 2803 | * hwq frames. Since we only schedule two HWQ frames |
| 2804 | * at a time, this should be OK for now. |
| 2805 | */ |
| 2806 | if ((!(m->m_flags & M_EAPOL)) && |
| 2807 | (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) { |
| 2808 | sc->sc_stats.ast_tx_nodeq_overflow++; |
| 2809 | m_freem(m); |
| 2810 | m = NULL; |
| 2811 | retval = ENOBUFS; |
| 2812 | goto finish; |
| 2813 | } |
| 2814 | |
| 2815 | /* |
| 2816 | * Check how many TX buffers are available. |
| 2817 | * |
| 2818 | * If this is for non-EAPOL traffic, just leave some |
| 2819 | * space free in order for buffer cloning and raw |
| 2820 | * frame transmission to occur. |
| 2821 | * |
| 2822 | * If it's for EAPOL traffic, ignore this for now. |
| 2823 | * Management traffic will be sent via the raw transmit |
| 2824 | * method which bypasses this check. |
| 2825 | * |
| 2826 | * This is needed to ensure that EAPOL frames during |
| 2827 | * (re) keying have a chance to go out. |
| 2828 | * |
| 2829 | * See kern/138379 for more information. |
| 2830 | */ |
| 2831 | if ((!(m->m_flags & M_EAPOL)) && |
| 2832 | (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) { |
| 2833 | sc->sc_stats.ast_tx_nobuf++; |
| 2834 | m_freem(m); |
| 2835 | m = NULL; |
| 2836 | retval = ENOBUFS; |
| 2837 | goto finish; |
| 2838 | } |
| 2839 | |
| 2840 | /* |
| 2841 | * Grab a TX buffer and associated resources. |
| 2842 | * |
| 2843 | * If it's an EAPOL frame, allocate a MGMT ath_buf. |
| 2844 | * That way temporary buffer exhaustion in the data |
| 2845 | * path doesn't leave us without the ability to |
| 2846 | * transmit management frames. |
| 2847 | * |
| 2848 | * Otherwise allocate a normal buffer. |
| 2849 | */ |
| 2850 | if (m->m_flags & M_EAPOL) |
| 2851 | bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT); |
| 2852 | else |
| 2853 | bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL); |
| 2854 | |
| 2855 | if (bf == NULL) { |
| 2856 | /* |
| 2857 | * If we failed to allocate a buffer, fail. |
| 2858 | * |
| 2859 | * We shouldn't fail normally, due to the check |
| 2860 | * above. |
| 2861 | */ |
| 2862 | sc->sc_stats.ast_tx_nobuf++; |
| 2863 | IF_LOCK(&ifp->if_snd); |
| 2864 | ifq_set_oactive(&ifp->if_snd); |
| 2865 | IF_UNLOCK(&ifp->if_snd); |
| 2866 | m_freem(m); |
| 2867 | m = NULL; |
| 2868 | retval = ENOBUFS; |
| 2869 | goto finish; |
| 2870 | } |
| 2871 | |
| 2872 | /* |
| 2873 | * At this point we have a buffer; so we need to free it |
| 2874 | * if we hit any error conditions. |
| 2875 | */ |
| 2876 | |
| 2877 | /* |
| 2878 | * Check for fragmentation. If this frame |
| 2879 | * has been broken up verify we have enough |
| 2880 | * buffers to send all the fragments so all |
| 2881 | * go out or none... |
| 2882 | */ |
| 2883 | TAILQ_INIT(&frags); |
| 2884 | if ((m->m_flags & M_FRAG) && |
| 2885 | !ath_txfrag_setup(sc, &frags, m, ni)) { |
| 2886 | DPRINTF(sc, ATH_DEBUG_XMIT, |
| 2887 | "%s: out of txfrag buffers\n", __func__); |
| 2888 | sc->sc_stats.ast_tx_nofrag++; |
| 2889 | ifp->if_oerrors++; |
| 2890 | ath_freetx(m); |
| 2891 | goto bad; |
| 2892 | } |
| 2893 | |
| 2894 | /* |
| 2895 | * At this point if we have any TX fragments, then we will |
| 2896 | * have bumped the node reference once for each of those. |
| 2897 | */ |
| 2898 | |
| 2899 | /* |
| 2900 | * XXX Is there anything actually _enforcing_ that the |
| 2901 | * fragments are being transmitted in one hit, rather than |
| 2902 | * being interleaved with other transmissions on that |
| 2903 | * hardware queue? |
| 2904 | * |
| 2905 | * The ATH TX output lock is the only thing serialising this |
| 2906 | * right now. |
| 2907 | */ |
| 2908 | |
| 2909 | /* |
| 2910 | * Calculate the "next fragment" length field in ath_buf |
| 2911 | * in order to let the transmit path know enough about |
| 2912 | * what to next write to the hardware. |
| 2913 | */ |
| 2914 | if (m->m_flags & M_FRAG) { |
| 2915 | struct ath_buf *fbf = bf; |
| 2916 | struct ath_buf *n_fbf = NULL; |
| 2917 | struct mbuf *fm = m->m_nextpkt; |
| 2918 | |
| 2919 | /* |
| 2920 | * We need to walk the list of fragments and set |
| 2921 | * the next size to the following buffer. |
| 2922 | * However, the first buffer isn't in the frag |
| 2923 | * list, so we have to do some gymnastics here. |
| 2924 | */ |
| 2925 | TAILQ_FOREACH(n_fbf, &frags, bf_list) { |
| 2926 | fbf->bf_nextfraglen = fm->m_pkthdr.len; |
| 2927 | fbf = n_fbf; |
| 2928 | fm = fm->m_nextpkt; |
| 2929 | } |
| 2930 | } |
| 2931 | |
| 2932 | /* |
| 2933 | * Bump the ifp output counter. |
| 2934 | * |
| 2935 | * XXX should use atomics? |
| 2936 | */ |
| 2937 | ifp->if_opackets++; |
| 2938 | nextfrag: |
| 2939 | /* |
| 2940 | * Pass the frame to the h/w for transmission. |
| 2941 | * Fragmented frames have each frag chained together |
| 2942 | * with m_nextpkt. We know there are sufficient ath_buf's |
| 2943 | * to send all the frags because of work done by |
| 2944 | * ath_txfrag_setup. We leave m_nextpkt set while |
| 2945 | * calling ath_tx_start so it can use it to extend the |
| 2946 | * tx duration to cover the subsequent frag and |
| 2947 | * so it can reclaim all the mbufs in case of an error; |
| 2948 | * ath_tx_start clears m_nextpkt once it commits to |
| 2949 | * handing the frame to the hardware. |
| 2950 | * |
| 2951 | * Note: if this fails, then the mbufs are freed but |
| 2952 | * not the node reference. |
| 2953 | */ |
| 2954 | next = m->m_nextpkt; |
| 2955 | if (ath_tx_start(sc, ni, bf, m)) { |
| 2956 | bad: |
| 2957 | ifp->if_oerrors++; |
| 2958 | reclaim: |
| 2959 | bf->bf_m = NULL; |
| 2960 | bf->bf_node = NULL; |
| 2961 | ATH_TXBUF_LOCK(sc); |
| 2962 | ath_returnbuf_head(sc, bf); |
| 2963 | /* |
| 2964 | * Free the rest of the node references and |
| 2965 | * buffers for the fragment list. |
| 2966 | */ |
| 2967 | ath_txfrag_cleanup(sc, &frags, ni); |
| 2968 | ATH_TXBUF_UNLOCK(sc); |
| 2969 | retval = ENOBUFS; |
| 2970 | goto finish; |
| 2971 | } |
| 2972 | |
| 2973 | /* |
| 2974 | * Check here if the node is in power save state. |
| 2975 | */ |
| 2976 | ath_tx_update_tim(sc, ni, 1); |
| 2977 | |
| 2978 | if (next != NULL) { |
| 2979 | /* |
| 2980 | * Beware of state changing between frags. |
| 2981 | * XXX check sta power-save state? |
| 2982 | */ |
| 2983 | if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { |
| 2984 | DPRINTF(sc, ATH_DEBUG_XMIT, |
| 2985 | "%s: flush fragmented packet, state %s\n", |
| 2986 | __func__, |
| 2987 | ieee80211_state_name[ni->ni_vap->iv_state]); |
| 2988 | /* XXX dmamap */ |
| 2989 | ath_freetx(next); |
| 2990 | goto reclaim; |
| 2991 | } |
| 2992 | m = next; |
| 2993 | bf = TAILQ_FIRST(&frags); |
| 2994 | KASSERT(bf != NULL, ("no buf for txfrag")); |
| 2995 | TAILQ_REMOVE(&frags, bf, bf_list); |
| 2996 | goto nextfrag; |
| 2997 | } |
| 2998 | |
| 2999 | /* |
| 3000 | * Bump watchdog timer. |
| 3001 | */ |
| 3002 | sc->sc_wd_timer = 5; |
| 3003 | |
| 3004 | finish: |
| 3005 | ATH_TX_UNLOCK(sc); |
| 3006 | |
| 3007 | /* |
| 3008 | * Finished transmitting! |
| 3009 | */ |
| 3010 | ATH_PCU_LOCK(sc); |
| 3011 | sc->sc_txstart_cnt--; |
| 3012 | ATH_PCU_UNLOCK(sc); |
| 3013 | |
| 3014 | ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished"); |
| 3015 | |
| 3016 | return (retval); |
| 3017 | } |
| 3018 | #endif |
| 3019 | |
| 3020 | static int |
| 3021 | ath_media_change(struct ifnet *ifp) |
| 3022 | { |
| 3023 | int error = ieee80211_media_change(ifp); |
| 3024 | /* NB: only the fixed rate can change and that doesn't need a reset */ |
| 3025 | return (error == ENETRESET ? 0 : error); |
| 3026 | } |
| 3027 | |
| 3028 | /* |
| 3029 | * Block/unblock tx+rx processing while a key change is done. |
| 3030 | * We assume the caller serializes key management operations |
| 3031 | * so we only need to worry about synchronization with other |
| 3032 | * uses that originate in the driver. |
| 3033 | */ |
| 3034 | static void |
| 3035 | ath_key_update_begin(struct ieee80211vap *vap) |
| 3036 | { |
| 3037 | struct ifnet *ifp = vap->iv_ic->ic_ifp; |
| 3038 | struct ath_softc *sc = ifp->if_softc; |
| 3039 | |
| 3040 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); |
| 3041 | taskqueue_block(sc->sc_tq); |
| 3042 | IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ |
| 3043 | } |
| 3044 | |
| 3045 | static void |
| 3046 | ath_key_update_end(struct ieee80211vap *vap) |
| 3047 | { |
| 3048 | struct ifnet *ifp = vap->iv_ic->ic_ifp; |
| 3049 | struct ath_softc *sc = ifp->if_softc; |
| 3050 | |
| 3051 | DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); |
| 3052 | IF_UNLOCK(&ifp->if_snd); |
| 3053 | taskqueue_unblock(sc->sc_tq); |
| 3054 | } |
| 3055 | |
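| | /* |
| | * net80211 callback: reprogram the hardware RX filter when |
| | * promiscuous mode is toggled. |
| | */ |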
| 3056 | static void |
| 3057 | ath_update_promisc(struct ifnet *ifp) |
| 3058 | { |
| 3059 | struct ath_softc *sc = ifp->if_softc; |
| 3060 | u_int32_t rfilt; |
| 3061 | |
| 3062 | /* configure rx filter */ |
| 3063 | rfilt = ath_calcrxfilter(sc); |
| 3064 | ath_hal_setrxfilter(sc->sc_ah, rfilt); |
| 3065 | |
| 3066 | DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); |
| 3067 | } |
| 3068 | |
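| | /* |
| | * net80211 callback: recompute the 64-bit multicast hash filter |
| | * from the interface multicast list and install it in hardware. |
| | */ |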
| 3069 | static void |
| 3070 | ath_update_mcast(struct ifnet *ifp) |
| 3071 | { |
| 3072 | struct ath_softc *sc = ifp->if_softc; |
| 3073 | u_int32_t mfilt[2]; |
| 3074 | |
| 3075 | /* calculate and install multicast filter */ |
| 3076 | if ((ifp->if_flags & IFF_ALLMULTI) == 0) { |
| 3077 | struct ifmultiaddr *ifma; |
| 3078 | /* |
| 3079 | * Merge multicast addresses to form the hardware filter. |
| 3080 | */ |
| 3081 | mfilt[0] = mfilt[1] = 0; |
| 3082 | #if 0 |
| 3083 | if_maddr_rlock(ifp); /* XXX need some fiddling to remove? */ |
| 3084 | #endif |
| 3085 | TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { |
| 3086 | caddr_t dl; |
| 3087 | u_int32_t val; |
| 3088 | u_int8_t pos; |
| 3089 | |
| 3090 | /* calculate XOR of eight 6bit values */ |
| 3091 | dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); |
| 3092 | val = LE_READ_4(dl + 0); |
| 3093 | pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; |
| 3094 | val = LE_READ_4(dl + 3); |
| 3095 | pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; |
| 3096 | pos &= 0x3f; |
| 3097 | mfilt[pos / 32] |= (1 << (pos % 32)); |
| 3098 | } |
| 3099 | #if 0 |
| 3100 | if_maddr_runlock(ifp); |
| 3101 | #endif |
| 3102 | } else |
| 3103 | mfilt[0] = mfilt[1] = ~0; |
| 3104 | ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); |
| 3105 | DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", |
| 3106 | __func__, mfilt[0], mfilt[1]); |
| 3107 | } |
| 3108 | |
| 3109 | void |
| 3110 | ath_mode_init(struct ath_softc *sc) |
| 3111 | { |
| 3112 | struct ifnet *ifp = sc->sc_ifp; |
| 3113 | struct ath_hal *ah = sc->sc_ah; |
| 3114 | u_int32_t rfilt; |
| 3115 | |
| 3116 | /* configure rx filter */ |
| 3117 | rfilt = ath_calcrxfilter(sc); |
| 3118 | ath_hal_setrxfilter(ah, rfilt); |
| 3119 | |
| 3120 | /* configure operational mode */ |
| 3121 | ath_hal_setopmode(ah); |
| 3122 | |
| 3123 | DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE, |
| 3124 | "%s: ah=%p, ifp=%p, if_addr=%p\n", |
| 3125 | __func__, |
| 3126 | ah, |
| 3127 | ifp, |
| 3128 | (ifp == NULL) ? NULL : ifp->if_addr); |
| 3129 | |
| 3130 | /* handle any link-level address change */ |
| 3131 | ath_hal_setmac(ah, IF_LLADDR(ifp)); |
| 3132 | |
| 3133 | /* calculate and install multicast filter */ |
| 3134 | ath_update_mcast(ifp); |
| 3135 | } |
| 3136 | |
| 3137 | /* |
| 3138 | * Set the slot time based on the current setting. |
| 3139 | */ |
| 3140 | void |
| 3141 | ath_setslottime(struct ath_softc *sc) |
| 3142 | { |
| 3143 | struct ieee80211com *ic = sc->sc_ifp->if_l2com; |
| 3144 | struct ath_hal *ah = sc->sc_ah; |
| 3145 | u_int usec; |
| 3146 | |
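| | /* Half/quarter-rate channels use longer (13/21 usec) slot times. */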
| 3147 | if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) |
| 3148 | usec = 13; |
| 3149 | else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) |
| 3150 | usec = 21; |
| 3151 | else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { |
| 3152 | /* honor short/long slot time only in 11g */ |
| 3153 | /* XXX shouldn't honor on pure g or turbo g channel */ |
| 3154 | if (ic->ic_flags & IEEE80211_F_SHSLOT) |
| 3155 | usec = HAL_SLOT_TIME_9; |
| 3156 | else |
| 3157 | usec = HAL_SLOT_TIME_20; |
| 3158 | } else |
| 3159 | usec = HAL_SLOT_TIME_9; |
| 3160 | |
| 3161 | DPRINTF(sc, ATH_DEBUG_RESET, |
| 3162 | "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", |
| 3163 | __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, |
| 3164 | ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); |
| 3165 | |
| 3166 | ath_hal_setslottime(ah, usec); |
| 3167 | sc->sc_updateslot = OK; |
| 3168 | } |
| 3169 | |
| 3170 | /* |
| 3171 | * Callback from the 802.11 layer to update the |
| 3172 | * slot time based on the current setting. |
| 3173 | */ |
| 3174 | static void |
| 3175 | ath_updateslot(struct ifnet *ifp) |
| 3176 | { |
| 3177 | struct ath_softc *sc = ifp->if_softc; |
| 3178 | struct ieee80211com *ic = ifp->if_l2com; |
| 3179 | |
| 3180 | /* |
| 3181 | * When not coordinating the BSS, change the hardware |
| 3182 | * immediately. For other operation we defer the change |
| 3183 | * until beacon updates have propagated to the stations. |
| 3184 | */ |
| 3185 | if (ic->ic_opmode == IEEE80211_M_HOSTAP || |
| 3186 | ic->ic_opmode == IEEE80211_M_MBSS) |
| 3187 | sc->sc_updateslot = UPDATE; |
| 3188 | else |
| 3189 | ath_setslottime(sc); |
| 3190 | } |
| 3191 | |
| 3192 | /* |
| 3193 | * Append the contents of src to dst; both queues |
| 3194 | * are assumed to be locked. |
| 3195 | */ |
| 3196 | void |
| 3197 | ath_txqmove(struct ath_txq *dst, struct ath_txq *src) |
| 3198 | { |
| 3199 | |
| 3200 | ATH_TXQ_LOCK_ASSERT(src); |
| 3201 | ATH_TXQ_LOCK_ASSERT(dst); |
| 3202 | |
| 3203 | TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list); |
| 3204 | dst->axq_link = src->axq_link; |
| 3205 | src->axq_link = NULL; |
| 3206 | dst->axq_depth += src->axq_depth; |
| 3207 | dst->axq_aggr_depth += src->axq_aggr_depth; |
| 3208 | src->axq_depth = 0; |
| 3209 | src->axq_aggr_depth = 0; |
| 3210 | } |
| 3211 | |
| 3212 | /* |
| 3213 | * Reset the hardware, with no loss. |
| 3214 | * |
| 3215 | * This can't be used for a general case reset. |
| 3216 | */ |
| 3217 | static void |
| 3218 | ath_reset_proc(void *arg, int pending) |
| 3219 | { |
| 3220 | struct ath_softc *sc = arg; |
| 3221 | struct ifnet *ifp = sc->sc_ifp; |
| 3222 | |
| 3223 | #if 0 |
| 3224 | if_printf(ifp, "%s: resetting\n", __func__); |
| 3225 | #endif |
| 3226 | wlan_serialize_enter(); |
| 3227 | ath_reset(ifp, ATH_RESET_NOLOSS); |
| 3228 | wlan_serialize_exit(); |
| 3229 | } |
| 3230 | |
| 3231 | /* |
| 3232 | * Reset the hardware after detecting beacons have stopped. |
| 3233 | */ |
| 3234 | static void |
| 3235 | ath_bstuck_proc(void *arg, int pending) |
| 3236 | { |
| 3237 | struct ath_softc *sc = arg; |
| 3238 | struct ifnet *ifp = sc->sc_ifp; |
| 3239 | uint32_t hangs = 0; |
| 3240 | |
| 3241 | wlan_serialize_enter(); |
| 3242 | if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) |
| 3243 | if_printf(ifp, "bb hang detected (0x%x)\n", hangs); |
| 3244 | |
| 3245 | #ifdef ATH_DEBUG_ALQ |
| 3246 | if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON)) |
| 3247 | if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL); |
| 3248 | #endif |
| 3249 | |
| 3250 | if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", |
| 3251 | sc->sc_bmisscount); |
| 3252 | sc->sc_stats.ast_bstuck++; |
| 3253 | /* |
| 3254 | * This assumes that there's no simultaneous channel mode change |
| 3255 | * occurring.
| 3256 | */ |
| 3257 | ath_reset(ifp, ATH_RESET_NOLOSS); |
| 3258 | wlan_serialize_exit(); |
| 3259 | } |
| 3260 | |
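| | /*
| | * Callback for bus_dmamap_load(): record the bus address of the
| | * (single) DMA segment for the caller.
| | */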
| 3261 | static void |
| 3262 | ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) |
| 3263 | { |
| 3264 | bus_addr_t *paddr = (bus_addr_t*) arg; |
| 3265 | KASSERT(error == 0, ("error %u on bus_dma callback", error)); |
| 3266 | *paddr = segs->ds_addr; |
| 3267 | } |
| 3268 | |
| 3269 | /* |
| 3270 | * Allocate the descriptors and appropriate DMA tag/setup. |
| 3271 | * |
| 3272 | * For some situations (eg EDMA TX completion), there isn't a requirement |
| 3273 | * for the ath_buf entries to be allocated. |
| 3274 | */ |
| 3275 | int |
| 3276 | ath_descdma_alloc_desc(struct ath_softc *sc, |
| 3277 | struct ath_descdma *dd, ath_bufhead *head, |
| 3278 | const char *name, int ds_size, int ndesc) |
| 3279 | { |
| 3280 | #define DS2PHYS(_dd, _ds) \ |
| 3281 | ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) |
| 3282 | #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ |
| 3283 | ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) |
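| | /*
| | * DS2PHYS() converts a descriptor's kernel virtual address into its
| | * bus address using the block's base mapping; ATH_DESC_4KB_BOUND_CHECK()
| | * is non-zero when a descriptor of _len bytes at _daddr would cross a
| | * 4KB page boundary.
| | */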
| 3284 | struct ifnet *ifp = sc->sc_ifp; |
| 3285 | int error; |
| 3286 | |
| 3287 | dd->dd_descsize = ds_size; |
| 3288 | |
| 3289 | DPRINTF(sc, ATH_DEBUG_RESET, |
| 3290 | "%s: %s DMA: %u desc, %d bytes per descriptor\n", |
| 3291 | __func__, name, ndesc, dd->dd_descsize); |
| 3292 | |
| 3293 | dd->dd_name = name; |
| 3294 | dd->dd_desc_len = dd->dd_descsize * ndesc; |
| 3295 | |
| 3296 | /* |
| 3297 | * Merlin work-around: |
| 3298 | * Descriptors that cross the 4KB boundary can't be used. |
| 3299 | * Assume one skipped descriptor per 4KB page. |
| 3300 | */ |
| 3301 | if (! ath_hal_split4ktrans(sc->sc_ah)) { |
| 3302 | int numpages = dd->dd_desc_len / 4096; |
| 3303 | dd->dd_desc_len += ds_size * numpages; |
| 3304 | } |
| 3305 | |
| 3306 | /* |
| 3307 | * Setup DMA descriptor area. |
| 3308 | * |
| 3309 | * BUS_DMA_ALLOCNOW is not used; we never use bounce |
| 3310 | * buffers for the descriptors themselves. |
| 3311 | */ |
| 3312 | error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ |
| 3313 | PAGE_SIZE, 0, /* alignment, bounds */ |
| 3314 | BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ |
| 3315 | BUS_SPACE_MAXADDR, /* highaddr */ |
| 3316 | NULL, NULL, /* filter, filterarg */ |
| 3317 | dd->dd_desc_len, /* maxsize */ |
| 3318 | 1, /* nsegments */ |
| 3319 | dd->dd_desc_len, /* maxsegsize */ |
| 3320 | 0, /* flags */ |
| 3321 | &dd->dd_dmat); |
| 3322 | if (error != 0) { |
| 3323 | if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); |
| 3324 | return error; |
| 3325 | } |
| 3326 | |
| 3327 | /* allocate descriptors */ |
| 3328 | error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, |
| 3329 | BUS_DMA_NOWAIT | BUS_DMA_COHERENT, |
| 3330 | &dd->dd_dmamap); |
| 3331 | if (error != 0) { |
| 3332 | if_printf(ifp, "unable to alloc memory for %u %s descriptors, " |
| 3333 | "error %u\n", ndesc, dd->dd_name, error); |
| 3334 | goto fail1; |
| 3335 | } |
| 3336 | |
| 3337 | error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, |
| 3338 | dd->dd_desc, dd->dd_desc_len, |
| 3339 | ath_load_cb, &dd->dd_desc_paddr, |
| 3340 | BUS_DMA_NOWAIT); |
| 3341 | if (error != 0) { |
| 3342 | if_printf(ifp, "unable to map %s descriptors, error %u\n", |
| 3343 | dd->dd_name, error); |
| 3344 | goto fail2; |
| 3345 | } |
| 3346 | |
| 3347 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", |
| 3348 | __func__, dd->dd_name, (uint8_t *) dd->dd_desc, |
| 3349 | (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr, |
| 3350 | /*XXX*/ (u_long) dd->dd_desc_len); |
| 3351 | |
| 3352 | return (0); |
| 3353 | |
| 3354 | fail2: |
| 3355 | bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); |
| 3356 | fail1: |
| 3357 | bus_dma_tag_destroy(dd->dd_dmat); |
| 3358 | memset(dd, 0, sizeof(*dd)); |
| 3359 | return error; |
| 3360 | #undef DS2PHYS |
| 3361 | #undef ATH_DESC_4KB_BOUND_CHECK |
| 3362 | } |
| 3363 | |
| 3364 | int |
| 3365 | ath_descdma_setup(struct ath_softc *sc, |
| 3366 | struct ath_descdma *dd, ath_bufhead *head, |
| 3367 | const char *name, int ds_size, int nbuf, int ndesc) |
| 3368 | { |
| 3369 | #define DS2PHYS(_dd, _ds) \ |
| 3370 | ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) |
| 3371 | #define ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \ |
| 3372 | ((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0) |
| 3373 | struct ifnet *ifp = sc->sc_ifp; |
| 3374 | uint8_t *ds; |
| 3375 | struct ath_buf *bf; |
| 3376 | int i, bsize, error; |
| 3377 | |
| 3378 | /* Allocate descriptors */ |
| 3379 | error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size, |
| 3380 | nbuf * ndesc); |
| 3381 | |
| 3382 | /* Assume any errors during allocation were dealt with */ |
| 3383 | if (error != 0) { |
| 3384 | return (error); |
| 3385 | } |
| 3386 | |
| 3387 | ds = (uint8_t *) dd->dd_desc; |
| 3388 | |
| 3389 | /* allocate rx buffers */ |
| 3390 | bsize = sizeof(struct ath_buf) * nbuf; |
| 3391 | bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT|M_ZERO); |
| 3392 | if (bf == NULL) { |
| 3393 | if_printf(ifp, "malloc of %s buffers failed, size %u\n", |
| 3394 | dd->dd_name, bsize); |
| 3395 | goto fail3; |
| 3396 | } |
| 3397 | dd->dd_bufptr = bf; |
| 3398 | |
| 3399 | TAILQ_INIT(head); |
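| | /*
| | * Carve the descriptor block into per-buffer slices: each ath_buf
| | * gets 'ndesc' descriptors and is linked onto the supplied list.
| | */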
| 3400 | for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) { |
| 3401 | bf->bf_desc = (struct ath_desc *) ds; |
| 3402 | bf->bf_daddr = DS2PHYS(dd, ds); |
| 3403 | if (! ath_hal_split4ktrans(sc->sc_ah)) { |
| 3404 | /* |
| 3405 | * Merlin WAR: Skip descriptor addresses which |
| 3406 | * cause 4KB boundary crossing along any point |
| 3407 | * in the descriptor. |
| 3408 | */ |
| 3409 | if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr, |
| 3410 | dd->dd_descsize)) { |
| 3411 | /* Start at the next page */ |
| 3412 | ds += 0x1000 - (bf->bf_daddr & 0xFFF); |
| 3413 | bf->bf_desc = (struct ath_desc *) ds; |
| 3414 | bf->bf_daddr = DS2PHYS(dd, ds); |
| 3415 | } |
| 3416 | } |
| 3417 | error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, |
| 3418 | &bf->bf_dmamap); |
| 3419 | if (error != 0) { |
| 3420 | if_printf(ifp, "unable to create dmamap for %s " |
| 3421 | "buffer %u, error %u\n", dd->dd_name, i, error); |
| 3422 | ath_descdma_cleanup(sc, dd, head); |
| 3423 | return error; |
| 3424 | } |
| 3425 | bf->bf_lastds = bf->bf_desc; /* Just an initial value */ |
| 3426 | TAILQ_INSERT_TAIL(head, bf, bf_list); |
| 3427 | } |
| 3428 | |
| 3429 | /* |
| 3430 | * XXX TODO: ensure that ds doesn't overflow the descriptor |
| 3431 | * allocation; otherwise weird stuff will occur and crash your
| 3432 | * machine. |
| 3433 | */ |
| 3434 | return 0; |
| 3435 | /* XXX this should likely just call ath_descdma_cleanup() */ |
| 3436 | fail3: |
| 3437 | bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); |
| 3438 | bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); |
| 3439 | bus_dma_tag_destroy(dd->dd_dmat); |
| 3440 | memset(dd, 0, sizeof(*dd)); |
| 3441 | return error; |
| 3442 | #undef DS2PHYS |
| 3443 | #undef ATH_DESC_4KB_BOUND_CHECK |
| 3444 | } |
| 3445 | |
| 3446 | /* |
| 3447 | * Allocate ath_buf entries but no descriptor contents. |
| 3448 | * |
| 3449 | * This is for RX EDMA where the descriptors are the header part of |
| 3450 | * the RX buffer. |
| 3451 | */ |
| 3452 | int |
| 3453 | ath_descdma_setup_rx_edma(struct ath_softc *sc, |
| 3454 | struct ath_descdma *dd, ath_bufhead *head, |
| 3455 | const char *name, int nbuf, int rx_status_len) |
| 3456 | { |
| 3457 | struct ifnet *ifp = sc->sc_ifp; |
| 3458 | struct ath_buf *bf; |
| 3459 | int i, bsize, error; |
| 3460 | |
| 3461 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n", |
| 3462 | __func__, name, nbuf); |
| 3463 | |
| 3464 | dd->dd_name = name; |
| 3465 | /* |
| 3466 | * This is (mostly) for show. We're not allocating any actual
| 3467 | * descriptors here, as with EDMA RX the descriptor is part
| 3468 | * of the RX buffer.
| 3469 | * |
| 3470 | * However, dd_desc_len is used by ath_descdma_free() to determine |
| 3471 | * whether we have already freed this DMA mapping. |
| 3472 | */ |
| 3473 | dd->dd_desc_len = rx_status_len * nbuf; |
| 3474 | dd->dd_descsize = rx_status_len; |
| 3475 | |
| 3476 | /* allocate rx buffers */ |
| 3477 | bsize = sizeof(struct ath_buf) * nbuf; |
| 3478 | bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT | M_ZERO); |
| 3479 | if (bf == NULL) { |
| 3480 | if_printf(ifp, "malloc of %s buffers failed, size %u\n", |
| 3481 | dd->dd_name, bsize); |
| 3482 | error = ENOMEM; |
| 3483 | goto fail3; |
| 3484 | } |
| 3485 | dd->dd_bufptr = bf; |
| 3486 | |
| 3487 | TAILQ_INIT(head); |
| 3488 | for (i = 0; i < nbuf; i++, bf++) { |
| 3489 | bf->bf_desc = NULL; |
| 3490 | bf->bf_daddr = 0; |
| 3491 | bf->bf_lastds = NULL; /* Just an initial value */ |
| 3492 | |
| 3493 | error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, |
| 3494 | &bf->bf_dmamap); |
| 3495 | if (error != 0) { |
| 3496 | if_printf(ifp, "unable to create dmamap for %s " |
| 3497 | "buffer %u, error %u\n", dd->dd_name, i, error); |
| 3498 | ath_descdma_cleanup(sc, dd, head); |
| 3499 | return error; |
| 3500 | } |
| 3501 | TAILQ_INSERT_TAIL(head, bf, bf_list); |
| 3502 | } |
| 3503 | return 0; |
| 3504 | fail3: |
| 3505 | memset(dd, 0, sizeof(*dd)); |
| 3506 | return error; |
| 3507 | } |
| 3508 | |
| 3509 | void |
| 3510 | ath_descdma_cleanup(struct ath_softc *sc, |
| 3511 | struct ath_descdma *dd, ath_bufhead *head) |
| 3512 | { |
| 3513 | struct ath_buf *bf; |
| 3514 | struct ieee80211_node *ni; |
| 3515 | int do_warning = 0; |
| 3516 | |
| 3517 | if (dd->dd_dmamap != 0) { |
| 3518 | bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); |
| 3519 | bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); |
| 3520 | bus_dma_tag_destroy(dd->dd_dmat); |
| 3521 | } |
| 3522 | |
| 3523 | if (head != NULL) { |
| 3524 | TAILQ_FOREACH(bf, head, bf_list) { |
| 3525 | if (bf->bf_m) { |
| 3526 | /* |
| 3527 | * XXX warn if there's buffers here. |
| 3528 | * XXX it should have been freed by the |
| 3529 | * owner! |
| 3530 | */ |
| 3531 | |
| 3532 | if (do_warning == 0) { |
| 3533 | do_warning = 1; |
| 3534 | device_printf(sc->sc_dev, |
| 3535 | "%s: %s: mbuf should've been" |
| 3536 | " unmapped/freed!\n", |
| 3537 | __func__, |
| 3538 | dd->dd_name); |
| 3539 | } |
| 3540 | bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, |
| 3541 | BUS_DMASYNC_POSTREAD); |
| 3542 | bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); |
| 3543 | m_freem(bf->bf_m); |
| 3544 | bf->bf_m = NULL; |
| 3545 | } |
| 3546 | if (bf->bf_dmamap != NULL) { |
| 3547 | bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); |
| 3548 | bf->bf_dmamap = NULL; |
| 3549 | } |
| 3550 | ni = bf->bf_node; |
| 3551 | bf->bf_node = NULL; |
| 3552 | if (ni != NULL) { |
| 3553 | /* |
| 3554 | * Reclaim node reference. |
| 3555 | */ |
| 3556 | ieee80211_free_node(ni); |
| 3557 | } |
| 3558 | } |
| 3559 | } |
| 3560 | |
| 3561 | if (head != NULL) |
| 3562 | TAILQ_INIT(head); |
| 3563 | |
| 3564 | if (dd->dd_bufptr != NULL) |
| 3565 | kfree(dd->dd_bufptr, M_ATHDEV); |
| 3566 | memset(dd, 0, sizeof(*dd)); |
| 3567 | } |
| 3568 | |
| 3569 | static int |
| 3570 | ath_desc_alloc(struct ath_softc *sc) |
| 3571 | { |
| 3572 | int error; |
| 3573 | |
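| | /*
| | * Three descriptor pools are allocated: the data TX pool (up to
| | * ATH_MAX_SCATTER descriptors per buffer for scatter/gather), a
| | * smaller management TX pool, and the beacon pool (one descriptor
| | * per beacon buffer).
| | */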
| 3574 | error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, |
| 3575 | "tx", sc->sc_tx_desclen, ath_txbuf, ATH_MAX_SCATTER); |
| 3576 | if (error != 0) { |
| 3577 | return error; |
| 3578 | } |
| 3579 | sc->sc_txbuf_cnt = ath_txbuf; |
| 3580 | |
| 3581 | error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt, |
| 3582 | "tx_mgmt", sc->sc_tx_desclen, ath_txbuf_mgmt, |
| 3583 | ATH_TXDESC); |
| 3584 | if (error != 0) { |
| 3585 | ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); |
| 3586 | return error; |
| 3587 | } |
| 3588 | |
| 3589 | /* |
| 3590 | * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the |
| 3591 | * flag doesn't have to be set in ath_getbuf_locked(). |
| 3592 | */ |
| 3593 | |
| 3594 | error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, |
| 3595 | "beacon", sc->sc_tx_desclen, ATH_BCBUF, 1); |
| 3596 | if (error != 0) { |
| 3597 | ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); |
| 3598 | ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, |
| 3599 | &sc->sc_txbuf_mgmt); |
| 3600 | return error; |
| 3601 | } |
| 3602 | return 0; |
| 3603 | } |
| 3604 | |
| 3605 | static void |
| 3606 | ath_desc_free(struct ath_softc *sc) |
| 3607 | { |
| 3608 | |
| 3609 | if (sc->sc_bdma.dd_desc_len != 0) |
| 3610 | ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); |
| 3611 | if (sc->sc_txdma.dd_desc_len != 0) |
| 3612 | ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); |
| 3613 | if (sc->sc_txdma_mgmt.dd_desc_len != 0) |
| 3614 | ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt, |
| 3615 | &sc->sc_txbuf_mgmt); |
| 3616 | } |
| 3617 | |
| 3618 | static struct ieee80211_node * |
| 3619 | ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) |
| 3620 | { |
| 3621 | struct ieee80211com *ic = vap->iv_ic; |
| 3622 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 3623 | const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; |
| 3624 | struct ath_node *an; |
| 3625 | |
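| | /*
| | * Allocate the node along with the rate control module's per-node
| | * state (arc_space bytes) as a single block.
| | */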
| 3626 | an = kmalloc(space, M_80211_NODE, M_INTWAIT|M_ZERO); |
| 3627 | if (an == NULL) { |
| 3628 | /* XXX stat+msg */ |
| 3629 | return NULL; |
| 3630 | } |
| 3631 | ath_rate_node_init(sc, an); |
| 3632 | |
| 3633 | /* Set up the lock name - there's no associd yet, so just use the node pointer */
| 3634 | ksnprintf(an->an_name, sizeof(an->an_name), "%s: node %p", |
| 3635 | device_get_nameunit(sc->sc_dev), an); |
| 3636 | #if 0 |
| 3637 | mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF); |
| 3638 | #endif |
| 3639 | |
| 3640 | /* XXX setup ath_tid */ |
| 3641 | ath_tx_tid_init(sc, an); |
| 3642 | |
| 3643 | DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, mac, ":", an); |
| 3644 | return &an->an_node; |
| 3645 | } |
| 3646 | |
| 3647 | static void |
| 3648 | ath_node_cleanup(struct ieee80211_node *ni) |
| 3649 | { |
| 3650 | struct ieee80211com *ic = ni->ni_ic; |
| 3651 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 3652 | |
| 3653 | DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, |
| 3654 | ni->ni_macaddr, ":", ATH_NODE(ni)); |
| 3655 | |
| 3656 | /* Cleanup ath_tid, free unused bufs, unlink bufs in TXQ */ |
| 3657 | ath_tx_node_flush(sc, ATH_NODE(ni)); |
| 3658 | ath_rate_node_cleanup(sc, ATH_NODE(ni)); |
| 3659 | sc->sc_node_cleanup(ni); |
| 3660 | } |
| 3661 | |
| 3662 | static void |
| 3663 | ath_node_free(struct ieee80211_node *ni) |
| 3664 | { |
| 3665 | struct ieee80211com *ic = ni->ni_ic; |
| 3666 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 3667 | |
| 3668 | DPRINTF(sc, ATH_DEBUG_NODE, "%s: %6D: an %p\n", __func__, |
| 3669 | ni->ni_macaddr, ":", ATH_NODE(ni)); |
| 3670 | #if 0 |
| 3671 | mtx_destroy(&ATH_NODE(ni)->an_mtx); |
| 3672 | #endif |
| 3673 | sc->sc_node_free(ni); |
| 3674 | } |
| 3675 | |
| 3676 | static void |
| 3677 | ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) |
| 3678 | { |
| 3679 | struct ieee80211com *ic = ni->ni_ic; |
| 3680 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 3681 | struct ath_hal *ah = sc->sc_ah; |
| 3682 | |
| 3683 | *rssi = ic->ic_node_getrssi(ni); |
| 3684 | if (ni->ni_chan != IEEE80211_CHAN_ANYC) |
| 3685 | *noise = ath_hal_getchannoise(ah, ni->ni_chan); |
| 3686 | else |
| 3687 | *noise = -95; /* nominally correct */ |
| 3688 | } |
| 3689 | |
| 3690 | /* |
| 3691 | * Set the default antenna. |
| 3692 | */ |
| 3693 | void |
| 3694 | ath_setdefantenna(struct ath_softc *sc, u_int antenna) |
| 3695 | { |
| 3696 | struct ath_hal *ah = sc->sc_ah; |
| 3697 | |
| 3698 | /* XXX block beacon interrupts */ |
| 3699 | ath_hal_setdefantenna(ah, antenna); |
| 3700 | if (sc->sc_defant != antenna) |
| 3701 | sc->sc_stats.ast_ant_defswitch++; |
| 3702 | sc->sc_defant = antenna; |
| 3703 | sc->sc_rxotherant = 0; |
| 3704 | } |
| 3705 | |
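| | /*
| | * Initialise the software state (counters, lists and lock) for a
| | * hardware transmit queue.
| | */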
| 3706 | static void |
| 3707 | ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) |
| 3708 | { |
| 3709 | txq->axq_qnum = qnum; |
| 3710 | txq->axq_ac = 0; |
| 3711 | txq->axq_depth = 0; |
| 3712 | txq->axq_aggr_depth = 0; |
| 3713 | txq->axq_intrcnt = 0; |
| 3714 | txq->axq_link = NULL; |
| 3715 | txq->axq_softc = sc; |
| 3716 | TAILQ_INIT(&txq->axq_q); |
| 3717 | TAILQ_INIT(&txq->axq_tidq); |
| 3718 | TAILQ_INIT(&txq->fifo.axq_q); |
| 3719 | ATH_TXQ_LOCK_INIT(sc, txq); |
| 3720 | } |
| 3721 | |
| 3722 | /* |
| 3723 | * Setup a h/w transmit queue. |
| 3724 | */ |
| 3725 | static struct ath_txq * |
| 3726 | ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) |
| 3727 | { |
| 3728 | #define N(a) (sizeof(a)/sizeof(a[0])) |
| 3729 | struct ath_hal *ah = sc->sc_ah; |
| 3730 | HAL_TXQ_INFO qi; |
| 3731 | int qnum; |
| 3732 | |
| 3733 | memset(&qi, 0, sizeof(qi)); |
| 3734 | qi.tqi_subtype = subtype; |
| 3735 | qi.tqi_aifs = HAL_TXQ_USEDEFAULT; |
| 3736 | qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; |
| 3737 | qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; |
| 3738 | /* |
| 3739 | * Enable interrupts only for EOL and DESC conditions. |
| 3740 | * We mark tx descriptors to receive a DESC interrupt |
| 3741 | * when a tx queue gets deep; otherwise we wait for the
| 3742 | * EOL to reap descriptors. Note that this is done to |
| 3743 | * reduce interrupt load and this only defers reaping |
| 3744 | * descriptors, never transmitting frames. Aside from |
| 3745 | * reducing interrupts this also permits more concurrency. |
| 3746 | * The only potential downside is if the tx queue backs |
| 3747 | * up, in which case the top half of the kernel may back up
| 3748 | * due to a lack of tx descriptors. |
| 3749 | */ |
| 3750 | if (sc->sc_isedma) |
| 3751 | qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | |
| 3752 | HAL_TXQ_TXOKINT_ENABLE; |
| 3753 | else |
| 3754 | qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | |
| 3755 | HAL_TXQ_TXDESCINT_ENABLE; |
| 3756 | |
| 3757 | qnum = ath_hal_setuptxqueue(ah, qtype, &qi); |
| 3758 | if (qnum == -1) { |
| 3759 | /* |
| 3760 | * NB: don't print a message, this happens |
| 3761 | * normally on parts with too few tx queues |
| 3762 | */ |
| 3763 | return NULL; |
| 3764 | } |
| 3765 | if (qnum >= N(sc->sc_txq)) { |
| 3766 | device_printf(sc->sc_dev, |
| 3767 | "hal qnum %u out of range, max %zu!\n", |
| 3768 | qnum, N(sc->sc_txq)); |
| 3769 | ath_hal_releasetxqueue(ah, qnum); |
| 3770 | return NULL; |
| 3771 | } |
| 3772 | if (!ATH_TXQ_SETUP(sc, qnum)) { |
| 3773 | ath_txq_init(sc, &sc->sc_txq[qnum], qnum); |
| 3774 | sc->sc_txqsetup |= 1<<qnum; |
| 3775 | } |
| 3776 | return &sc->sc_txq[qnum]; |
| 3777 | #undef N |
| 3778 | } |
| 3779 | |
| 3780 | /* |
| 3781 | * Setup a hardware data transmit queue for the specified |
| 3782 | * access category (AC). The hal may not support all requested
| 3783 | * queues in which case it will return a reference to a |
| 3784 | * previously setup queue. We record the mapping from ac's |
| 3785 | * to h/w queues for use by ath_tx_start and also track |
| 3786 | * the set of h/w queues being used to optimize work in the |
| 3787 | * transmit interrupt handler and related routines. |
| 3788 | */ |
| 3789 | static int |
| 3790 | ath_tx_setup(struct ath_softc *sc, int ac, int haltype) |
| 3791 | { |
| 3792 | #define N(a) (sizeof(a)/sizeof(a[0])) |
| 3793 | struct ath_txq *txq; |
| 3794 | |
| 3795 | if (ac >= N(sc->sc_ac2q)) { |
| 3796 | device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", |
| 3797 | ac, N(sc->sc_ac2q)); |
| 3798 | return 0; |
| 3799 | } |
| 3800 | txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); |
| 3801 | if (txq != NULL) { |
| 3802 | txq->axq_ac = ac; |
| 3803 | sc->sc_ac2q[ac] = txq; |
| 3804 | return 1; |
| 3805 | } else |
| 3806 | return 0; |
| 3807 | #undef N |
| 3808 | } |
| 3809 | |
| 3810 | /* |
| 3811 | * Update WME parameters for a transmit queue. |
| 3812 | */ |
| 3813 | static int |
| 3814 | ath_txq_update(struct ath_softc *sc, int ac) |
| 3815 | { |
| 3816 | #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) |
| 3817 | #define ATH_TXOP_TO_US(v) (v<<5) |
| 3818 | struct ifnet *ifp = sc->sc_ifp; |
| 3819 | struct ieee80211com *ic = ifp->if_l2com; |
| 3820 | struct ath_txq *txq = sc->sc_ac2q[ac]; |
| 3821 | struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; |
| 3822 | struct ath_hal *ah = sc->sc_ah; |
| 3823 | HAL_TXQ_INFO qi; |
| 3824 | |
| 3825 | ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); |
| 3826 | #ifdef IEEE80211_SUPPORT_TDMA |
| 3827 | if (sc->sc_tdma) { |
| 3828 | /* |
| 3829 | * AIFS is zero so there's no pre-transmit wait. The |
| 3830 | * burst time defines the slot duration and is configured |
| 3831 | * through net80211. The QCU is setup to not do post-xmit |
| 3832 | * back off, lockout all lower-priority QCU's, and fire |
| 3833 | * off the DMA beacon alert timer which is setup based |
| 3834 | * on the slot configuration. |
| 3835 | */ |
| 3836 | qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE |
| 3837 | | HAL_TXQ_TXERRINT_ENABLE |
| 3838 | | HAL_TXQ_TXURNINT_ENABLE |
| 3839 | | HAL_TXQ_TXEOLINT_ENABLE |
| 3840 | | HAL_TXQ_DBA_GATED |
| 3841 | | HAL_TXQ_BACKOFF_DISABLE |
| 3842 | | HAL_TXQ_ARB_LOCKOUT_GLOBAL |
| 3843 | ; |
| 3844 | qi.tqi_aifs = 0; |
| 3845 | /* XXX +dbaprep? */ |
| 3846 | qi.tqi_readyTime = sc->sc_tdmaslotlen; |
| 3847 | qi.tqi_burstTime = qi.tqi_readyTime; |
| 3848 | } else { |
| 3849 | #endif |
| 3850 | /* |
| 3851 | * XXX shouldn't this just use the default flags |
| 3852 | * used in the previous queue setup? |
| 3853 | */ |
| 3854 | qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE |
| 3855 | | HAL_TXQ_TXERRINT_ENABLE |
| 3856 | | HAL_TXQ_TXDESCINT_ENABLE |
| 3857 | | HAL_TXQ_TXURNINT_ENABLE |
| 3858 | | HAL_TXQ_TXEOLINT_ENABLE |
| 3859 | ; |
| 3860 | qi.tqi_aifs = wmep->wmep_aifsn; |
| 3861 | qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); |
| 3862 | qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); |
| 3863 | qi.tqi_readyTime = 0; |
| 3864 | qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); |
| 3865 | #ifdef IEEE80211_SUPPORT_TDMA |
| 3866 | } |
| 3867 | #endif |
| 3868 | |
| 3869 | DPRINTF(sc, ATH_DEBUG_RESET, |
| 3870 | "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n", |
| 3871 | __func__, txq->axq_qnum, qi.tqi_qflags, |
| 3872 | qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime); |
| 3873 | |
| 3874 | if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { |
| 3875 | if_printf(ifp, "unable to update hardware queue " |
| 3876 | "parameters for %s traffic!\n", |
| 3877 | ieee80211_wme_acnames[ac]); |
| 3878 | return 0; |
| 3879 | } else { |
| 3880 | ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ |
| 3881 | return 1; |
| 3882 | } |
| 3883 | #undef ATH_TXOP_TO_US |
| 3884 | #undef ATH_EXPONENT_TO_VALUE |
| 3885 | } |
| 3886 | |
| 3887 | /* |
| 3888 | * Callback from the 802.11 layer to update WME parameters. |
| 3889 | */ |
| 3890 | int |
| 3891 | ath_wme_update(struct ieee80211com *ic) |
| 3892 | { |
| 3893 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 3894 | |
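| | /* Push the new parameters for each access category; report EIO if any update fails. */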
| 3895 | return !ath_txq_update(sc, WME_AC_BE) || |
| 3896 | !ath_txq_update(sc, WME_AC_BK) || |
| 3897 | !ath_txq_update(sc, WME_AC_VI) || |
| 3898 | !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; |
| 3899 | } |
| 3900 | |
| 3901 | /* |
| 3902 | * Reclaim resources for a setup queue. |
| 3903 | */ |
| 3904 | static void |
| 3905 | ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) |
| 3906 | { |
| 3907 | |
| 3908 | ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); |
| 3909 | sc->sc_txqsetup &= ~(1<<txq->axq_qnum); |
| 3910 | ATH_TXQ_LOCK_DESTROY(txq); |
| 3911 | } |
| 3912 | |
| 3913 | /* |
| 3914 | * Reclaim all tx queue resources. |
| 3915 | */ |
| 3916 | static void |
| 3917 | ath_tx_cleanup(struct ath_softc *sc) |
| 3918 | { |
| 3919 | int i; |
| 3920 | |
| 3921 | ATH_TXBUF_LOCK_DESTROY(sc); |
| 3922 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) |
| 3923 | if (ATH_TXQ_SETUP(sc, i)) |
| 3924 | ath_tx_cleanupq(sc, &sc->sc_txq[i]); |
| 3925 | } |
| 3926 | |
| 3927 | /* |
| 3928 | * Return h/w rate index for an IEEE rate (w/o basic rate bit) |
| 3929 | * using the current rates in sc_rixmap. |
| 3930 | */ |
| 3931 | int |
| 3932 | ath_tx_findrix(const struct ath_softc *sc, uint8_t rate) |
| 3933 | { |
| 3934 | int rix = sc->sc_rixmap[rate]; |
| 3935 | /* NB: return lowest rix for invalid rate */ |
| 3936 | return (rix == 0xff ? 0 : rix); |
| 3937 | } |
| 3938 | |
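| | /*
| | * Update transmit statistics (and the node inactivity timer) from a
| | * completed TX descriptor status.
| | */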
| 3939 | static void |
| 3940 | ath_tx_update_stats(struct ath_softc *sc, struct ath_tx_status *ts, |
| 3941 | struct ath_buf *bf) |
| 3942 | { |
| 3943 | struct ieee80211_node *ni = bf->bf_node; |
| 3944 | struct ifnet *ifp = sc->sc_ifp; |
| 3945 | struct ieee80211com *ic = ifp->if_l2com; |
| 3946 | int sr, lr, pri; |
| 3947 | |
| 3948 | if (ts->ts_status == 0) { |
| 3949 | u_int8_t txant = ts->ts_antenna; |
| 3950 | sc->sc_stats.ast_ant_tx[txant]++; |
| 3951 | sc->sc_ant_tx[txant]++; |
| 3952 | if (ts->ts_finaltsi != 0) |
| 3953 | sc->sc_stats.ast_tx_altrate++; |
| 3954 | pri = M_WME_GETAC(bf->bf_m); |
| 3955 | if (pri >= WME_AC_VO) |
| 3956 | ic->ic_wme.wme_hipri_traffic++; |
| 3957 | if ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) |
| 3958 | ni->ni_inact = ni->ni_inact_reload; |
| 3959 | } else { |
| 3960 | if (ts->ts_status & HAL_TXERR_XRETRY) |
| 3961 | sc->sc_stats.ast_tx_xretries++; |
| 3962 | if (ts->ts_status & HAL_TXERR_FIFO) |
| 3963 | sc->sc_stats.ast_tx_fifoerr++; |
| 3964 | if (ts->ts_status & HAL_TXERR_FILT) |
| 3965 | sc->sc_stats.ast_tx_filtered++; |
| 3966 | if (ts->ts_status & HAL_TXERR_XTXOP) |
| 3967 | sc->sc_stats.ast_tx_xtxop++; |
| 3968 | if (ts->ts_status & HAL_TXERR_TIMER_EXPIRED) |
| 3969 | sc->sc_stats.ast_tx_timerexpired++; |
| 3970 | |
| 3971 | if (bf->bf_m->m_flags & M_FF) |
| 3972 | sc->sc_stats.ast_ff_txerr++; |
| 3973 | } |
| 3974 | /* XXX when is this valid? */ |
| 3975 | if (ts->ts_flags & HAL_TX_DESC_CFG_ERR) |
| 3976 | sc->sc_stats.ast_tx_desccfgerr++; |
| 3977 | /* |
| 3978 | * This can be valid for successful frame transmission! |
| 3979 | * If there's a TX FIFO underrun during aggregate transmission, |
| 3980 | * the MAC will pad the rest of the aggregate with delimiters. |
| 3981 | * If a BA is returned, the frame is marked as "OK" and it's up |
| 3982 | * to the TX completion code to notice which frames weren't |
| 3983 | * successfully transmitted. |
| 3984 | */ |
| 3985 | if (ts->ts_flags & HAL_TX_DATA_UNDERRUN) |
| 3986 | sc->sc_stats.ast_tx_data_underrun++; |
| 3987 | if (ts->ts_flags & HAL_TX_DELIM_UNDERRUN) |
| 3988 | sc->sc_stats.ast_tx_delim_underrun++; |
| 3989 | |
| 3990 | sr = ts->ts_shortretry; |
| 3991 | lr = ts->ts_longretry; |
| 3992 | sc->sc_stats.ast_tx_shortretry += sr; |
| 3993 | sc->sc_stats.ast_tx_longretry += lr; |
| 3994 | |
| 3995 | } |
| 3996 | |
| 3997 | /* |
| 3998 | * The default completion. If fail is 1, this means |
| 3999 | * "please don't retry the frame, and just return -1 status |
| 4000 | * to the net80211 stack".
| 4001 | */ |
| 4002 | void |
| 4003 | ath_tx_default_comp(struct ath_softc *sc, struct ath_buf *bf, int fail) |
| 4004 | { |
| 4005 | struct ath_tx_status *ts = &bf->bf_status.ds_txstat; |
| 4006 | int st; |
| 4007 | |
| 4008 | if (fail == 1) |
| 4009 | st = -1; |
| 4010 | else |
| 4011 | st = ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) ? |
| 4012 | ts->ts_status : HAL_TXERR_XRETRY; |
| 4013 | |
| 4014 | #if 0 |
| 4015 | if (bf->bf_state.bfs_dobaw) |
| 4016 | device_printf(sc->sc_dev, |
| 4017 | "%s: bf %p: seqno %d: dobaw should've been cleared!\n", |
| 4018 | __func__, |
| 4019 | bf, |
| 4020 | SEQNO(bf->bf_state.bfs_seqno)); |
| 4021 | #endif |
| 4022 | if (bf->bf_next != NULL) |
| 4023 | device_printf(sc->sc_dev, |
| 4024 | "%s: bf %p: seqno %d: bf_next not NULL!\n", |
| 4025 | __func__, |
| 4026 | bf, |
| 4027 | SEQNO(bf->bf_state.bfs_seqno)); |
| 4028 | |
| 4029 | /* |
| 4030 | * Check if the node software queue is empty; if so |
| 4031 | * then clear the TIM. |
| 4032 | * |
| 4033 | * This needs to be done before the buffer is freed as |
| 4034 | * otherwise the node reference will have been released |
| 4035 | * and the node may not actually exist any longer. |
| 4036 | * |
| 4037 | * XXX I don't like this belonging here, but it's cleaner |
| 4038 | * to do it here right now than in all the other places
| 4039 | * where ath_tx_default_comp() is called. |
| 4040 | * |
| 4041 | * XXX TODO: during drain, ensure that the callback is |
| 4042 | * being called so we get a chance to update the TIM. |
| 4043 | */ |
| 4044 | if (bf->bf_node) { |
| 4045 | ATH_TX_LOCK(sc); |
| 4046 | ath_tx_update_tim(sc, bf->bf_node, 0); |
| 4047 | ATH_TX_UNLOCK(sc); |
| 4048 | } |
| 4049 | |
| 4050 | /* |
| 4051 | * Do any tx complete callback. Note this must |
| 4052 | * be done before releasing the node reference. |
| 4053 | * This will free the mbuf, release the net80211 |
| 4054 | * node and recycle the ath_buf. |
| 4055 | */ |
| 4056 | ath_tx_freebuf(sc, bf, st); |
| 4057 | } |
| 4058 | |
| 4059 | /* |
| 4060 | * Update rate control with the given completion status. |
| 4061 | */ |
| 4062 | void |
| 4063 | ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni, |
| 4064 | struct ath_rc_series *rc, struct ath_tx_status *ts, int frmlen, |
| 4065 | int nframes, int nbad) |
| 4066 | { |
| 4067 | struct ath_node *an; |
| 4068 | |
| 4069 | /* Only for unicast frames */ |
| 4070 | if (ni == NULL) |
| 4071 | return; |
| 4072 | |
| 4073 | an = ATH_NODE(ni); |
| 4074 | ATH_NODE_UNLOCK_ASSERT(an); |
| 4075 | |
| 4076 | if ((ts->ts_status & HAL_TXERR_FILT) == 0) { |
| 4077 | ATH_NODE_LOCK(an); |
| 4078 | ath_rate_tx_complete(sc, an, rc, ts, frmlen, nframes, nbad); |
| 4079 | ATH_NODE_UNLOCK(an); |
| 4080 | } |
| 4081 | } |
| 4082 | |
| 4083 | /* |
| 4084 | * Process the completion of the given buffer. |
| 4085 | * |
| 4086 | * This calls the rate control update and then the buffer completion. |
| 4087 | * This will either free the buffer or requeue it. In any case, the |
| 4088 | * bf pointer should be treated as invalid after this function is called. |
| 4089 | */ |
| 4090 | void |
| 4091 | ath_tx_process_buf_completion(struct ath_softc *sc, struct ath_txq *txq, |
| 4092 | struct ath_tx_status *ts, struct ath_buf *bf) |
| 4093 | { |
| 4094 | struct ieee80211_node *ni = bf->bf_node; |
| 4095 | struct ath_node *an = NULL; |
| 4096 | |
| 4097 | ATH_TX_UNLOCK_ASSERT(sc); |
| 4098 | ATH_TXQ_UNLOCK_ASSERT(txq); |
| 4099 | |
| 4100 | /* If unicast frame, update general statistics */ |
| 4101 | if (ni != NULL) { |
| 4102 | an = ATH_NODE(ni); |
| 4103 | /* update statistics */ |
| 4104 | ath_tx_update_stats(sc, ts, bf); |
| 4105 | } |
| 4106 | |
| 4107 | /* |
| 4108 | * Call the completion handler. |
| 4109 | * The completion handler is responsible for |
| 4110 | * calling the rate control code. |
| 4111 | * |
| 4112 | * Frames with no completion handler get the |
| 4113 | * rate control code called here. |
| 4114 | */ |
| 4115 | if (bf->bf_comp == NULL) { |
| 4116 | if ((ts->ts_status & HAL_TXERR_FILT) == 0 && |
| 4117 | (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0) { |
| 4118 | /* |
| 4119 | * XXX assume this isn't an aggregate |
| 4120 | * frame. |
| 4121 | */ |
| 4122 | ath_tx_update_ratectrl(sc, ni, |
| 4123 | bf->bf_state.bfs_rc, ts, |
| 4124 | bf->bf_state.bfs_pktlen, 1, |
| 4125 | (ts->ts_status == 0 ? 0 : 1)); |
| 4126 | } |
| 4127 | ath_tx_default_comp(sc, bf, 0); |
| 4128 | } else |
| 4129 | bf->bf_comp(sc, bf, 0); |
| 4130 | } |
| 4131 | |
| 4132 | |
| 4133 | |
| 4134 | /* |
| 4135 | * Process completed xmit descriptors from the specified queue. |
| 4136 | * Kick the packet scheduler if needed. This can occur from this |
| 4137 | * particular task. |
| 4138 | */ |
| 4139 | static int |
| 4140 | ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched) |
| 4141 | { |
| 4142 | struct ath_hal *ah = sc->sc_ah; |
| 4143 | struct ath_buf *bf; |
| 4144 | struct ath_desc *ds; |
| 4145 | struct ath_tx_status *ts; |
| 4146 | struct ieee80211_node *ni; |
| 4147 | #ifdef IEEE80211_SUPPORT_SUPERG |
| 4148 | struct ieee80211com *ic = sc->sc_ifp->if_l2com; |
| 4149 | #endif /* IEEE80211_SUPPORT_SUPERG */ |
| 4150 | int nacked; |
| 4151 | HAL_STATUS status; |
| 4152 | |
| 4153 | DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", |
| 4154 | __func__, txq->axq_qnum, |
| 4155 | (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), |
| 4156 | txq->axq_link); |
| 4157 | |
| 4158 | ATH_KTR(sc, ATH_KTR_TXCOMP, 4, |
| 4159 | "ath_tx_processq: txq=%u head %p link %p depth %p", |
| 4160 | txq->axq_qnum, |
| 4161 | (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), |
| 4162 | txq->axq_link, |
| 4163 | txq->axq_depth); |
| 4164 | |
| 4165 | nacked = 0; |
| 4166 | for (;;) { |
| 4167 | ATH_TXQ_LOCK(txq); |
| 4168 | txq->axq_intrcnt = 0; /* reset periodic desc intr count */ |
| 4169 | bf = TAILQ_FIRST(&txq->axq_q); |
| 4170 | if (bf == NULL) { |
| 4171 | ATH_TXQ_UNLOCK(txq); |
| 4172 | break; |
| 4173 | } |
| 4174 | ds = bf->bf_lastds; /* XXX must be setup correctly! */ |
| 4175 | ts = &bf->bf_status.ds_txstat; |
| 4176 | |
| 4177 | status = ath_hal_txprocdesc(ah, ds, ts); |
| 4178 | #ifdef ATH_DEBUG |
| 4179 | if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) |
| 4180 | ath_printtxbuf(sc, bf, txq->axq_qnum, 0, |
| 4181 | status == HAL_OK); |
| 4182 | else if ((sc->sc_debug & ATH_DEBUG_RESET) && (dosched == 0)) |
| 4183 | ath_printtxbuf(sc, bf, txq->axq_qnum, 0, |
| 4184 | status == HAL_OK); |
| 4185 | #endif |
| 4186 | #ifdef ATH_DEBUG_ALQ |
| 4187 | if (if_ath_alq_checkdebug(&sc->sc_alq, |
| 4188 | ATH_ALQ_EDMA_TXSTATUS)) { |
| 4189 | if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS, |
| 4190 | sc->sc_tx_statuslen, |
| 4191 | (char *) ds); |
| 4192 | } |
| 4193 | #endif |
| 4194 | |
| 4195 | if (status == HAL_EINPROGRESS) { |
| 4196 | ATH_KTR(sc, ATH_KTR_TXCOMP, 3, |
| 4197 | "ath_tx_processq: txq=%u, bf=%p ds=%p, HAL_EINPROGRESS", |
| 4198 | txq->axq_qnum, bf, ds); |
| 4199 | ATH_TXQ_UNLOCK(txq); |
| 4200 | break; |
| 4201 | } |
| 4202 | ATH_TXQ_REMOVE(txq, bf, bf_list); |
| 4203 | |
| 4204 | /* |
| 4205 | * Sanity check. |
| 4206 | */ |
| 4207 | if (txq->axq_qnum != bf->bf_state.bfs_tx_queue) { |
| 4208 | device_printf(sc->sc_dev, |
| 4209 | "%s: TXQ=%d: bf=%p, bfs_tx_queue=%d\n", |
| 4210 | __func__, |
| 4211 | txq->axq_qnum, |
| 4212 | bf, |
| 4213 | bf->bf_state.bfs_tx_queue); |
| 4214 | } |
| 4215 | if (txq->axq_qnum != bf->bf_last->bf_state.bfs_tx_queue) { |
| 4216 | device_printf(sc->sc_dev, |
| 4217 | "%s: TXQ=%d: bf_last=%p, bfs_tx_queue=%d\n", |
| 4218 | __func__, |
| 4219 | txq->axq_qnum, |
| 4220 | bf->bf_last, |
| 4221 | bf->bf_last->bf_state.bfs_tx_queue); |
| 4222 | } |
| 4223 | |
| 4224 | #if 0 |
| 4225 | if (txq->axq_depth > 0) { |
| 4226 | /* |
| 4227 | * More frames follow. Mark the buffer busy |
| 4228 | * so it's not re-used while the hardware may |
| 4229 | * still re-read the link field in the descriptor. |
| 4230 | * |
| 4231 | * Use the last buffer in an aggregate as that |
| 4232 | * is where the hardware may be - intermediate |
| 4233 | * descriptors won't be "busy". |
| 4234 | */ |
| 4235 | bf->bf_last->bf_flags |= ATH_BUF_BUSY; |
| 4236 | } else |
| 4237 | txq->axq_link = NULL; |
| 4238 | #else |
| 4239 | bf->bf_last->bf_flags |= ATH_BUF_BUSY; |
| 4240 | #endif |
| 4241 | if (bf->bf_state.bfs_aggr) |
| 4242 | txq->axq_aggr_depth--; |
| 4243 | |
| 4244 | ni = bf->bf_node; |
| 4245 | |
| 4246 | ATH_KTR(sc, ATH_KTR_TXCOMP, 5, |
| 4247 | "ath_tx_processq: txq=%u, bf=%p, ds=%p, ni=%p, ts_status=0x%08x", |
| 4248 | txq->axq_qnum, bf, ds, ni, ts->ts_status); |
| 4249 | /* |
| 4250 | * If unicast frame was ack'd update RSSI, |
| 4251 | * including the last rx time used to |
| 4252 | * workaround phantom bmiss interrupts. |
| 4253 | */ |
| 4254 | if (ni != NULL && ts->ts_status == 0 && |
| 4255 | ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) { |
| 4256 | nacked++; |
| 4257 | sc->sc_stats.ast_tx_rssi = ts->ts_rssi; |
| 4258 | ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, |
| 4259 | ts->ts_rssi); |
| 4260 | } |
| 4261 | ATH_TXQ_UNLOCK(txq); |
| 4262 | |
| 4263 | /* |
| 4264 | * Update statistics and call completion |
| 4265 | */ |
| 4266 | ath_tx_process_buf_completion(sc, txq, ts, bf); |
| 4267 | |
| 4268 | /* XXX at this point, bf and ni may be totally invalid */ |
| 4269 | } |
| 4270 | #ifdef IEEE80211_SUPPORT_SUPERG |
| 4271 | /* |
| 4272 | * Flush fast-frame staging queue when traffic slows. |
| 4273 | */ |
| 4274 | if (txq->axq_depth <= 1) |
| 4275 | ieee80211_ff_flush(ic, txq->axq_ac); |
| 4276 | #endif |
| 4277 | |
| 4278 | /* Kick the software TXQ scheduler */ |
| 4279 | if (dosched) { |
| 4280 | ATH_TX_LOCK(sc); |
| 4281 | ath_txq_sched(sc, txq); |
| 4282 | ATH_TX_UNLOCK(sc); |
| 4283 | } |
| 4284 | |
| 4285 | ATH_KTR(sc, ATH_KTR_TXCOMP, 1, |
| 4286 | "ath_tx_processq: txq=%u: done", |
| 4287 | txq->axq_qnum); |
| 4288 | |
| 4289 | return nacked; |
| 4290 | } |
| 4291 | |
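| | /* TXQACTIVE(t, q): non-zero when TX queue 'q' is flagged in the active-queue bitmask 't'. */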
| 4292 | #define TXQACTIVE(t, q) ( (t) & (1 << (q))) |
| 4293 | |
| 4294 | /* |
| 4295 | * Deferred processing of transmit interrupt; special-cased |
| 4296 | * for a single hardware transmit queue (e.g. 5210 and 5211). |
| 4297 | */ |
| 4298 | static void |
| 4299 | ath_tx_proc_q0(void *arg, int npending) |
| 4300 | { |
| 4301 | struct ath_softc *sc = arg; |
| 4302 | struct ifnet *ifp = sc->sc_ifp; |
| 4303 | uint32_t txqs; |
| 4304 | |
| 4305 | wlan_serialize_enter(); |
| 4306 | ATH_PCU_LOCK(sc); |
| 4307 | sc->sc_txproc_cnt++; |
| 4308 | txqs = sc->sc_txq_active; |
| 4309 | sc->sc_txq_active &= ~txqs; |
| 4310 | ATH_PCU_UNLOCK(sc); |
| 4311 | |
| 4312 | ATH_KTR(sc, ATH_KTR_TXCOMP, 1, |
| 4313 | "ath_tx_proc_q0: txqs=0x%08x", txqs); |
| 4314 | |
| 4315 | if (TXQACTIVE(txqs, 0) && ath_tx_processq(sc, &sc->sc_txq[0], 1)) |
| 4316 | /* XXX why is lastrx updated in tx code? */ |
| 4317 | sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); |
| 4318 | if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) |
| 4319 | ath_tx_processq(sc, sc->sc_cabq, 1); |
| 4320 | IF_LOCK(&ifp->if_snd); |
| 4321 | ifq_clr_oactive(&ifp->if_snd); |
| 4322 | IF_UNLOCK(&ifp->if_snd); |
| 4323 | sc->sc_wd_timer = 0; |
| 4324 | |
| 4325 | if (sc->sc_softled) |
| 4326 | ath_led_event(sc, sc->sc_txrix); |
| 4327 | |
| 4328 | ATH_PCU_LOCK(sc); |
| 4329 | sc->sc_txproc_cnt--; |
| 4330 | ATH_PCU_UNLOCK(sc); |
| 4331 | |
| 4332 | ath_tx_kick(sc); |
| 4333 | wlan_serialize_exit(); |
| 4334 | } |
| 4335 | |
| 4336 | /* |
| 4337 | * Deferred processing of transmit interrupt; special-cased |
| 4338 | * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). |
| 4339 | */ |
| 4340 | static void |
| 4341 | ath_tx_proc_q0123(void *arg, int npending) |
| 4342 | { |
| 4343 | struct ath_softc *sc = arg; |
| 4344 | struct ifnet *ifp = sc->sc_ifp; |
| 4345 | int nacked; |
| 4346 | uint32_t txqs; |
| 4347 | |
| 4348 | wlan_serialize_enter(); |
| 4349 | ATH_PCU_LOCK(sc); |
| 4350 | sc->sc_txproc_cnt++; |
| 4351 | txqs = sc->sc_txq_active; |
| 4352 | sc->sc_txq_active &= ~txqs; |
| 4353 | ATH_PCU_UNLOCK(sc); |
| 4354 | |
| 4355 | ATH_KTR(sc, ATH_KTR_TXCOMP, 1, |
| 4356 | "ath_tx_proc_q0123: txqs=0x%08x", txqs); |
| 4357 | |
| 4358 | /* |
| 4359 | * Process each active queue. |
| 4360 | */ |
| 4361 | nacked = 0; |
| 4362 | if (TXQACTIVE(txqs, 0)) |
| 4363 | nacked += ath_tx_processq(sc, &sc->sc_txq[0], 1); |
| 4364 | if (TXQACTIVE(txqs, 1)) |
| 4365 | nacked += ath_tx_processq(sc, &sc->sc_txq[1], 1); |
| 4366 | if (TXQACTIVE(txqs, 2)) |
| 4367 | nacked += ath_tx_processq(sc, &sc->sc_txq[2], 1); |
| 4368 | if (TXQACTIVE(txqs, 3)) |
| 4369 | nacked += ath_tx_processq(sc, &sc->sc_txq[3], 1); |
| 4370 | if (TXQACTIVE(txqs, sc->sc_cabq->axq_qnum)) |
| 4371 | ath_tx_processq(sc, sc->sc_cabq, 1); |
| 4372 | if (nacked) |
| 4373 | sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); |
| 4374 | |
| 4375 | IF_LOCK(&ifp->if_snd); |
| 4376 | ifq_clr_oactive(&ifp->if_snd); |
| 4377 | IF_UNLOCK(&ifp->if_snd); |
| 4378 | sc->sc_wd_timer = 0; |
| 4379 | |
| 4380 | if (sc->sc_softled) |
| 4381 | ath_led_event(sc, sc->sc_txrix); |
| 4382 | |
| 4383 | ATH_PCU_LOCK(sc); |
| 4384 | sc->sc_txproc_cnt--; |
| 4385 | ATH_PCU_UNLOCK(sc); |
| 4386 | |
| 4387 | ath_tx_kick(sc); |
| 4388 | wlan_serialize_exit(); |
| 4389 | } |
| 4390 | |
| 4391 | /* |
| 4392 | * Deferred processing of transmit interrupt. |
| 4393 | */ |
| 4394 | static void |
| 4395 | ath_tx_proc(void *arg, int npending) |
| 4396 | { |
| 4397 | struct ath_softc *sc = arg; |
| 4398 | struct ifnet *ifp = sc->sc_ifp; |
| 4399 | int i, nacked; |
| 4400 | uint32_t txqs; |
| 4401 | |
| 4402 | wlan_serialize_enter(); |
| 4403 | ATH_PCU_LOCK(sc); |
| 4404 | sc->sc_txproc_cnt++; |
| 4405 | txqs = sc->sc_txq_active; |
| 4406 | sc->sc_txq_active &= ~txqs; |
| 4407 | ATH_PCU_UNLOCK(sc); |
| 4408 | |
| 4409 | ATH_KTR(sc, ATH_KTR_TXCOMP, 1, "ath_tx_proc: txqs=0x%08x", txqs); |
| 4410 | |
| 4411 | /* |
| 4412 | * Process each active queue. |
| 4413 | */ |
| 4414 | nacked = 0; |
| 4415 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) |
| 4416 | if (ATH_TXQ_SETUP(sc, i) && TXQACTIVE(txqs, i)) |
| 4417 | nacked += ath_tx_processq(sc, &sc->sc_txq[i], 1); |
| 4418 | if (nacked) |
| 4419 | sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); |
| 4420 | |
| 4421 | /* XXX check this inside of IF_LOCK? */ |
| 4422 | IF_LOCK(&ifp->if_snd); |
| 4423 | ifq_clr_oactive(&ifp->if_snd); |
| 4424 | IF_UNLOCK(&ifp->if_snd); |
| 4425 | sc->sc_wd_timer = 0; |
| 4426 | |
| 4427 | if (sc->sc_softled) |
| 4428 | ath_led_event(sc, sc->sc_txrix); |
| 4429 | |
| 4430 | ATH_PCU_LOCK(sc); |
| 4431 | sc->sc_txproc_cnt--; |
| 4432 | ATH_PCU_UNLOCK(sc); |
| 4433 | |
| 4434 | ath_tx_kick(sc); |
| 4435 | wlan_serialize_exit(); |
| 4436 | } |
| 4437 | #undef TXQACTIVE |
| 4438 | |
| 4439 | /* |
| 4440 | * Deferred processing of TXQ rescheduling. |
| 4441 | */ |
| 4442 | static void |
| 4443 | ath_txq_sched_tasklet(void *arg, int npending) |
| 4444 | { |
| 4445 | struct ath_softc *sc = arg; |
| 4446 | int i; |
| 4447 | |
| 4448 | /* XXX is skipping ok? */ |
| 4449 | ATH_PCU_LOCK(sc); |
| 4450 | #if 0 |
| 4451 | if (sc->sc_inreset_cnt > 0) { |
| 4452 | device_printf(sc->sc_dev, |
| 4453 | "%s: sc_inreset_cnt > 0; skipping\n", __func__); |
| 4454 | ATH_PCU_UNLOCK(sc); |
| 4455 | return; |
| 4456 | } |
| 4457 | #endif |
| 4458 | sc->sc_txproc_cnt++; |
| 4459 | ATH_PCU_UNLOCK(sc); |
| 4460 | |
| 4461 | ATH_TX_LOCK(sc); |
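| | /* Run the software TXQ scheduler over every configured hardware queue. */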
| 4462 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { |
| 4463 | if (ATH_TXQ_SETUP(sc, i)) { |
| 4464 | ath_txq_sched(sc, &sc->sc_txq[i]); |
| 4465 | } |
| 4466 | } |
| 4467 | ATH_TX_UNLOCK(sc); |
| 4468 | |
| 4469 | ATH_PCU_LOCK(sc); |
| 4470 | sc->sc_txproc_cnt--; |
| 4471 | ATH_PCU_UNLOCK(sc); |
| 4472 | } |
| 4473 | |
| 4474 | void |
| 4475 | ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf) |
| 4476 | { |
| 4477 | |
| 4478 | ATH_TXBUF_LOCK_ASSERT(sc); |
| 4479 | |
| 4480 | if (bf->bf_flags & ATH_BUF_MGMT) |
| 4481 | TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list); |
| 4482 | else { |
| 4483 | TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); |
| 4484 | sc->sc_txbuf_cnt++; |
| 4485 | if (sc->sc_txbuf_cnt > ath_txbuf) { |
| 4486 | device_printf(sc->sc_dev, |
| 4487 | "%s: sc_txbuf_cnt > %d?\n", |
| 4488 | __func__, |
| 4489 | ath_txbuf); |
| 4490 | sc->sc_txbuf_cnt = ath_txbuf; |
| 4491 | } |
| 4492 | } |
| 4493 | } |
| 4494 | |
| 4495 | void |
| 4496 | ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf) |
| 4497 | { |
| 4498 | |
| 4499 | ATH_TXBUF_LOCK_ASSERT(sc); |
| 4500 | |
| 4501 | if (bf->bf_flags & ATH_BUF_MGMT) |
| 4502 | TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list); |
| 4503 | else { |
| 4504 | TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list); |
| 4505 | sc->sc_txbuf_cnt++; |
| 4506 | if (sc->sc_txbuf_cnt > ATH_TXBUF) { |
| 4507 | device_printf(sc->sc_dev, |
| 4508 | "%s: sc_txbuf_cnt > %d?\n", |
| 4509 | __func__, |
| 4510 | ATH_TXBUF); |
| 4511 | sc->sc_txbuf_cnt = ATH_TXBUF; |
| 4512 | } |
| 4513 | } |
| 4514 | } |
| 4515 | |
| 4516 | /* |
| 4517 | * Free the holding buffer if it exists |
| 4518 | */ |
| 4519 | void |
| 4520 | ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq) |
| 4521 | { |
| 4522 | ATH_TXBUF_UNLOCK_ASSERT(sc); |
| 4523 | ATH_TXQ_LOCK_ASSERT(txq); |
| 4524 | |
| 4525 | if (txq->axq_holdingbf == NULL) |
| 4526 | return; |
| 4527 | |
| 4528 | txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY; |
| 4529 | |
| 4530 | ATH_TXBUF_LOCK(sc); |
| 4531 | ath_returnbuf_tail(sc, txq->axq_holdingbf); |
| 4532 | ATH_TXBUF_UNLOCK(sc); |
| 4533 | |
| 4534 | txq->axq_holdingbf = NULL; |
| 4535 | } |
| 4536 | |
| 4537 | /* |
| 4538 | * Add this buffer to the holding queue, freeing the previous |
| 4539 | * one if it exists. |
| 4540 | */ |
| 4541 | static void |
| 4542 | ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf) |
| 4543 | { |
| 4544 | struct ath_txq *txq; |
| 4545 | |
| 4546 | txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; |
| 4547 | |
| 4548 | ATH_TXBUF_UNLOCK_ASSERT(sc); |
| 4549 | ATH_TXQ_LOCK_ASSERT(txq); |
| 4550 | |
| 4551 | /* XXX assert ATH_BUF_BUSY is set */ |
| 4552 | |
| 4553 | /* XXX assert the tx queue is under the max number */ |
| 4554 | if (bf->bf_state.bfs_tx_queue > HAL_NUM_TX_QUEUES) { |
| 4555 | device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n", |
| 4556 | __func__, |
| 4557 | bf, |
| 4558 | bf->bf_state.bfs_tx_queue); |
| 4559 | bf->bf_flags &= ~ATH_BUF_BUSY; |
| 4560 | ath_returnbuf_tail(sc, bf); |
| 4561 | return; |
| 4562 | } |
| 4563 | ath_txq_freeholdingbuf(sc, txq); |
| 4564 | txq->axq_holdingbf = bf; |
| 4565 | } |
| 4566 | |
| 4567 | /* |
| 4568 | * Return a buffer to the pool and update the 'busy' flag on the |
| 4569 | * previous 'tail' entry. |
| 4570 | * |
| 4571 | * This _must_ only be called when the buffer is involved in a completed |
| 4572 | * TX. The logic is that if it was part of an active TX, the previous |
| 4573 | * buffer on the list is now not involved in a halted TX DMA queue, waiting |
| 4574 | * for restart (eg for TDMA).
| 4575 | * |
| 4576 | * The caller must free the mbuf and recycle the node reference. |
| 4577 | * |
| 4578 | * XXX This method of handling busy / holding buffers is insanely stupid. |
| 4579 | * It requires bf_state.bfs_tx_queue to be correctly assigned. It would |
| 4580 | * be much nicer if buffers in the processq() methods would instead be |
| 4581 | * always completed there (pushed onto a txq or ath_bufhead) so we knew |
| 4582 | * exactly what hardware queue they came from in the first place. |
| 4583 | */ |
| 4584 | void |
| 4585 | ath_freebuf(struct ath_softc *sc, struct ath_buf *bf) |
| 4586 | { |
| 4587 | struct ath_txq *txq; |
| 4588 | |
| 4589 | txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue]; |
| 4590 | |
| 4591 | KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__)); |
| 4592 | KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__)); |
| 4593 | |
| 4594 | /* |
| 4595 | * If this buffer is busy, push it onto the holding queue. |
| 4596 | */ |
| 4597 | if (bf->bf_flags & ATH_BUF_BUSY) { |
| 4598 | ATH_TXQ_LOCK(txq); |
| 4599 | ath_txq_addholdingbuf(sc, bf); |
| 4600 | ATH_TXQ_UNLOCK(txq); |
| 4601 | return; |
| 4602 | } |
| 4603 | |
| 4604 | /* |
| 4605 | * Not a busy buffer, so free normally |
| 4606 | */ |
| 4607 | ATH_TXBUF_LOCK(sc); |
| 4608 | ath_returnbuf_tail(sc, bf); |
| 4609 | ATH_TXBUF_UNLOCK(sc); |
| 4610 | } |
| 4611 | |
| 4612 | /* |
| 4613 | * This is currently used by ath_tx_draintxq() and |
| 4614 | * ath_tx_tid_free_pkts(). |
| 4615 | * |
| 4616 | * It recycles a single ath_buf. |
| 4617 | */ |
| 4618 | void |
| 4619 | ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status) |
| 4620 | { |
| 4621 | struct ieee80211_node *ni = bf->bf_node; |
| 4622 | struct mbuf *m0 = bf->bf_m; |
| 4623 | |
| 4624 | /* |
| 4625 | * Make sure that we only sync/unload if there's an mbuf. |
| 4626 | * If not (eg we cloned a buffer), the unload will have already |
| 4627 | * occurred.
| 4628 | */ |
| 4629 | if (bf->bf_m != NULL) { |
| 4630 | bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, |
| 4631 | BUS_DMASYNC_POSTWRITE); |
| 4632 | bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); |
| 4633 | } |
| 4634 | |
| 4635 | bf->bf_node = NULL; |
| 4636 | bf->bf_m = NULL; |
| 4637 | |
| 4638 | /* Free the buffer, it's not needed any longer */ |
| 4639 | ath_freebuf(sc, bf); |
| 4640 | |
| 4641 | /* Pass the buffer back to net80211 - completing it */ |
| 4642 | ieee80211_tx_complete(ni, m0, status); |
| 4643 | } |
| 4644 | |
| 4645 | static struct ath_buf * |
| 4646 | ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq) |
| 4647 | { |
| 4648 | struct ath_buf *bf; |
| 4649 | |
| 4650 | ATH_TXQ_LOCK_ASSERT(txq); |
| 4651 | |
| 4652 | /* |
| 4653 | * Drain the FIFO queue first, then if it's |
| 4654 | * empty, move to the normal frame queue. |
| 4655 | */ |
| 4656 | bf = TAILQ_FIRST(&txq->fifo.axq_q); |
| 4657 | if (bf != NULL) { |
| 4658 | /* |
| 4659 | * Is it the last buffer in this set? |
| 4660 | * Decrement the FIFO counter. |
| 4661 | */ |
| 4662 | if (bf->bf_flags & ATH_BUF_FIFOEND) { |
| 4663 | if (txq->axq_fifo_depth == 0) { |
| 4664 | device_printf(sc->sc_dev, |
| 4665 | "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n", |
| 4666 | __func__, |
| 4667 | txq->axq_qnum, |
| 4668 | txq->fifo.axq_depth); |
| 4669 | } else |
| 4670 | txq->axq_fifo_depth--; |
| 4671 | } |
| 4672 | ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list); |
| 4673 | return (bf); |
| 4674 | } |
| 4675 | |
| 4676 | /* |
| 4677 | * Debugging! |
| 4678 | */ |
| 4679 | if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) { |
| 4680 | device_printf(sc->sc_dev, |
| 4681 | "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n", |
| 4682 | __func__, |
| 4683 | txq->axq_qnum, |
| 4684 | txq->axq_fifo_depth, |
| 4685 | txq->fifo.axq_depth); |
| 4686 | } |
| 4687 | |
| 4688 | /* |
| 4689 | * Now drain the pending queue. |
| 4690 | */ |
| 4691 | bf = TAILQ_FIRST(&txq->axq_q); |
| 4692 | if (bf == NULL) { |
| 4693 | txq->axq_link = NULL; |
| 4694 | return (NULL); |
| 4695 | } |
| 4696 | ATH_TXQ_REMOVE(txq, bf, bf_list); |
| 4697 | return (bf); |
| 4698 | } |
| 4699 | |
| 4700 | void |
| 4701 | ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) |
| 4702 | { |
| 4703 | #ifdef ATH_DEBUG |
| 4704 | struct ath_hal *ah = sc->sc_ah; |
| 4705 | #endif |
| 4706 | struct ath_buf *bf; |
| 4707 | u_int ix; |
| 4708 | |
| 4709 | /* |
| 4710 | * NB: this assumes output has been stopped and |
| 4711 | * we do not need to block ath_tx_proc |
| 4712 | */ |
| 4713 | for (ix = 0;; ix++) { |
| 4714 | ATH_TXQ_LOCK(txq); |
| 4715 | bf = ath_tx_draintxq_get_one(sc, txq); |
| 4716 | if (bf == NULL) { |
| 4717 | ATH_TXQ_UNLOCK(txq); |
| 4718 | break; |
| 4719 | } |
| 4720 | if (bf->bf_state.bfs_aggr) |
| 4721 | txq->axq_aggr_depth--; |
| 4722 | #ifdef ATH_DEBUG |
| 4723 | if (sc->sc_debug & ATH_DEBUG_RESET) { |
| 4724 | struct ieee80211com *ic = sc->sc_ifp->if_l2com; |
| 4725 | int status = 0; |
| 4726 | |
| 4727 | /* |
| 4728 | * EDMA operation has a TX completion FIFO |
| 4729 | * separate from the TX descriptor, so this |
| 4730 | * method of checking the "completion" status |
| 4731 | * is wrong. |
| 4732 | */ |
| 4733 | if (! sc->sc_isedma) { |
| 4734 | status = (ath_hal_txprocdesc(ah, |
| 4735 | bf->bf_lastds, |
| 4736 | &bf->bf_status.ds_txstat) == HAL_OK); |
| 4737 | } |
| 4738 | ath_printtxbuf(sc, bf, txq->axq_qnum, ix, status); |
| 4739 | ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *), |
| 4740 | bf->bf_m->m_len, 0, -1); |
| 4741 | } |
| 4742 | #endif /* ATH_DEBUG */ |
| 4743 | /* |
| 4744 | * Since we're now doing magic in the completion |
| 4745 | * functions, we -must- call it for aggregation |
| 4746 | * destinations or BAW tracking will get upset. |
| 4747 | */ |
| 4748 | /* |
| 4749 | * Clear ATH_BUF_BUSY; the completion handler |
| 4750 | * will free the buffer. |
| 4751 | */ |
| 4752 | ATH_TXQ_UNLOCK(txq); |
| 4753 | bf->bf_flags &= ~ATH_BUF_BUSY; |
| 4754 | if (bf->bf_comp) |
| 4755 | bf->bf_comp(sc, bf, 1); |
| 4756 | else |
| 4757 | ath_tx_default_comp(sc, bf, 1); |
| 4758 | } |
| 4759 | |
| 4760 | /* |
| 4761 | * Free the holding buffer if it exists |
| 4762 | */ |
| 4763 | ATH_TXQ_LOCK(txq); |
| 4764 | ath_txq_freeholdingbuf(sc, txq); |
| 4765 | ATH_TXQ_UNLOCK(txq); |
| 4766 | |
| 4767 | /* |
| 4768 | * Drain software queued frames which are on |
| 4769 | * active TIDs. |
| 4770 | */ |
| 4771 | ath_tx_txq_drain(sc, txq); |
| 4772 | } |
| 4773 | |
| 4774 | static void |
| 4775 | ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) |
| 4776 | { |
| 4777 | struct ath_hal *ah = sc->sc_ah; |
| 4778 | |
| 4779 | ATH_TXQ_LOCK_ASSERT(txq); |
| 4780 | |
| 4781 | DPRINTF(sc, ATH_DEBUG_RESET, |
| 4782 | "%s: tx queue [%u] %p, active=%d, hwpending=%d, flags 0x%08x, " |
| 4783 | "link %p, holdingbf=%p\n", |
| 4784 | __func__, |
| 4785 | txq->axq_qnum, |
| 4786 | (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), |
| 4787 | (int) (!! ath_hal_txqenabled(ah, txq->axq_qnum)), |
| 4788 | (int) ath_hal_numtxpending(ah, txq->axq_qnum), |
| 4789 | txq->axq_flags, |
| 4790 | txq->axq_link, |
| 4791 | txq->axq_holdingbf); |
| 4792 | |
| 4793 | (void) ath_hal_stoptxdma(ah, txq->axq_qnum); |
| 4794 | /* We've stopped TX DMA, so mark this as stopped. */ |
| 4795 | txq->axq_flags &= ~ATH_TXQ_PUTRUNNING; |
| 4796 | |
| 4797 | #ifdef ATH_DEBUG |
| 4798 | if ((sc->sc_debug & ATH_DEBUG_RESET) |
| 4799 | && (txq->axq_holdingbf != NULL)) { |
| 4800 | ath_printtxbuf(sc, txq->axq_holdingbf, txq->axq_qnum, 0, 0); |
| 4801 | } |
| 4802 | #endif |
| 4803 | } |
| 4804 | |
| 4805 | int |
| 4806 | ath_stoptxdma(struct ath_softc *sc) |
| 4807 | { |
| 4808 | struct ath_hal *ah = sc->sc_ah; |
| 4809 | int i; |
| 4810 | |
| 4811 | /* XXX return value */ |
| 4812 | if (sc->sc_invalid) |
| 4813 | return 0; |
| 4814 | |
| 4815 | if (!sc->sc_invalid) { |
| 4816 | /* don't touch the hardware if marked invalid */ |
| 4817 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", |
| 4818 | __func__, sc->sc_bhalq, |
| 4819 | (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), |
| 4820 | NULL); |
| 4821 | |
| 4822 | /* stop the beacon queue */ |
| 4823 | (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); |
| 4824 | |
| 4825 | /* Stop the data queues */ |
| 4826 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { |
| 4827 | if (ATH_TXQ_SETUP(sc, i)) { |
| 4828 | ATH_TXQ_LOCK(&sc->sc_txq[i]); |
| 4829 | ath_tx_stopdma(sc, &sc->sc_txq[i]); |
| 4830 | ATH_TXQ_UNLOCK(&sc->sc_txq[i]); |
| 4831 | } |
| 4832 | } |
| 4833 | } |
| 4834 | |
| 4835 | return 1; |
| 4836 | } |
| 4837 | |
| 4838 | #ifdef ATH_DEBUG |
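/*
* Dump the contents of the given hardware TX queue for debugging
* (only when ATH_DEBUG_RESET is enabled).  Each pending buffer is
* printed along with whether the hardware has completed its last
* descriptor.
*/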
| 4839 | void |
| 4840 | ath_tx_dump(struct ath_softc *sc, struct ath_txq *txq) |
| 4841 | { |
| 4842 | struct ath_hal *ah = sc->sc_ah; |
| 4843 | struct ath_buf *bf; |
| 4844 | int i = 0; |
| 4845 | |
| 4846 | if (! (sc->sc_debug & ATH_DEBUG_RESET)) |
| 4847 | return; |
| 4848 | |
| 4849 | device_printf(sc->sc_dev, "%s: Q%d: begin\n", |
| 4850 | __func__, txq->axq_qnum); |
| 4851 | TAILQ_FOREACH(bf, &txq->axq_q, bf_list) { |
| 4852 | ath_printtxbuf(sc, bf, txq->axq_qnum, i, |
| 4853 | ath_hal_txprocdesc(ah, bf->bf_lastds, |
| 4854 | &bf->bf_status.ds_txstat) == HAL_OK); |
| 4855 | i++; |
| 4856 | } |
| 4857 | device_printf(sc->sc_dev, "%s: Q%d: end\n", |
| 4858 | __func__, txq->axq_qnum); |
| 4859 | } |
| 4860 | #endif /* ATH_DEBUG */ |
| 4861 | |
| 4862 | /* |
| 4863 | * Drain the transmit queues and reclaim resources. |
| 4864 | */ |
| 4865 | void |
| 4866 | ath_legacy_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type) |
| 4867 | { |
| 4868 | struct ath_hal *ah = sc->sc_ah; |
| 4869 | struct ifnet *ifp = sc->sc_ifp; |
| 4870 | int i; |
| 4871 | struct ath_buf *bf_last; |
| 4872 | |
| 4873 | (void) ath_stoptxdma(sc); |
| 4874 | |
| 4875 | /* |
| 4876 | * Dump the queue contents |
| 4877 | */ |
| 4878 | for (i = 0; i < HAL_NUM_TX_QUEUES; i++) { |
| 4879 | /* |
| 4880 | * XXX TODO: should we just handle the completed TX frames |
| 4881 | * here, whether or not the reset is a full one or not? |
| 4882 | */ |
| 4883 | if (ATH_TXQ_SETUP(sc, i)) { |
| 4884 | #ifdef ATH_DEBUG |
| 4885 | if (sc->sc_debug & ATH_DEBUG_RESET) |
| 4886 | ath_tx_dump(sc, &sc->sc_txq[i]); |
| 4887 | #endif /* ATH_DEBUG */ |
| 4888 | if (reset_type == ATH_RESET_NOLOSS) { |
| 4889 | ath_tx_processq(sc, &sc->sc_txq[i], 0); |
| 4890 | ATH_TXQ_LOCK(&sc->sc_txq[i]); |
| 4891 | /* |
| 4892 | * Free the holding buffer; DMA is now |
| 4893 | * stopped. |
| 4894 | */ |
| 4895 | ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]); |
| 4896 | /* |
| 4897 | * Setup the link pointer to be the |
| 4898 | * _last_ buffer/descriptor in the list. |
| 4899 | * If there's nothing in the list, set it |
| 4900 | * to NULL. |
| 4901 | */ |
| 4902 | bf_last = ATH_TXQ_LAST(&sc->sc_txq[i], |
| 4903 | axq_q_s); |
| 4904 | if (bf_last != NULL) { |
| 4905 | ath_hal_gettxdesclinkptr(ah, |
| 4906 | bf_last->bf_lastds, |
| 4907 | &sc->sc_txq[i].axq_link); |
| 4908 | } else { |
| 4909 | sc->sc_txq[i].axq_link = NULL; |
| 4910 | } |
| 4911 | ATH_TXQ_UNLOCK(&sc->sc_txq[i]); |
| 4912 | } else |
| 4913 | ath_tx_draintxq(sc, &sc->sc_txq[i]); |
| 4914 | } |
| 4915 | } |
| 4916 | #ifdef ATH_DEBUG |
| 4917 | if (sc->sc_debug & ATH_DEBUG_RESET) { |
| 4918 | struct ath_buf *bf = TAILQ_FIRST(&sc->sc_bbuf); |
| 4919 | if (bf != NULL && bf->bf_m != NULL) { |
| 4920 | ath_printtxbuf(sc, bf, sc->sc_bhalq, 0, |
| 4921 | ath_hal_txprocdesc(ah, bf->bf_lastds, |
| 4922 | &bf->bf_status.ds_txstat) == HAL_OK); |
| 4923 | ieee80211_dump_pkt(ifp->if_l2com, |
| 4924 | mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len, |
| 4925 | 0, -1); |
| 4926 | } |
| 4927 | } |
| 4928 | #endif /* ATH_DEBUG */ |
| 4929 | IF_LOCK(&ifp->if_snd); |
| 4930 | ifq_clr_oactive(&ifp->if_snd); |
| 4931 | IF_UNLOCK(&ifp->if_snd); |
| 4932 | sc->sc_wd_timer = 0; |
| 4933 | } |
| 4934 | |
| 4935 | /* |
| 4936 | * Update internal state after a channel change. |
| 4937 | */ |
| 4938 | static void |
| 4939 | ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) |
| 4940 | { |
| 4941 | enum ieee80211_phymode mode; |
| 4942 | |
| 4943 | /* |
| 4944 | * Change channels and update the h/w rate map |
| 4945 | * if we're switching; e.g. 11a to 11b/g. |
| 4946 | */ |
| 4947 | mode = ieee80211_chan2mode(chan); |
| 4948 | if (mode != sc->sc_curmode) |
| 4949 | ath_setcurmode(sc, mode); |
| 4950 | sc->sc_curchan = chan; |
| 4951 | } |
| 4952 | |
| 4953 | /* |
| 4954 | * Set/change channels. If the channel is really being changed, |
| 4955 | * it's done by resetting the chip. To accomplish this we must |
| 4956 | * first clean up any pending DMA, then restart things much as
| 4957 | * ath_init does.
| 4958 | */ |
| 4959 | static int |
| 4960 | ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) |
| 4961 | { |
| 4962 | struct ifnet *ifp = sc->sc_ifp; |
| 4963 | struct ieee80211com *ic = ifp->if_l2com; |
| 4964 | struct ath_hal *ah = sc->sc_ah; |
| 4965 | int ret = 0; |
| 4966 | |
| 4967 | /* Treat this as an interface reset */ |
| 4968 | ATH_PCU_UNLOCK_ASSERT(sc); |
| 4969 | ATH_UNLOCK_ASSERT(sc); |
| 4970 | |
| 4971 | /* (Try to) stop TX/RX from occurring */
| 4972 | taskqueue_block(sc->sc_tq); |
| 4973 | |
| 4974 | ATH_PCU_LOCK(sc); |
| 4975 | |
| 4976 | /* Stop new RX/TX/interrupt completion */ |
| 4977 | if (ath_reset_grablock(sc, 1) == 0) { |
| 4978 | device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n", |
| 4979 | __func__); |
| 4980 | } |
| 4981 | |
| 4982 | ath_hal_intrset(ah, 0); |
| 4983 | |
| 4984 | /* Stop pending RX/TX completion */ |
| 4985 | ath_txrx_stop_locked(sc); |
| 4986 | |
| 4987 | ATH_PCU_UNLOCK(sc); |
| 4988 | |
| 4989 | DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n", |
| 4990 | __func__, ieee80211_chan2ieee(ic, chan), |
| 4991 | chan->ic_freq, chan->ic_flags); |
| 4992 | if (chan != sc->sc_curchan) { |
| 4993 | HAL_STATUS status; |
| 4994 | /* |
| 4995 | * To switch channels clear any pending DMA operations; |
| 4996 | * wait long enough for the RX fifo to drain, reset the |
| 4997 | * hardware at the new frequency, and then re-enable |
| 4998 | * the relevant bits of the h/w. |
| 4999 | */ |
| 5000 | #if 0 |
| 5001 | ath_hal_intrset(ah, 0); /* disable interrupts */ |
| 5002 | #endif |
| 5003 | ath_stoprecv(sc, 1); /* turn off frame recv */ |
| 5004 | /* |
| 5005 | * First, handle completed TX/RX frames. |
| 5006 | */ |
| 5007 | ath_rx_flush(sc); |
| 5008 | ath_draintxq(sc, ATH_RESET_NOLOSS); |
| 5009 | /* |
| 5010 | * Next, flush the non-scheduled frames. |
| 5011 | */ |
| 5012 | ath_draintxq(sc, ATH_RESET_FULL); /* clear pending tx frames */ |
| 5013 | |
| 5014 | ath_update_chainmasks(sc, chan); |
| 5015 | ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask, |
| 5016 | sc->sc_cur_rxchainmask); |
| 5017 | if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) { |
| 5018 | if_printf(ifp, "%s: unable to reset " |
| 5019 | "channel %u (%u MHz, flags 0x%x), hal status %u\n", |
| 5020 | __func__, ieee80211_chan2ieee(ic, chan), |
| 5021 | chan->ic_freq, chan->ic_flags, status); |
| 5022 | ret = EIO; |
| 5023 | goto finish; |
| 5024 | } |
| 5025 | sc->sc_diversity = ath_hal_getdiversity(ah); |
| 5026 | |
| 5027 | /* Let DFS at it in case it's a DFS channel */ |
| 5028 | ath_dfs_radar_enable(sc, chan); |
| 5029 | |
| 5030 | /* Let spectral at it in case spectral is enabled */
| 5031 | ath_spectral_enable(sc, chan); |
| 5032 | |
| 5033 | /* |
| 5034 | * Let bluetooth coexistence at it in case it's needed for this
| 5035 | * channel |
| 5036 | */ |
| 5037 | ath_btcoex_enable(sc, ic->ic_curchan); |
| 5038 | |
| 5039 | /* |
| 5040 | * If we're doing TDMA, enforce the TXOP limitation for chips |
| 5041 | * that support it. |
| 5042 | */ |
| 5043 | if (sc->sc_hasenforcetxop && sc->sc_tdma) |
| 5044 | ath_hal_setenforcetxop(sc->sc_ah, 1); |
| 5045 | else |
| 5046 | ath_hal_setenforcetxop(sc->sc_ah, 0); |
| 5047 | |
| 5048 | /* |
| 5049 | * Re-enable rx framework. |
| 5050 | */ |
| 5051 | if (ath_startrecv(sc) != 0) { |
| 5052 | if_printf(ifp, "%s: unable to restart recv logic\n", |
| 5053 | __func__); |
| 5054 | ret = EIO; |
| 5055 | goto finish; |
| 5056 | } |
| 5057 | |
| 5058 | /* |
| 5059 | * Change channels and update the h/w rate map |
| 5060 | * if we're switching; e.g. 11a to 11b/g. |
| 5061 | */ |
| 5062 | ath_chan_change(sc, chan); |
| 5063 | |
| 5064 | /* |
| 5065 | * Reset clears the beacon timers; reset them |
| 5066 | * here if needed. |
| 5067 | */ |
| 5068 | if (sc->sc_beacons) { /* restart beacons */ |
| 5069 | #ifdef IEEE80211_SUPPORT_TDMA |
| 5070 | if (sc->sc_tdma) |
| 5071 | ath_tdma_config(sc, NULL); |
| 5072 | else |
| 5073 | #endif |
| 5074 | ath_beacon_config(sc, NULL); |
| 5075 | } |
| 5076 | |
| 5077 | /* |
| 5078 | * Re-enable interrupts. |
| 5079 | */ |
| 5080 | #if 0 |
| 5081 | ath_hal_intrset(ah, sc->sc_imask); |
| 5082 | #endif |
| 5083 | } |
| 5084 | |
| 5085 | finish: |
| 5086 | ATH_PCU_LOCK(sc); |
| 5087 | sc->sc_inreset_cnt--; |
| 5088 | /* XXX only do this if sc_inreset_cnt == 0? */ |
| 5089 | ath_hal_intrset(ah, sc->sc_imask); |
| 5090 | ATH_PCU_UNLOCK(sc); |
| 5091 | |
| 5092 | IF_LOCK(&ifp->if_snd); |
| 5093 | ifq_clr_oactive(&ifp->if_snd); |
| 5094 | IF_UNLOCK(&ifp->if_snd); |
| 5095 | ath_txrx_start(sc); |
| 5096 | /* XXX ath_start? */ |
| 5097 | |
| 5098 | return ret; |
| 5099 | } |
| 5100 | |
| 5101 | /* |
| 5102 | * Periodically recalibrate the PHY to account |
| 5103 | * for temperature/environment changes. |
| 5104 | */ |
| 5105 | static void |
| 5106 | ath_calibrate(void *arg) |
| 5107 | { |
| 5108 | struct ath_softc *sc = arg; |
| 5109 | struct ath_hal *ah = sc->sc_ah; |
| 5110 | struct ifnet *ifp = sc->sc_ifp; |
| 5111 | struct ieee80211com *ic = ifp->if_l2com; |
| 5112 | HAL_BOOL longCal, isCalDone = AH_TRUE; |
| 5113 | HAL_BOOL aniCal, shortCal = AH_FALSE; |
| 5114 | int nextcal; |
| 5115 | |
| 5116 | wlan_serialize_enter(); |
| 5117 | if (ic->ic_flags & IEEE80211_F_SCAN) /* defer, off channel */ |
| 5118 | goto restart; |
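/*
* Work out which calibrations are due: the long cal interval is
* expressed in seconds, the ANI and short cal intervals in
* milliseconds.
*/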
| 5119 | longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz); |
| 5120 | aniCal = (ticks - sc->sc_lastani >= ath_anicalinterval*hz/1000); |
| 5121 | if (sc->sc_doresetcal) |
| 5122 | shortCal = (ticks - sc->sc_lastshortcal >= ath_shortcalinterval*hz/1000); |
| 5123 | |
| 5124 | DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: shortCal=%d; longCal=%d; aniCal=%d\n", __func__, shortCal, longCal, aniCal); |
| 5125 | if (aniCal) { |
| 5126 | sc->sc_stats.ast_ani_cal++; |
| 5127 | sc->sc_lastani = ticks; |
| 5128 | ath_hal_ani_poll(ah, sc->sc_curchan); |
| 5129 | } |
| 5130 | |
| 5131 | if (longCal) { |
| 5132 | sc->sc_stats.ast_per_cal++; |
| 5133 | sc->sc_lastlongcal = ticks; |
| 5134 | if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { |
| 5135 | /* |
| 5136 | * Rfgain is out of bounds, reset the chip |
| 5137 | * to load new gain values. |
| 5138 | */ |
| 5139 | DPRINTF(sc, ATH_DEBUG_CALIBRATE, |
| 5140 | "%s: rfgain change\n", __func__); |
| 5141 | sc->sc_stats.ast_per_rfgain++; |
| 5142 | sc->sc_resetcal = 0; |
| 5143 | sc->sc_doresetcal = AH_TRUE; |
| 5144 | taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); |
| 5145 | callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); |
| 5146 | goto done; |
| 5147 | } |
| 5148 | /* |
| 5149 | * If this long cal is after an idle period, then |
| 5150 | * reset the data collection state so we start fresh. |
| 5151 | */ |
| 5152 | if (sc->sc_resetcal) { |
| 5153 | (void) ath_hal_calreset(ah, sc->sc_curchan); |
| 5154 | sc->sc_lastcalreset = ticks; |
| 5155 | sc->sc_lastshortcal = ticks; |
| 5156 | sc->sc_resetcal = 0; |
| 5157 | sc->sc_doresetcal = AH_TRUE; |
| 5158 | } |
| 5159 | } |
| 5160 | |
| 5161 | /* Only call if we're doing a short/long cal, not for ANI calibration */ |
| 5162 | if (shortCal || longCal) { |
| 5163 | isCalDone = AH_FALSE; |
| 5164 | if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) { |
| 5165 | if (longCal) { |
| 5166 | /* |
| 5167 | * Calibrate noise floor data again in case of change. |
| 5168 | */ |
| 5169 | ath_hal_process_noisefloor(ah); |
| 5170 | } |
| 5171 | } else { |
| 5172 | DPRINTF(sc, ATH_DEBUG_ANY, |
| 5173 | "%s: calibration of channel %u failed\n", |
| 5174 | __func__, sc->sc_curchan->ic_freq); |
| 5175 | sc->sc_stats.ast_per_calfail++; |
| 5176 | } |
| 5177 | if (shortCal) |
| 5178 | sc->sc_lastshortcal = ticks; |
| 5179 | } |
| 5180 | if (!isCalDone) { |
| 5181 | restart: |
| 5182 | /* |
| 5183 | * Use a shorter interval to potentially collect multiple |
| 5184 | * data samples required to complete calibration. Once |
| 5185 | * we're told the work is done we drop back to a longer |
| 5186 | * interval between requests. We're more aggressive doing |
| 5187 | * work when operating as an AP to improve operation right |
| 5188 | * after startup. |
| 5189 | */ |
| 5190 | sc->sc_lastshortcal = ticks; |
| 5191 | nextcal = ath_shortcalinterval*hz/1000; |
| 5192 | if (sc->sc_opmode != HAL_M_HOSTAP) |
| 5193 | nextcal *= 10; |
| 5194 | sc->sc_doresetcal = AH_TRUE; |
| 5195 | } else { |
| 5196 | /* nextcal should be the shortest time for next event */ |
| 5197 | nextcal = ath_longcalinterval*hz; |
| 5198 | if (sc->sc_lastcalreset == 0) |
| 5199 | sc->sc_lastcalreset = sc->sc_lastlongcal; |
| 5200 | else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz) |
| 5201 | sc->sc_resetcal = 1; /* setup reset next trip */ |
| 5202 | sc->sc_doresetcal = AH_FALSE; |
| 5203 | } |
| 5204 | /* ANI calibration may occur more often than short/long/resetcal */ |
| 5205 | if (ath_anicalinterval > 0) |
| 5206 | nextcal = MIN(nextcal, ath_anicalinterval*hz/1000); |
| 5207 | |
| 5208 | if (nextcal != 0) { |
| 5209 | DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n", |
| 5210 | __func__, nextcal, isCalDone ? "" : "!"); |
| 5211 | callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc); |
| 5212 | } else { |
| 5213 | DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n", |
| 5214 | __func__); |
| 5215 | /* NB: don't rearm timer */ |
| 5216 | } |
| 5217 | done: |
| 5218 | wlan_serialize_exit(); |
| 5219 | } |
| 5220 | |
| 5221 | static void |
| 5222 | ath_scan_start(struct ieee80211com *ic) |
| 5223 | { |
| 5224 | struct ifnet *ifp = ic->ic_ifp; |
| 5225 | struct ath_softc *sc = ifp->if_softc; |
| 5226 | struct ath_hal *ah = sc->sc_ah; |
| 5227 | u_int32_t rfilt; |
| 5228 | |
| 5229 | /* XXX calibration timer? */ |
| 5230 | |
| 5231 | ATH_LOCK(sc); |
| 5232 | sc->sc_scanning = 1; |
| 5233 | sc->sc_syncbeacon = 0; |
| 5234 | rfilt = ath_calcrxfilter(sc); |
| 5235 | ATH_UNLOCK(sc); |
| 5236 | |
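/*
* While scanning, program the scan RX filter and a wildcard
* (broadcast) BSSID with AID 0 so beacons from any BSS are
* accepted.
*/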
| 5237 | ATH_PCU_LOCK(sc); |
| 5238 | ath_hal_setrxfilter(ah, rfilt); |
| 5239 | ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); |
| 5240 | ATH_PCU_UNLOCK(sc); |
| 5241 | |
| 5242 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", |
| 5243 | __func__, rfilt, ath_hal_ether_sprintf(ifp->if_broadcastaddr)); |
| 5244 | } |
| 5245 | |
| 5246 | static void |
| 5247 | ath_scan_end(struct ieee80211com *ic) |
| 5248 | { |
| 5249 | struct ifnet *ifp = ic->ic_ifp; |
| 5250 | struct ath_softc *sc = ifp->if_softc; |
| 5251 | struct ath_hal *ah = sc->sc_ah; |
| 5252 | u_int32_t rfilt; |
| 5253 | |
| 5254 | ATH_LOCK(sc); |
| 5255 | sc->sc_scanning = 0; |
| 5256 | rfilt = ath_calcrxfilter(sc); |
| 5257 | ATH_UNLOCK(sc); |
| 5258 | |
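/*
* Scan is done: restore the RX filter and re-program the current
* BSSID/AID into the hardware.
*/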
| 5259 | ATH_PCU_LOCK(sc); |
| 5260 | ath_hal_setrxfilter(ah, rfilt); |
| 5261 | ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); |
| 5262 | |
| 5263 | ath_hal_process_noisefloor(ah); |
| 5264 | ATH_PCU_UNLOCK(sc); |
| 5265 | |
| 5266 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", |
| 5267 | __func__, rfilt, ath_hal_ether_sprintf(sc->sc_curbssid), |
| 5268 | sc->sc_curaid); |
| 5269 | } |
| 5270 | |
| 5271 | #ifdef ATH_ENABLE_11N |
| 5272 | /* |
| 5273 | * For now, just do a channel change. |
| 5274 | * |
| 5275 | * Later, we'll go through the hard slog of suspending tx/rx, changing rate |
| 5276 | * control state and resetting the hardware without dropping frames out |
| 5277 | * of the queue. |
| 5278 | * |
| 5279 | * The unfortunate trouble here is making absolutely sure that the |
| 5280 | * channel width change has propagated enough so the hardware |
| 5281 | * absolutely isn't handed bogus frames for its current operating
| 5282 | * mode. (Eg, 40MHz frames in 20MHz mode.) Since TX and RX can and
| 5283 | * do occur in parallel, we need to make certain we've blocked
| 5284 | * any further ongoing TX (and RX, that can cause raw TX) |
| 5285 | * before we do this. |
| 5286 | */ |
| 5287 | static void |
| 5288 | ath_update_chw(struct ieee80211com *ic) |
| 5289 | { |
| 5290 | struct ifnet *ifp = ic->ic_ifp; |
| 5291 | struct ath_softc *sc = ifp->if_softc; |
| 5292 | |
| 5293 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: called\n", __func__); |
| 5294 | ath_set_channel(ic); |
| 5295 | } |
| 5296 | #endif /* ATH_ENABLE_11N */ |
| 5297 | |
| 5298 | static void |
| 5299 | ath_set_channel(struct ieee80211com *ic) |
| 5300 | { |
| 5301 | struct ifnet *ifp = ic->ic_ifp; |
| 5302 | struct ath_softc *sc = ifp->if_softc; |
| 5303 | |
| 5304 | (void) ath_chan_set(sc, ic->ic_curchan); |
| 5305 | /* |
| 5306 | * If we are returning to our bss channel then mark state |
| 5307 | * so the next recv'd beacon's tsf will be used to sync the |
| 5308 | * beacon timers. Note that since we only hear beacons in |
| 5309 | * sta/ibss mode this has no effect in other operating modes. |
| 5310 | */ |
| 5311 | ATH_LOCK(sc); |
| 5312 | if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) |
| 5313 | sc->sc_syncbeacon = 1; |
| 5314 | ATH_UNLOCK(sc); |
| 5315 | } |
| 5316 | |
| 5317 | /* |
| 5318 | * Walk the vap list and check whether any vaps are in RUN state.
| 5319 | */ |
| 5320 | static int |
| 5321 | ath_isanyrunningvaps(struct ieee80211vap *this) |
| 5322 | { |
| 5323 | struct ieee80211com *ic = this->iv_ic; |
| 5324 | struct ieee80211vap *vap; |
| 5325 | |
| 5326 | IEEE80211_LOCK_ASSERT(ic); |
| 5327 | |
| 5328 | TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { |
| 5329 | if (vap != this && vap->iv_state >= IEEE80211_S_RUN) |
| 5330 | return 1; |
| 5331 | } |
| 5332 | return 0; |
| 5333 | } |
| 5334 | |
| 5335 | static int |
| 5336 | ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) |
| 5337 | { |
| 5338 | struct ieee80211com *ic = vap->iv_ic; |
| 5339 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 5340 | struct ath_vap *avp = ATH_VAP(vap); |
| 5341 | struct ath_hal *ah = sc->sc_ah; |
| 5342 | struct ieee80211_node *ni = NULL; |
| 5343 | int i, error, stamode; |
| 5344 | u_int32_t rfilt; |
| 5345 | int csa_run_transition = 0; |
| 5346 | |
| 5347 | static const HAL_LED_STATE leds[] = { |
| 5348 | HAL_LED_INIT, /* IEEE80211_S_INIT */ |
| 5349 | HAL_LED_SCAN, /* IEEE80211_S_SCAN */ |
| 5350 | HAL_LED_AUTH, /* IEEE80211_S_AUTH */ |
| 5351 | HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ |
| 5352 | HAL_LED_RUN, /* IEEE80211_S_CAC */ |
| 5353 | HAL_LED_RUN, /* IEEE80211_S_RUN */ |
| 5354 | HAL_LED_RUN, /* IEEE80211_S_CSA */ |
| 5355 | HAL_LED_RUN, /* IEEE80211_S_SLEEP */ |
| 5356 | }; |
| 5357 | |
| 5358 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, |
| 5359 | ieee80211_state_name[vap->iv_state], |
| 5360 | ieee80211_state_name[nstate]); |
| 5361 | |
| 5362 | /* |
| 5363 | * net80211 _should_ have the comlock asserted at this point. |
| 5364 | * There are some comments around the calls to vap->iv_newstate |
| 5365 | * which indicate that it (newstate) may end up dropping the |
| 5366 | * lock. This and the subsequent lock assert check after newstate |
| 5367 | * are an attempt to catch these and figure out how/why. |
| 5368 | */ |
| 5369 | IEEE80211_LOCK_ASSERT(ic); |
| 5370 | |
| 5371 | if (vap->iv_state == IEEE80211_S_CSA && nstate == IEEE80211_S_RUN) |
| 5372 | csa_run_transition = 1; |
| 5373 | |
| 5374 | callout_drain(&sc->sc_cal_ch); |
| 5375 | ath_hal_setledstate(ah, leds[nstate]); /* set LED */ |
| 5376 | |
| 5377 | if (nstate == IEEE80211_S_SCAN) { |
| 5378 | /* |
| 5379 | * Scanning: turn off beacon miss and don't beacon. |
| 5380 | * Mark beacon state so when we reach RUN state we'll |
| 5381 | * [re]setup beacons. Unblock the task q thread so |
| 5382 | * deferred interrupt processing is done. |
| 5383 | */ |
| 5384 | ath_hal_intrset(ah, |
| 5385 | sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); |
| 5386 | sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); |
| 5387 | sc->sc_beacons = 0; |
| 5388 | taskqueue_unblock(sc->sc_tq); |
| 5389 | } |
| 5390 | |
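/*
* Pick up the bss node and recompute the RX filter for the new
* state; for station-style modes entering RUN, also program the
* association id and BSSID into the hardware.
*/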
| 5391 | ni = ieee80211_ref_node(vap->iv_bss); |
| 5392 | rfilt = ath_calcrxfilter(sc); |
| 5393 | stamode = (vap->iv_opmode == IEEE80211_M_STA || |
| 5394 | vap->iv_opmode == IEEE80211_M_AHDEMO || |
| 5395 | vap->iv_opmode == IEEE80211_M_IBSS); |
| 5396 | if (stamode && nstate == IEEE80211_S_RUN) { |
| 5397 | sc->sc_curaid = ni->ni_associd; |
| 5398 | IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); |
| 5399 | ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); |
| 5400 | } |
| 5401 | DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", |
| 5402 | __func__, rfilt, |
| 5403 | ath_hal_ether_sprintf(sc->sc_curbssid), sc->sc_curaid); |
| 5404 | ath_hal_setrxfilter(ah, rfilt); |
| 5405 | |
| 5406 | /* XXX is this to restore keycache on resume? */ |
| 5407 | if (vap->iv_opmode != IEEE80211_M_STA && |
| 5408 | (vap->iv_flags & IEEE80211_F_PRIVACY)) { |
| 5409 | for (i = 0; i < IEEE80211_WEP_NKID; i++) |
| 5410 | if (ath_hal_keyisvalid(ah, i)) |
| 5411 | ath_hal_keysetmac(ah, i, ni->ni_bssid); |
| 5412 | } |
| 5413 | |
| 5414 | /* |
| 5415 | * Invoke the parent method to do net80211 work. |
| 5416 | */ |
| 5417 | error = avp->av_newstate(vap, nstate, arg); |
| 5418 | if (error != 0) |
| 5419 | goto bad; |
| 5420 | |
| 5421 | /* |
| 5422 | * See above: ensure av_newstate() doesn't drop the lock |
| 5423 | * on us. |
| 5424 | */ |
| 5425 | IEEE80211_LOCK_ASSERT(ic); |
| 5426 | |
| 5427 | if (nstate == IEEE80211_S_RUN) { |
| 5428 | /* NB: collect bss node again, it may have changed */ |
| 5429 | ieee80211_free_node(ni); |
| 5430 | ni = ieee80211_ref_node(vap->iv_bss); |
| 5431 | |
| 5432 | DPRINTF(sc, ATH_DEBUG_STATE, |
| 5433 | "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " |
| 5434 | "capinfo 0x%04x chan %d\n", __func__, |
| 5435 | vap->iv_flags, ni->ni_intval, |
| 5436 | ath_hal_ether_sprintf(ni->ni_bssid), |
| 5437 | ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); |
| 5438 | |
| 5439 | switch (vap->iv_opmode) { |
| 5440 | #ifdef IEEE80211_SUPPORT_TDMA |
| 5441 | case IEEE80211_M_AHDEMO: |
| 5442 | if ((vap->iv_caps & IEEE80211_C_TDMA) == 0) |
| 5443 | break; |
| 5444 | /* fall thru... */ |
| 5445 | #endif |
| 5446 | case IEEE80211_M_HOSTAP: |
| 5447 | case IEEE80211_M_IBSS: |
| 5448 | case IEEE80211_M_MBSS: |
| 5449 | /* |
| 5450 | * Allocate and setup the beacon frame. |
| 5451 | * |
| 5452 | * Stop any previous beacon DMA. This may be |
| 5453 | * necessary, for example, when an ibss merge |
| 5454 | * causes reconfiguration; there will be a state |
| 5455 | * transition from RUN->RUN that means we may |
| 5456 | * be called with beacon transmission active. |
| 5457 | */ |
| 5458 | ath_hal_stoptxdma(ah, sc->sc_bhalq); |
| 5459 | |
| 5460 | error = ath_beacon_alloc(sc, ni); |
| 5461 | if (error != 0) |
| 5462 | goto bad; |
| 5463 | /* |
| 5464 | * If joining an adhoc network defer beacon timer |
| 5465 | * configuration to the next beacon frame so we |
| 5466 | * have a current TSF to use. Otherwise we're |
| 5467 | * starting an ibss/bss so there's no need to delay; |
| 5468 | * if this is the first vap moving to RUN state, then |
| 5469 | * beacon state needs to be [re]configured. |
| 5470 | */ |
| 5471 | if (vap->iv_opmode == IEEE80211_M_IBSS && |
| 5472 | ni->ni_tstamp.tsf != 0) { |
| 5473 | sc->sc_syncbeacon = 1; |
| 5474 | } else if (!sc->sc_beacons) { |
| 5475 | #ifdef IEEE80211_SUPPORT_TDMA |
| 5476 | if (vap->iv_caps & IEEE80211_C_TDMA) |
| 5477 | ath_tdma_config(sc, vap); |
| 5478 | else |
| 5479 | #endif |
| 5480 | ath_beacon_config(sc, vap); |
| 5481 | sc->sc_beacons = 1; |
| 5482 | } |
| 5483 | break; |
| 5484 | case IEEE80211_M_STA: |
| 5485 | /* |
| 5486 | * Defer beacon timer configuration to the next |
| 5487 | * beacon frame so we have a current TSF to use |
| 5488 | * (any TSF collected when scanning is likely old). |
| 5489 | * However if it's due to a CSA -> RUN transition, |
| 5490 | * force a beacon update so we pick up a lack of |
| 5491 | * beacons from an AP in CAC and thus force a |
| 5492 | * scan. |
| 5493 | * |
| 5494 | * And, there are also corner cases here where
| 5495 | * after a scan, the AP may have disappeared. |
| 5496 | * In that case, we may not receive an actual |
| 5497 | * beacon to update the beacon timer and thus we |
| 5498 | * won't get notified of the missing beacons. |
| 5499 | */ |
| 5500 | sc->sc_syncbeacon = 1; |
| 5501 | #if 0 |
| 5502 | if (csa_run_transition) |
| 5503 | #endif |
| 5504 | ath_beacon_config(sc, vap); |
| 5505 | |
| 5506 | /* |
| 5507 | * PR: kern/175227 |
| 5508 | * |
| 5509 | * Reconfigure beacons during reset; as otherwise |
| 5510 | * we won't get the beacon timers reprogrammed |
| 5511 | * after a reset and thus we won't pick up a |
| 5512 | * beacon miss interrupt. |
| 5513 | * |
| 5514 | * Hopefully we'll see a beacon before the BMISS |
| 5515 | * timer fires (too often), leading to a STA |
| 5516 | * disassociation. |
| 5517 | */ |
| 5518 | sc->sc_beacons = 1; |
| 5519 | break; |
| 5520 | case IEEE80211_M_MONITOR: |
| 5521 | /* |
| 5522 | * Monitor mode vaps have only INIT->RUN and RUN->RUN |
| 5523 | * transitions so we must re-enable interrupts here to |
| 5524 | * handle the case of a single monitor mode vap. |
| 5525 | */ |
| 5526 | ath_hal_intrset(ah, sc->sc_imask); |
| 5527 | break; |
| 5528 | case IEEE80211_M_WDS: |
| 5529 | break; |
| 5530 | default: |
| 5531 | break; |
| 5532 | } |
| 5533 | /* |
| 5534 | * Let the hal process statistics collected during a |
| 5535 | * scan so it can provide calibrated noise floor data. |
| 5536 | */ |
| 5537 | ath_hal_process_noisefloor(ah); |
| 5538 | /* |
| 5539 | * Reset rssi stats; maybe not the best place... |
| 5540 | */ |
| 5541 | sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; |
| 5542 | sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; |
| 5543 | sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; |
| 5544 | /* |
| 5545 | * Finally, start any timers and the task q thread |
| 5546 | * (in case we didn't go through SCAN state). |
| 5547 | */ |
| 5548 | if (ath_longcalinterval != 0) { |
| 5549 | /* start periodic recalibration timer */ |
| 5550 | callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc); |
| 5551 | } else { |
| 5552 | DPRINTF(sc, ATH_DEBUG_CALIBRATE, |
| 5553 | "%s: calibration disabled\n", __func__); |
| 5554 | } |
| 5555 | taskqueue_unblock(sc->sc_tq); |
| 5556 | } else if (nstate == IEEE80211_S_INIT) { |
| 5557 | /* |
| 5558 | * If there are no vaps left in RUN state then |
| 5559 | * shutdown host/driver operation: |
| 5560 | * o disable interrupts |
| 5561 | * o disable the task queue thread |
| 5562 | * o mark beacon processing as stopped |
| 5563 | */ |
| 5564 | if (!ath_isanyrunningvaps(vap)) { |
| 5565 | sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); |
| 5566 | /* disable interrupts */ |
| 5567 | ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); |
| 5568 | taskqueue_block(sc->sc_tq); |
| 5569 | sc->sc_beacons = 0; |
| 5570 | } |
| 5571 | #ifdef IEEE80211_SUPPORT_TDMA |
| 5572 | ath_hal_setcca(ah, AH_TRUE); |
| 5573 | #endif |
| 5574 | } |
| 5575 | bad: |
| 5576 | ieee80211_free_node(ni); |
| 5577 | return error; |
| 5578 | } |
| 5579 | |
| 5580 | /* |
| 5581 | * Allocate a key cache slot to the station so we can |
| 5582 | * setup a mapping from key index to node. The key cache |
| 5583 | * slot is needed for managing antenna state and for |
| 5584 | * compression when stations do not use crypto. We do |
| 5585 | * it unilaterally here; if crypto is employed this slot
| 5586 | * will be reassigned. |
| 5587 | */ |
| 5588 | static void |
| 5589 | ath_setup_stationkey(struct ieee80211_node *ni) |
| 5590 | { |
| 5591 | struct ieee80211vap *vap = ni->ni_vap; |
| 5592 | struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; |
| 5593 | ieee80211_keyix keyix, rxkeyix; |
| 5594 | |
| 5595 | /* XXX should take a locked ref to vap->iv_bss */ |
| 5596 | if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { |
| 5597 | /* |
| 5598 | * Key cache is full; we'll fall back to doing |
| 5599 | * the more expensive lookup in software. Note |
| 5600 | * this also means no h/w compression. |
| 5601 | */ |
| 5602 | /* XXX msg+statistic */ |
| 5603 | } else { |
| 5604 | /* XXX locking? */ |
| 5605 | ni->ni_ucastkey.wk_keyix = keyix; |
| 5606 | ni->ni_ucastkey.wk_rxkeyix = rxkeyix; |
| 5607 | /* NB: must mark device key to get called back on delete */ |
| 5608 | ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY; |
| 5609 | IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); |
| 5610 | /* NB: this will create a pass-thru key entry */ |
| 5611 | ath_keyset(sc, vap, &ni->ni_ucastkey, vap->iv_bss); |
| 5612 | } |
| 5613 | } |
| 5614 | |
| 5615 | /* |
| 5616 | * Setup driver-specific state for a newly associated node. |
| 5617 | * Note that we're called also on a re-associate, the isnew |
| 5618 | * param tells us if this is the first time or not. |
| 5619 | */ |
| 5620 | static void |
| 5621 | ath_newassoc(struct ieee80211_node *ni, int isnew) |
| 5622 | { |
| 5623 | struct ath_node *an = ATH_NODE(ni); |
| 5624 | struct ieee80211vap *vap = ni->ni_vap; |
| 5625 | struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; |
| 5626 | const struct ieee80211_txparam *tp = ni->ni_txparms; |
| 5627 | |
| 5628 | an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate); |
| 5629 | an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate); |
| 5630 | |
| 5631 | ath_rate_newassoc(sc, an, isnew); |
| 5632 | |
| 5633 | if (isnew && |
| 5634 | (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && |
| 5635 | ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) |
| 5636 | ath_setup_stationkey(ni); |
| 5637 | |
| 5638 | /* |
| 5639 | * If we're reassociating, make sure that any paused queues |
| 5640 | * get unpaused. |
| 5641 | * |
| 5642 | * Now, we may have frames in the hardware queue for this node.
| 5643 | * So if we are reassociating and there are frames in the queue, |
| 5644 | * we need to go through the cleanup path to ensure that they're |
| 5645 | * marked as non-aggregate. |
| 5646 | */ |
| 5647 | if (! isnew) { |
| 5648 | DPRINTF(sc, ATH_DEBUG_NODE, |
| 5649 | "%s: %6D: reassoc; is_powersave=%d\n", |
| 5650 | __func__, |
| 5651 | ni->ni_macaddr, |
| 5652 | ":", |
| 5653 | an->an_is_powersave); |
| 5654 | |
| 5655 | /* XXX for now, we can't hold the lock across assoc */ |
| 5656 | ath_tx_node_reassoc(sc, an); |
| 5657 | |
| 5658 | /* XXX for now, we can't hold the lock across wakeup */ |
| 5659 | if (an->an_is_powersave) |
| 5660 | ath_tx_node_wakeup(sc, an); |
| 5661 | } |
| 5662 | } |
| 5663 | |
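/*
* Push a new regulatory domain and channel list into the HAL.
*/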
| 5664 | static int |
| 5665 | ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg, |
| 5666 | int nchans, struct ieee80211_channel chans[]) |
| 5667 | { |
| 5668 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 5669 | struct ath_hal *ah = sc->sc_ah; |
| 5670 | HAL_STATUS status; |
| 5671 | |
| 5672 | DPRINTF(sc, ATH_DEBUG_REGDOMAIN, |
| 5673 | "%s: rd %u cc %u location %c%s\n", |
| 5674 | __func__, reg->regdomain, reg->country, reg->location, |
| 5675 | reg->ecm ? " ecm" : ""); |
| 5676 | |
| 5677 | status = ath_hal_set_channels(ah, chans, nchans, |
| 5678 | reg->country, reg->regdomain); |
| 5679 | if (status != HAL_OK) { |
| 5680 | DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n", |
| 5681 | __func__, status); |
| 5682 | return EINVAL; /* XXX */ |
| 5683 | } |
| 5684 | |
| 5685 | return 0; |
| 5686 | } |
| 5687 | |
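/*
* Report the channels the radio is capable of, queried from the
* HAL using the debug SKU and default country code.
*/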
| 5688 | static void |
| 5689 | ath_getradiocaps(struct ieee80211com *ic, |
| 5690 | int maxchans, int *nchans, struct ieee80211_channel chans[]) |
| 5691 | { |
| 5692 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 5693 | struct ath_hal *ah = sc->sc_ah; |
| 5694 | |
| 5695 | DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n", |
| 5696 | __func__, SKU_DEBUG, CTRY_DEFAULT); |
| 5697 | |
| 5698 | /* XXX check return */ |
| 5699 | (void) ath_hal_getchannels(ah, chans, maxchans, nchans, |
| 5700 | HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE); |
| 5701 | |
| 5702 | } |
| 5703 | |
| 5704 | static int |
| 5705 | ath_getchannels(struct ath_softc *sc) |
| 5706 | { |
| 5707 | struct ifnet *ifp = sc->sc_ifp; |
| 5708 | struct ieee80211com *ic = ifp->if_l2com; |
| 5709 | struct ath_hal *ah = sc->sc_ah; |
| 5710 | HAL_STATUS status; |
| 5711 | |
| 5712 | /* |
| 5713 | * Collect channel set based on EEPROM contents. |
| 5714 | */ |
| 5715 | status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX, |
| 5716 | &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE); |
| 5717 | if (status != HAL_OK) { |
| 5718 | if_printf(ifp, "%s: unable to collect channel list from hal, " |
| 5719 | "status %d\n", __func__, status); |
| 5720 | return EINVAL; |
| 5721 | } |
| 5722 | (void) ath_hal_getregdomain(ah, &sc->sc_eerd); |
| 5723 | ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ |
| 5724 | /* XXX map Atheros sku's to net80211 SKU's */ |
| 5725 | /* XXX net80211 types too small */ |
| 5726 | ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd; |
| 5727 | ic->ic_regdomain.country = (uint16_t) sc->sc_eecc; |
| 5728 | ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ |
| 5729 | ic->ic_regdomain.isocc[1] = ' '; |
| 5730 | |
| 5731 | ic->ic_regdomain.ecm = 1; |
| 5732 | ic->ic_regdomain.location = 'I'; |
| 5733 | |
| 5734 | DPRINTF(sc, ATH_DEBUG_REGDOMAIN, |
| 5735 | "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n", |
| 5736 | __func__, sc->sc_eerd, sc->sc_eecc, |
| 5737 | ic->ic_regdomain.regdomain, ic->ic_regdomain.country, |
| 5738 | ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : ""); |
| 5739 | return 0; |
| 5740 | } |
| 5741 | |
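/*
* Fetch and cache the HAL rate table for the given 802.11 mode.
* Returns zero if the mode is unknown or no table is available.
*/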
| 5742 | static int |
| 5743 | ath_rate_setup(struct ath_softc *sc, u_int mode) |
| 5744 | { |
| 5745 | struct ath_hal *ah = sc->sc_ah; |
| 5746 | const HAL_RATE_TABLE *rt; |
| 5747 | |
| 5748 | switch (mode) { |
| 5749 | case IEEE80211_MODE_11A: |
| 5750 | rt = ath_hal_getratetable(ah, HAL_MODE_11A); |
| 5751 | break; |
| 5752 | case IEEE80211_MODE_HALF: |
| 5753 | rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); |
| 5754 | break; |
| 5755 | case IEEE80211_MODE_QUARTER: |
| 5756 | rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); |
| 5757 | break; |
| 5758 | case IEEE80211_MODE_11B: |
| 5759 | rt = ath_hal_getratetable(ah, HAL_MODE_11B); |
| 5760 | break; |
| 5761 | case IEEE80211_MODE_11G: |
| 5762 | rt = ath_hal_getratetable(ah, HAL_MODE_11G); |
| 5763 | break; |
| 5764 | case IEEE80211_MODE_TURBO_A: |
| 5765 | rt = ath_hal_getratetable(ah, HAL_MODE_108A); |
| 5766 | break; |
| 5767 | case IEEE80211_MODE_TURBO_G: |
| 5768 | rt = ath_hal_getratetable(ah, HAL_MODE_108G); |
| 5769 | break; |
| 5770 | case IEEE80211_MODE_STURBO_A: |
| 5771 | rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); |
| 5772 | break; |
| 5773 | case IEEE80211_MODE_11NA: |
| 5774 | rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); |
| 5775 | break; |
| 5776 | case IEEE80211_MODE_11NG: |
| 5777 | rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); |
| 5778 | break; |
| 5779 | default: |
| 5780 | DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", |
| 5781 | __func__, mode); |
| 5782 | return 0; |
| 5783 | } |
| 5784 | sc->sc_rates[mode] = rt; |
| 5785 | return (rt != NULL); |
| 5786 | } |
| 5787 | |
| 5788 | static void |
| 5789 | ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) |
| 5790 | { |
| 5791 | #define N(a) (sizeof(a)/sizeof(a[0])) |
| 5792 | /* NB: on/off times from the Atheros NDIS driver, w/ permission */ |
| 5793 | static const struct { |
| 5794 | u_int rate; /* tx/rx 802.11 rate */ |
| 5795 | u_int16_t timeOn; /* LED on time (ms) */ |
| 5796 | u_int16_t timeOff; /* LED off time (ms) */ |
| 5797 | } blinkrates[] = { |
| 5798 | { 108, 40, 10 }, |
| 5799 | { 96, 44, 11 }, |
| 5800 | { 72, 50, 13 }, |
| 5801 | { 48, 57, 14 }, |
| 5802 | { 36, 67, 16 }, |
| 5803 | { 24, 80, 20 }, |
| 5804 | { 22, 100, 25 }, |
| 5805 | { 18, 133, 34 }, |
| 5806 | { 12, 160, 40 }, |
| 5807 | { 10, 200, 50 }, |
| 5808 | { 6, 240, 58 }, |
| 5809 | { 4, 267, 66 }, |
| 5810 | { 2, 400, 100 }, |
| 5811 | { 0, 500, 130 }, |
| 5812 | /* XXX half/quarter rates */ |
| 5813 | }; |
| 5814 | const HAL_RATE_TABLE *rt; |
| 5815 | int i, j; |
| 5816 | |
| 5817 | memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); |
| 5818 | rt = sc->sc_rates[mode]; |
| 5819 | KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); |
| 5820 | for (i = 0; i < rt->rateCount; i++) { |
| 5821 | uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL; |
| 5822 | if (rt->info[i].phy != IEEE80211_T_HT) |
| 5823 | sc->sc_rixmap[ieeerate] = i; |
| 5824 | else |
| 5825 | sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i; |
| 5826 | } |
| 5827 | memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); |
| 5828 | for (i = 0; i < N(sc->sc_hwmap); i++) { |
| 5829 | if (i >= rt->rateCount) { |
| 5830 | sc->sc_hwmap[i].ledon = (500 * hz) / 1000; |
| 5831 | sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; |
| 5832 | continue; |
| 5833 | } |
| 5834 | sc->sc_hwmap[i].ieeerate = |
| 5835 | rt->info[i].dot11Rate & IEEE80211_RATE_VAL; |
| 5836 | if (rt->info[i].phy == IEEE80211_T_HT) |
| 5837 | sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS; |
| 5838 | sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; |
| 5839 | if (rt->info[i].shortPreamble || |
| 5840 | rt->info[i].phy == IEEE80211_T_OFDM) |
| 5841 | sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; |
| 5842 | sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags; |
| 5843 | for (j = 0; j < N(blinkrates)-1; j++) |
| 5844 | if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) |
| 5845 | break; |
| 5846 | /* NB: this uses the last entry if the rate isn't found */ |
| 5847 | /* XXX beware of overflow */
| 5848 | sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; |
| 5849 | sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; |
| 5850 | } |
| 5851 | sc->sc_currates = rt; |
| 5852 | sc->sc_curmode = mode; |
| 5853 | /* |
| 5854 | * All protection frames are transmitted at 2Mb/s for
| 5855 | * 11g, otherwise at 1Mb/s. |
| 5856 | */ |
| 5857 | if (mode == IEEE80211_MODE_11G) |
| 5858 | sc->sc_protrix = ath_tx_findrix(sc, 2*2); |
| 5859 | else |
| 5860 | sc->sc_protrix = ath_tx_findrix(sc, 2*1); |
| 5861 | /* NB: caller is responsible for resetting rate control state */ |
| 5862 | #undef N |
| 5863 | } |
| 5864 | |
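/*
* Watchdog callout: runs once a second, checks for a TX hang or
* device timeout and, if one is found, schedules a deferred reset.
*/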
| 5865 | static void |
| 5866 | ath_watchdog(void *arg) |
| 5867 | { |
| 5868 | struct ath_softc *sc = arg; |
| 5869 | int do_reset = 0; |
| 5870 | |
| 5871 | wlan_serialize_enter(); |
| 5872 | if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) { |
| 5873 | struct ifnet *ifp = sc->sc_ifp; |
| 5874 | uint32_t hangs; |
| 5875 | |
| 5876 | if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) && |
| 5877 | hangs != 0) { |
| 5878 | if_printf(ifp, "%s hang detected (0x%x)\n", |
| 5879 | hangs & 0xff ? "bb" : "mac", hangs); |
| 5880 | } else |
| 5881 | if_printf(ifp, "device timeout\n"); |
| 5882 | do_reset = 1; |
| 5883 | ifp->if_oerrors++; |
| 5884 | sc->sc_stats.ast_watchdog++; |
| 5885 | } |
| 5886 | |
| 5887 | /* |
| 5888 | * We can't hold the lock across the ath_reset() call. |
| 5889 | * |
| 5890 | * And since this routine can't hold a lock and sleep, |
| 5891 | * do the reset deferred. |
| 5892 | */ |
| 5893 | if (do_reset) { |
| 5894 | taskqueue_enqueue(sc->sc_tq, &sc->sc_resettask); |
| 5895 | } |
| 5896 | |
| 5897 | callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc); |
| 5898 | wlan_serialize_exit(); |
| 5899 | } |
| 5900 | |
| 5901 | /* |
| 5902 | * Fetch the rate control statistics for the given node. |
| 5903 | */ |
| 5904 | static int |
| 5905 | ath_ioctl_ratestats(struct ath_softc *sc, struct ath_rateioctl *rs) |
| 5906 | { |
| 5907 | struct ath_node *an; |
| 5908 | struct ieee80211com *ic = sc->sc_ifp->if_l2com; |
| 5909 | struct ieee80211_node *ni; |
| 5910 | int error = 0; |
| 5911 | |
| 5912 | /* Perform a lookup on the given node */ |
| 5913 | ni = ieee80211_find_node(&ic->ic_sta, rs->is_u.macaddr); |
| 5914 | if (ni == NULL) { |
| 5915 | error = EINVAL; |
| 5916 | goto bad; |
| 5917 | } |
| 5918 | |
| 5919 | /* Lock the ath_node */ |
| 5920 | an = ATH_NODE(ni); |
| 5921 | ATH_NODE_LOCK(an); |
| 5922 | |
| 5923 | /* Fetch the rate control stats for this node */ |
| 5924 | error = ath_rate_fetch_node_stats(sc, an, rs); |
| 5925 | |
| 5926 | /* No matter what happens here, just drop through */ |
| 5927 | |
| 5928 | /* Unlock the ath_node */ |
| 5929 | ATH_NODE_UNLOCK(an); |
| 5930 | |
| 5931 | /* Unref the node */ |
| 5932 | ieee80211_node_decref(ni); |
| 5933 | |
| 5934 | bad: |
| 5935 | return (error); |
| 5936 | } |
| 5937 | |
| 5938 | #ifdef ATH_DIAGAPI |
| 5939 | /* |
| 5940 | * Diagnostic interface to the HAL. This is used by various |
| 5941 | * tools to do things like retrieve register contents for |
| 5942 | * debugging. The mechanism is intentionally opaque so that |
| 5943 | * it can change frequently w/o concern for compatibility.
| 5944 | */ |
| 5945 | static int |
| 5946 | ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) |
| 5947 | { |
| 5948 | struct ath_hal *ah = sc->sc_ah; |
| 5949 | u_int id = ad->ad_id & ATH_DIAG_ID; |
| 5950 | void *indata = NULL; |
| 5951 | void *outdata = NULL; |
| 5952 | u_int32_t insize = ad->ad_in_size; |
| 5953 | u_int32_t outsize = ad->ad_out_size; |
| 5954 | int error = 0; |
| 5955 | |
| 5956 | if (ad->ad_id & ATH_DIAG_IN) { |
| 5957 | /* |
| 5958 | * Copy in data. |
| 5959 | */ |
| 5960 | indata = kmalloc(insize, M_TEMP, M_INTWAIT); |
| 5961 | if (indata == NULL) { |
| 5962 | error = ENOMEM; |
| 5963 | goto bad; |
| 5964 | } |
| 5965 | error = copyin(ad->ad_in_data, indata, insize); |
| 5966 | if (error) |
| 5967 | goto bad; |
| 5968 | } |
| 5969 | if (ad->ad_id & ATH_DIAG_DYN) { |
| 5970 | /* |
| 5971 | * Allocate a buffer for the results (otherwise the HAL |
| 5972 | * returns a pointer to a buffer where we can read the |
| 5973 | * results). Note that we depend on the HAL leaving this |
| 5974 | * pointer for us to use below in reclaiming the buffer; |
| 5975 | * may want to be more defensive. |
| 5976 | */ |
| 5977 | outdata = kmalloc(outsize, M_TEMP, M_INTWAIT); |
| 5978 | if (outdata == NULL) { |
| 5979 | error = ENOMEM; |
| 5980 | goto bad; |
| 5981 | } |
| 5982 | } |
| 5983 | if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { |
| 5984 | if (outsize < ad->ad_out_size) |
| 5985 | ad->ad_out_size = outsize; |
| 5986 | if (outdata != NULL) |
| 5987 | error = copyout(outdata, ad->ad_out_data, |
| 5988 | ad->ad_out_size); |
| 5989 | } else { |
| 5990 | error = EINVAL; |
| 5991 | } |
| 5992 | bad: |
| 5993 | if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) |
| 5994 | kfree(indata, M_TEMP); |
| 5995 | if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) |
| 5996 | kfree(outdata, M_TEMP); |
| 5997 | return error; |
| 5998 | } |
| 5999 | #endif /* ATH_DIAGAPI */ |
| 6000 | |
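/*
* Handle driver ioctl requests: interface flags, media, statistics
* and the various ath-specific diagnostic calls.
*/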
| 6001 | static int |
| 6002 | ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, |
| 6003 | struct ucred *cr __unused) |
| 6004 | { |
| 6005 | #define IS_RUNNING(ifp) \ |
| 6006 | ((ifp->if_flags & IFF_UP) && (ifp->if_flags & IFF_RUNNING)) |
| 6007 | struct ath_softc *sc = ifp->if_softc; |
| 6008 | struct ieee80211com *ic = ifp->if_l2com; |
| 6009 | struct ifreq *ifr = (struct ifreq *)data; |
| 6010 | const HAL_RATE_TABLE *rt; |
| 6011 | int error = 0; |
| 6012 | |
| 6013 | switch (cmd) { |
| 6014 | case SIOCSIFFLAGS: |
| 6015 | ATH_LOCK(sc); |
| 6016 | if (IS_RUNNING(ifp)) { |
| 6017 | /* |
| 6018 | * To avoid rescanning another access point, |
| 6019 | * do not call ath_init() here. Instead, |
| 6020 | * only reflect promisc mode settings. |
| 6021 | */ |
| 6022 | ath_mode_init(sc); |
| 6023 | } else if (ifp->if_flags & IFF_UP) { |
| 6024 | /* |
| 6025 | * Beware of being called during attach/detach |
| 6026 | * to reset promiscuous mode. In that case we |
| 6027 | * will still be marked UP but not RUNNING. |
| 6028 | * However trying to re-init the interface |
| 6029 | * is the wrong thing to do as we've already |
| 6030 | * torn down much of our state. There's |
| 6031 | * probably a better way to deal with this. |
| 6032 | */ |
| 6033 | if (!sc->sc_invalid) |
| 6034 | ath_init(sc); /* XXX lose error */ |
| 6035 | } else { |
| 6036 | ath_stop_locked(ifp); |
| 6037 | #ifdef notyet |
| 6038 | /* XXX must wakeup in places like ath_vap_delete */ |
| 6039 | if (!sc->sc_invalid) |
| 6040 | ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); |
| 6041 | #endif |
| 6042 | } |
| 6043 | ATH_UNLOCK(sc); |
| 6044 | break; |
| 6045 | case SIOCGIFMEDIA: |
| 6046 | case SIOCSIFMEDIA: |
| 6047 | error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); |
| 6048 | break; |
| 6049 | case SIOCGATHSTATS: |
| 6050 | /* NB: embed these numbers to get a consistent view */ |
| 6051 | sc->sc_stats.ast_tx_packets = ifp->if_opackets; |
| 6052 | sc->sc_stats.ast_rx_packets = ifp->if_ipackets; |
| 6053 | sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi); |
| 6054 | sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi); |
| 6055 | #ifdef IEEE80211_SUPPORT_TDMA |
| 6056 | sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap); |
| 6057 | sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam); |
| 6058 | #endif |
| 6059 | rt = sc->sc_currates; |
| 6060 | sc->sc_stats.ast_tx_rate = |
| 6061 | rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC; |
| 6062 | if (rt->info[sc->sc_txrix].phy & IEEE80211_T_HT) |
| 6063 | sc->sc_stats.ast_tx_rate |= IEEE80211_RATE_MCS; |
| 6064 | return copyout(&sc->sc_stats, |
| 6065 | ifr->ifr_data, sizeof (sc->sc_stats)); |
| 6066 | case SIOCGATHAGSTATS: |
| 6067 | return copyout(&sc->sc_aggr_stats, |
| 6068 | ifr->ifr_data, sizeof (sc->sc_aggr_stats)); |
| 6069 | case SIOCZATHSTATS: |
| 6070 | error = priv_check(curthread, PRIV_DRIVER); |
| 6071 | if (error == 0) { |
| 6072 | memset(&sc->sc_stats, 0, sizeof(sc->sc_stats)); |
| 6073 | memset(&sc->sc_aggr_stats, 0, |
| 6074 | sizeof(sc->sc_aggr_stats)); |
| 6075 | memset(&sc->sc_intr_stats, 0, |
| 6076 | sizeof(sc->sc_intr_stats)); |
| 6077 | } |
| 6078 | break; |
| 6079 | #ifdef ATH_DIAGAPI |
| 6080 | case SIOCGATHDIAG: |
| 6081 | error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); |
| 6082 | break; |
| 6083 | case SIOCGATHPHYERR: |
| 6084 | error = ath_ioctl_phyerr(sc,(struct ath_diag*) ifr); |
| 6085 | break; |
| 6086 | #endif |
| 6087 | case SIOCGATHSPECTRAL: |
| 6088 | error = ath_ioctl_spectral(sc,(struct ath_diag*) ifr); |
| 6089 | break; |
| 6090 | case SIOCGATHNODERATESTATS: |
| 6091 | error = ath_ioctl_ratestats(sc, (struct ath_rateioctl *) ifr); |
| 6092 | break; |
| 6093 | case SIOCGIFADDR: |
| 6094 | error = ether_ioctl(ifp, cmd, data); |
| 6095 | break; |
| 6096 | default: |
| 6097 | error = EINVAL; |
| 6098 | break; |
| 6099 | } |
| 6100 | return error; |
| 6101 | #undef IS_RUNNING |
| 6102 | } |
| 6103 | |
| 6104 | /* |
| 6105 | * Announce various information on device/driver attach. |
| 6106 | */ |
| 6107 | static void |
| 6108 | ath_announce(struct ath_softc *sc) |
| 6109 | { |
| 6110 | struct ifnet *ifp = sc->sc_ifp; |
| 6111 | struct ath_hal *ah = sc->sc_ah; |
| 6112 | |
| 6113 | if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n", |
| 6114 | ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev, |
| 6115 | ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); |
| 6116 | if_printf(ifp, "2GHz radio: 0x%.4x; 5GHz radio: 0x%.4x\n", |
| 6117 | ah->ah_analog2GhzRev, ah->ah_analog5GhzRev); |
| 6118 | if (bootverbose) { |
| 6119 | int i; |
| 6120 | for (i = 0; i <= WME_AC_VO; i++) { |
| 6121 | struct ath_txq *txq = sc->sc_ac2q[i]; |
| 6122 | if_printf(ifp, "Use hw queue %u for %s traffic\n", |
| 6123 | txq->axq_qnum, ieee80211_wme_acnames[i]); |
| 6124 | } |
| 6125 | if_printf(ifp, "Use hw queue %u for CAB traffic\n", |
| 6126 | sc->sc_cabq->axq_qnum); |
| 6127 | if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); |
| 6128 | } |
| 6129 | if (ath_rxbuf != ATH_RXBUF) |
| 6130 | if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); |
| 6131 | if (ath_txbuf != ATH_TXBUF) |
| 6132 | if_printf(ifp, "using %u tx buffers\n", ath_txbuf); |
| 6133 | if (sc->sc_mcastkey && bootverbose) |
| 6134 | if_printf(ifp, "using multicast key search\n"); |
| 6135 | } |
| 6136 | |
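/*
* Deferred DFS radar-event processing task.
*/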
| 6137 | static void |
| 6138 | ath_dfs_tasklet(void *p, int npending) |
| 6139 | { |
| 6140 | struct ath_softc *sc = (struct ath_softc *) p; |
| 6141 | struct ifnet *ifp = sc->sc_ifp; |
| 6142 | struct ieee80211com *ic = ifp->if_l2com; |
| 6143 | |
| 6144 | /* |
| 6145 | * If previous processing has found a radar event, |
| 6146 | * signal this to the net80211 layer to begin DFS |
| 6147 | * processing. |
| 6148 | */ |
| 6149 | wlan_serialize_enter(); |
| 6150 | if (ath_dfs_process_radar_event(sc, sc->sc_curchan)) { |
| 6151 | /* DFS event found, initiate channel change */ |
| 6152 | /* |
| 6153 | * XXX doesn't currently tell us whether the event |
| 6154 | * XXX was found in the primary or extension |
| 6155 | * XXX channel! |
| 6156 | */ |
| 6157 | IEEE80211_LOCK(ic); |
| 6158 | ieee80211_dfs_notify_radar(ic, sc->sc_curchan); |
| 6159 | IEEE80211_UNLOCK(ic); |
| 6160 | } |
| 6161 | wlan_serialize_exit(); |
| 6162 | } |
| 6163 | |
| 6164 | #if 0 |
| 6165 | /* |
| 6166 | * Enable/disable power save. This must be called with |
| 6167 | * no TX driver locks currently held, so it should only |
| 6168 | * be called from the RX path (which doesn't hold any |
| 6169 | * TX driver locks.) |
| 6170 | */ |
| 6171 | static void |
| 6172 | ath_node_powersave(struct ieee80211_node *ni, int enable) |
| 6173 | { |
| 6174 | #ifdef ATH_SW_PSQ |
| 6175 | struct ath_node *an = ATH_NODE(ni); |
| 6176 | struct ieee80211com *ic = ni->ni_ic; |
| 6177 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 6178 | struct ath_vap *avp = ATH_VAP(ni->ni_vap); |
| 6179 | |
| 6180 | /* XXX and no TXQ locks should be held here */ |
| 6181 | |
| 6182 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, "%s: %6D: enable=%d\n", |
| 6183 | __func__, |
| 6184 | ni->ni_macaddr, |
| 6185 | ":", |
| 6186 | !! enable); |
| 6187 | |
| 6188 | /* Suspend or resume software queue handling */ |
| 6189 | if (enable) |
| 6190 | ath_tx_node_sleep(sc, an); |
| 6191 | else |
| 6192 | ath_tx_node_wakeup(sc, an); |
| 6193 | |
| 6194 | /* Update net80211 state */ |
| 6195 | if (avp->av_node_ps) |
| 6196 | avp->av_node_ps(ni, enable); |
| 6197 | #else |
| 6198 | struct ath_vap *avp = ATH_VAP(ni->ni_vap); |
| 6199 | |
| 6200 | /* Update net80211 state */ |
| 6201 | if (avp->av_node_ps) |
| 6202 | avp->av_node_ps(ni, enable); |
| 6203 | #endif/* ATH_SW_PSQ */ |
| 6204 | } |
| 6205 | |
| 6206 | #endif |
| 6207 | |
| 6208 | /* |
| 6209 | * Notification from net80211 that the powersave queue state has |
| 6210 | * changed. |
| 6211 | * |
| 6212 | * Since the software queue also may have some frames: |
| 6213 | * |
| 6214 | * + if the node software queue has frames and the TID state |
| 6215 | * is 0, we set the TIM; |
| 6216 | * + if the node and the stack are both empty, we clear the TIM bit;
| 6217 | * + if the stack tries to set the bit, always set it;
| 6218 | * + if the stack tries to clear the bit, only clear it if the
| 6219 | * software queue in question is also cleared.
| 6220 | * |
| 6221 | * TODO: this is called during node teardown; so let's ensure this |
| 6222 | * is all correctly handled and that the TIM bit is cleared. |
| 6223 | * It may be that the node flush is called _AFTER_ the net80211 |
| 6224 | * stack clears the TIM. |
| 6225 | * |
| 6226 | * Here is the racy part. Since it's possible >1 concurrent, |
| 6227 | * overlapping TXes will appear complete with a TX completion in |
| 6228 | * another thread, it's possible that the concurrent TIM calls will |
| 6229 | * clash. We can't hold the node lock here because setting the |
| 6230 | * TIM grabs the net80211 comlock and this may cause a LOR. |
| 6231 | * The solution is either to totally serialise _everything_ at |
| 6232 | * this point (ie, all TX, completion and any reset/flush go into |
| 6233 | * one taskqueue) or a new "ath TIM lock" needs to be created that |
| 6234 | * just wraps the driver state change and this call to avp->av_set_tim(). |
| 6235 | * |
| 6236 | * The same race exists in the net80211 power save queue handling |
| 6237 | * as well. Since multiple transmitting threads may queue frames |
| 6238 | * into the driver, as well as ps-poll and the driver transmitting |
| 6239 | * frames (and thus clearing the psq), it's quite possible that |
| 6240 | * a packet entering the PSQ and a ps-poll being handled will |
| 6241 | * race, causing the TIM to be cleared and not re-set. |
| 6242 | */ |
| 6243 | static int |
| 6244 | ath_node_set_tim(struct ieee80211_node *ni, int enable) |
| 6245 | { |
| 6246 | #ifdef ATH_SW_PSQ |
| 6247 | struct ieee80211com *ic = ni->ni_ic; |
| 6248 | struct ath_softc *sc = ic->ic_ifp->if_softc; |
| 6249 | struct ath_node *an = ATH_NODE(ni); |
| 6250 | struct ath_vap *avp = ATH_VAP(ni->ni_vap); |
| 6251 | int changed = 0; |
| 6252 | |
| 6253 | ATH_TX_LOCK(sc); |
| 6254 | an->an_stack_psq = enable; |
| 6255 | |
| 6256 | /* |
| 6257 | * This will get called for all operating modes, |
| 6258 | * even if avp->av_set_tim is unset. |
| 6259 | * It's currently set for hostap/ibss modes; but |
| 6260 | * the same infrastructure is used for both STA |
| 6261 | * and AP/IBSS node power save. |
| 6262 | */ |
| 6263 | if (avp->av_set_tim == NULL) { |
| 6264 | ATH_TX_UNLOCK(sc); |
| 6265 | return (0); |
| 6266 | } |
| 6267 | |
| 6268 | /* |
| 6269 | * If setting the bit, always set it here. |
| 6270 | * If clearing the bit, only clear it if the |
| 6271 | * software queue is also empty. |
| 6272 | * |
| 6273 | * If the node has left power save, just clear the TIM |
| 6274 | * bit regardless of the state of the power save queue. |
| 6275 | * |
| 6276 | * XXX TODO: although atomics are used, it's quite possible |
| 6277 | * that a race will occur between this and setting/clearing |
 * in another thread.  TX completion always occurs in one
 * thread; however, setting/clearing the TIM bit can come
 * from a variety of different process contexts!
| 6281 | */ |
| 6282 | if (enable && an->an_tim_set == 1) { |
| 6283 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, |
| 6284 | "%s: %6D: enable=%d, tim_set=1, ignoring\n", |
| 6285 | __func__, |
| 6286 | ni->ni_macaddr, |
| 6287 | ":", |
| 6288 | enable); |
| 6289 | ATH_TX_UNLOCK(sc); |
| 6290 | } else if (enable) { |
| 6291 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, |
| 6292 | "%s: %6D: enable=%d, enabling TIM\n", |
| 6293 | __func__, |
| 6294 | ni->ni_macaddr, |
| 6295 | ":", |
| 6296 | enable); |
| 6297 | an->an_tim_set = 1; |
| 6298 | ATH_TX_UNLOCK(sc); |
| 6299 | changed = avp->av_set_tim(ni, enable); |
| 6300 | } else if (an->an_swq_depth == 0) { |
| 6301 | /* disable */ |
| 6302 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, |
| 6303 | "%s: %6D: enable=%d, an_swq_depth == 0, disabling\n", |
| 6304 | __func__, |
| 6305 | ni->ni_macaddr, |
| 6306 | ":", |
| 6307 | enable); |
| 6308 | an->an_tim_set = 0; |
| 6309 | ATH_TX_UNLOCK(sc); |
| 6310 | changed = avp->av_set_tim(ni, enable); |
| 6311 | } else if (! an->an_is_powersave) { |
| 6312 | /* |
| 6313 | * disable regardless; the node isn't in powersave now |
| 6314 | */ |
| 6315 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, |
| 6316 | "%s: %6D: enable=%d, an_pwrsave=0, disabling\n", |
| 6317 | __func__, |
| 6318 | ni->ni_macaddr, |
| 6319 | ":", |
| 6320 | enable); |
| 6321 | an->an_tim_set = 0; |
| 6322 | ATH_TX_UNLOCK(sc); |
| 6323 | changed = avp->av_set_tim(ni, enable); |
| 6324 | } else { |
| 6325 | /* |
| 6326 | * psq disable, node is currently in powersave, node |
| 6327 | * software queue isn't empty, so don't clear the TIM bit |
| 6328 | * for now. |
| 6329 | */ |
| 6330 | ATH_TX_UNLOCK(sc); |
| 6331 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, |
| 6332 | "%s: %6D: enable=%d, an_swq_depth > 0, ignoring\n", |
| 6333 | __func__, |
| 6334 | ni->ni_macaddr, |
| 6335 | ":", |
| 6336 | enable); |
| 6337 | changed = 0; |
| 6338 | } |
| 6339 | |
| 6340 | return (changed); |
| 6341 | #else |
| 6342 | struct ath_vap *avp = ATH_VAP(ni->ni_vap); |
| 6343 | |
| 6344 | /* |
| 6345 | * Some operating modes don't set av_set_tim(), so don't |
| 6346 | * update it here. |
| 6347 | */ |
| 6348 | if (avp->av_set_tim == NULL) |
| 6349 | return (0); |
| 6350 | |
| 6351 | return (avp->av_set_tim(ni, enable)); |
| 6352 | #endif /* ATH_SW_PSQ */ |
| 6353 | } |
| 6354 | |
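/*
 * ath_node_set_tim() above is interposed as the vap iv_set_tim method
 * when the vap is created; the original net80211 method is saved in
 * av_set_tim so it can still be invoked once the driver-side software
 * queue state has been taken into account.  The compiled-out fragment
 * below is an illustrative sketch of that wiring only - it follows the
 * save-and-override pattern used for the other node power save methods
 * (e.g. av_node_ps), and the avp/vap locals are those of the vap
 * creation path, not a verbatim copy of that code.
 */
#if 0
	/* In ath_vap_create(), while overriding the vap methods: */
	avp->av_set_tim = vap->iv_set_tim;
	vap->iv_set_tim = ath_node_set_tim;
#endif
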
| 6355 | /* |
| 6356 | * Set or update the TIM from the software queue. |
| 6357 | * |
 * Check the software queue depth before attempting to take any
 * locks; that avoids acquiring the lock when there's nothing to
 * do.  Then, re-check afterwards to ensure nothing has changed
 * in the meantime.
| 6362 | * |
 * set:   This is designed to be called from the TX path, after
 *        a frame has been queued, to check whether the software
 *        queue depth is now non-zero.
| 6365 | * |
| 6366 | * clear: This is designed to be called from the buffer completion point |
| 6367 | * (right now it's ath_tx_default_comp()) where the state of |
| 6368 | * a software queue has changed. |
| 6369 | * |
| 6370 | * It makes sense to place it at buffer free / completion rather |
| 6371 | * than after each software queue operation, as there's no real |
| 6372 | * point in churning the TIM bit as the last frames in the software |
| 6373 | * queue are transmitted. If they fail and we retry them, we'd |
| 6374 | * just be setting the TIM bit again anyway. |
| 6375 | */ |
| 6376 | void |
| 6377 | ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni, |
| 6378 | int enable) |
| 6379 | { |
| 6380 | #ifdef ATH_SW_PSQ |
| 6381 | struct ath_node *an; |
| 6382 | struct ath_vap *avp; |
| 6383 | |
| 6384 | /* Don't do this for broadcast/etc frames */ |
| 6385 | if (ni == NULL) |
| 6386 | return; |
| 6387 | |
| 6388 | an = ATH_NODE(ni); |
| 6389 | avp = ATH_VAP(ni->ni_vap); |
| 6390 | |
| 6391 | /* |
| 6392 | * And for operating modes without the TIM handler set, let's |
| 6393 | * just skip those. |
| 6394 | */ |
| 6395 | if (avp->av_set_tim == NULL) |
| 6396 | return; |
| 6397 | |
| 6398 | ATH_TX_LOCK_ASSERT(sc); |
| 6399 | |
| 6400 | if (enable) { |
| 6401 | if (an->an_is_powersave && |
| 6402 | an->an_tim_set == 0 && |
| 6403 | an->an_swq_depth != 0) { |
| 6404 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, |
| 6405 | "%s: %6D: swq_depth>0, tim_set=0, set!\n", |
| 6406 | __func__, |
| 6407 | ni->ni_macaddr, |
| 6408 | ":"); |
| 6409 | an->an_tim_set = 1; |
| 6410 | (void) avp->av_set_tim(ni, 1); |
| 6411 | } |
| 6412 | } else { |
		/*
		 * If the software queue still has frames, leave the
		 * TIM bit alone.
		 */
		if (an->an_swq_depth != 0)
| 6417 | return; |
| 6418 | |
| 6419 | if (an->an_is_powersave && |
| 6420 | an->an_stack_psq == 0 && |
| 6421 | an->an_tim_set == 1 && |
| 6422 | an->an_swq_depth == 0) { |
| 6423 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, |
| 6424 | "%s: %6D: swq_depth=0, tim_set=1, psq_set=0," |
| 6425 | " clear!\n", |
| 6426 | __func__, |
| 6427 | ni->ni_macaddr, |
| 6428 | ":"); |
| 6429 | an->an_tim_set = 0; |
| 6430 | (void) avp->av_set_tim(ni, 0); |
| 6431 | } |
| 6432 | } |
| 6433 | #else |
| 6434 | return; |
| 6435 | #endif /* ATH_SW_PSQ */ |
| 6436 | } |
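
/*
 * The compiled-out fragment below sketches the calling pattern
 * described above: the TX path calls this with enable=1 once a frame
 * has been added to a node software queue, and the completion path
 * (currently ath_tx_default_comp()) calls it with enable=0 once a
 * buffer has been completed/freed.  Both call sites are assumed to
 * hold the TX lock, matching the ATH_TX_LOCK_ASSERT() above; the
 * exact placement in the TX/completion code is illustrative only.
 */
#if 0
	/* TX path, ATH_TX_LOCK held, after queuing a frame to the swq: */
	ath_tx_update_tim(sc, ni, 1);

	/* Completion path, ATH_TX_LOCK held, after completing a buffer: */
	ath_tx_update_tim(sc, ni, 0);
#endif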
| 6437 | |
| 6438 | #if 0 |
| 6439 | /* |
| 6440 | * Received a ps-poll frame from net80211. |
| 6441 | * |
 * Here we get a chance to serve out a software-queued frame ourselves
 * before punting to net80211 to have it transmit one itself - either
 * a frame from the net80211 psq, or a null data frame to indicate
 * there's nothing else.
| 6446 | */ |
| 6447 | static void |
| 6448 | ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m) |
| 6449 | { |
| 6450 | #ifdef ATH_SW_PSQ |
	struct ath_node *an;
	struct ath_vap *avp;
	struct ieee80211com *ic;
	struct ath_softc *sc;
	int tid;

	/* Just paranoia; don't dereference ni before checking it */
	if (ni == NULL)
		return;

	ic = ni->ni_ic;
	sc = ic->ic_ifp->if_softc;
| 6460 | |
| 6461 | /* |
| 6462 | * Unassociated (temporary node) station. |
| 6463 | */ |
| 6464 | if (ni->ni_associd == 0) |
| 6465 | return; |
| 6466 | |
| 6467 | /* |
| 6468 | * We do have an active node, so let's begin looking into it. |
| 6469 | */ |
| 6470 | an = ATH_NODE(ni); |
| 6471 | avp = ATH_VAP(ni->ni_vap); |
| 6472 | |
| 6473 | /* |
| 6474 | * For now, we just call the original ps-poll method. |
| 6475 | * Once we're ready to flip this on: |
| 6476 | * |
| 6477 | * + Set leak to 1, as no matter what we're going to have |
| 6478 | * to send a frame; |
| 6479 | * + Check the software queue and if there's something in it, |
 *   schedule the highest TID that has traffic from this node.
| 6481 | * Then make sure we schedule the software scheduler to |
| 6482 | * run so it picks up said frame. |
| 6483 | * |
| 6484 | * That way whatever happens, we'll at least send _a_ frame |
| 6485 | * to the given node. |
| 6486 | * |
| 6487 | * Again, yes, it's crappy QoS if the node has multiple |
| 6488 | * TIDs worth of traffic - but let's get it working first |
| 6489 | * before we optimise it. |
| 6490 | * |
| 6491 | * Also yes, there's definitely latency here - we're not |
| 6492 | * direct dispatching to the hardware in this path (and |
| 6493 | * we're likely being called from the packet receive path, |
| 6494 | * so going back into TX may be a little hairy!) but again |
| 6495 | * I'd like to get this working first before optimising |
| 6496 | * turn-around time. |
| 6497 | */ |
| 6498 | |
| 6499 | ATH_TX_LOCK(sc); |
| 6500 | |
| 6501 | /* |
| 6502 | * Legacy - we're called and the node isn't asleep. |
| 6503 | * Immediately punt. |
| 6504 | */ |
| 6505 | if (! an->an_is_powersave) { |
| 6506 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, |
| 6507 | "%s: %6D: not in powersave?\n", |
| 6508 | __func__, |
| 6509 | ni->ni_macaddr, |
| 6510 | ":"); |
| 6511 | ATH_TX_UNLOCK(sc); |
| 6512 | if (avp->av_recv_pspoll) |
| 6513 | avp->av_recv_pspoll(ni, m); |
| 6514 | return; |
| 6515 | } |
| 6516 | |
| 6517 | /* |
| 6518 | * We're in powersave. |
| 6519 | * |
| 6520 | * Leak a frame. |
| 6521 | */ |
| 6522 | an->an_leak_count = 1; |
| 6523 | |
| 6524 | /* |
	 * Now, if there are no frames queued for the node, just punt
	 * to recv_pspoll.
| 6527 | * |
| 6528 | * Don't bother checking if the TIM bit is set, we really |
| 6529 | * only care if there are any frames here! |
| 6530 | */ |
| 6531 | if (an->an_swq_depth == 0) { |
| 6532 | ATH_TX_UNLOCK(sc); |
| 6533 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, |
| 6534 | "%s: %6D: SWQ empty; punting to net80211\n", |
| 6535 | __func__, |
| 6536 | ni->ni_macaddr, |
| 6537 | ":"); |
| 6538 | if (avp->av_recv_pspoll) |
| 6539 | avp->av_recv_pspoll(ni, m); |
| 6540 | return; |
| 6541 | } |
| 6542 | |
| 6543 | /* |
| 6544 | * Ok, let's schedule the highest TID that has traffic |
	 * and then kick the TX scheduler so the frame gets picked up.
| 6546 | */ |
| 6547 | for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) { |
| 6548 | struct ath_tid *atid = &an->an_tid[tid]; |
| 6549 | /* |
| 6550 | * No frames? Skip. |
| 6551 | */ |
| 6552 | if (atid->axq_depth == 0) |
| 6553 | continue; |
| 6554 | ath_tx_tid_sched(sc, atid); |
| 6555 | /* |
| 6556 | * XXX we could do a direct call to the TXQ |
| 6557 | * scheduler code here to optimise latency |
| 6558 | * at the expense of a REALLY deep callstack. |
| 6559 | */ |
| 6560 | ATH_TX_UNLOCK(sc); |
| 6561 | taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask); |
| 6562 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, |
| 6563 | "%s: %6D: leaking frame to TID %d\n", |
| 6564 | __func__, |
| 6565 | ni->ni_macaddr, |
| 6566 | ":", |
| 6567 | tid); |
| 6568 | return; |
| 6569 | } |
| 6570 | |
| 6571 | ATH_TX_UNLOCK(sc); |
| 6572 | |
| 6573 | /* |
| 6574 | * XXX nothing in the TIDs at this point? Eek. |
| 6575 | */ |
| 6576 | DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE, |
| 6577 | "%s: %6D: TIDs empty, but ath_node showed traffic?!\n", |
| 6578 | __func__, |
| 6579 | ni->ni_macaddr, |
| 6580 | ":"); |
| 6581 | if (avp->av_recv_pspoll) |
| 6582 | avp->av_recv_pspoll(ni, m); |
#else
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);

	if (avp->av_recv_pspoll)
		avp->av_recv_pspoll(ni, m);
| 6586 | #endif /* ATH_SW_PSQ */ |
| 6587 | } |
| 6588 | |
| 6589 | #endif |
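
/*
 * The ps-poll handler above relies on an_leak_count to guarantee that
 * a single frame is released to a node that is still marked as being
 * in power save.  The compiled-out helper below is a purely
 * hypothetical sketch of how the TX scheduler side could consume that
 * counter; the function name and its placement are illustrative and do
 * not correspond to the actual scheduler code.  The caller is assumed
 * to hold the TX lock, since an_leak_count is modified under it above.
 */
#if 0
static int
ath_tx_node_may_tx(struct ath_node *an)
{

	/* Awake nodes can always be sent to. */
	if (! an->an_is_powersave)
		return (1);

	/* A ps-poll granted a single-frame "leak"; consume it. */
	if (an->an_leak_count > 0) {
		an->an_leak_count--;
		return (1);
	}

	/* Asleep and no leak pending - hold the frame. */
	return (0);
}
#endif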
| 6590 | |
| 6591 | MODULE_VERSION(if_ath, 1); |
| 6592 | MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */ |
| 6593 | #if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ) |
| 6594 | MODULE_DEPEND(if_ath, alq, 1, 1, 1); |
| 6595 | #endif |