ath - Basic re-port, base code compile
[dragonfly.git] / sys / dev / netif / ath / ath / if_ath.c
/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net/ifq_var.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_rx_edma.h>
#include <dev/netif/ath/ath/if_ath_tx_edma.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_ath_btcoex.h>
#include <dev/netif/ath/ath/if_ath_spectral.h>
#include <dev/netif/ath/ath/if_ath_lna_div.h>
#include <dev/netif/ath/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * Only enable this if you're working on PS-POLL support.
 */
#define	ATH_SW_PSQ

#ifdef __DragonFly__
#define	CURVNET_SET(name)
#define	CURVNET_RESTORE()
#endif

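/*
 * NB: DragonFly has no vnet/VIMAGE support, so the CURVNET macros
 * above are intentionally defined away to nothing.
 */
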
/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid.  When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out.  You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
 */
CTASSERT(ATH_BCBUF <= 8);

static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
		    const char [IFNAMSIZ], int, enum ieee80211_opmode, int,
		    const uint8_t [IEEE80211_ADDR_LEN],
		    const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_vap_delete(struct ieee80211vap *);
static void	ath_init(void *);
static void	ath_stop_locked(struct ifnet *);
static void	ath_stop(struct ifnet *);
static int	ath_reset_vap(struct ieee80211vap *, u_long);
#if 0
static int	ath_transmit(struct ifnet *ifp, struct mbuf *m);
static void	ath_qflush(struct ifnet *ifp);
#endif
static int	ath_media_change(struct ifnet *);
static void	ath_watchdog(void *);
static int	ath_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	ath_fatal_proc(void *, int);
static void	ath_bmiss_vap(struct ieee80211vap *);
static void	ath_bmiss_proc(void *, int);
static void	ath_key_update_begin(struct ieee80211vap *);
static void	ath_key_update_end(struct ieee80211vap *);
static void	ath_update_mcast(struct ifnet *);
static void	ath_update_promisc(struct ifnet *);
static void	ath_updateslot(struct ifnet *);
static void	ath_bstuck_proc(void *, int);
static void	ath_reset_proc(void *, int);
static int	ath_desc_alloc(struct ath_softc *);
static void	ath_desc_free(struct ath_softc *);
static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
			const uint8_t [IEEE80211_ADDR_LEN]);
static void	ath_node_cleanup(struct ieee80211_node *);
static void	ath_node_free(struct ieee80211_node *);
static void	ath_node_getsignal(const struct ieee80211_node *,
			int8_t *, int8_t *);
static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
static struct ath_txq *ath_txq_setup(struct ath_softc *, int qtype, int subtype);
static int	ath_tx_setup(struct ath_softc *, int, int);
static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
static void	ath_tx_cleanup(struct ath_softc *);
static int	ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq,
		    int dosched);
static void	ath_tx_proc_q0(void *, int);
static void	ath_tx_proc_q0123(void *, int);
static void	ath_tx_proc(void *, int);
static void	ath_txq_sched_tasklet(void *, int);
static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
static void	ath_scan_start(struct ieee80211com *);
static void	ath_scan_end(struct ieee80211com *);
static void	ath_set_channel(struct ieee80211com *);
#ifdef	ATH_ENABLE_11N
static void	ath_update_chw(struct ieee80211com *);
#endif	/* ATH_ENABLE_11N */
static void	ath_calibrate(void *);
static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void	ath_setup_stationkey(struct ieee80211_node *);
static void	ath_newassoc(struct ieee80211_node *, int);
static int	ath_setregdomain(struct ieee80211com *,
		    struct ieee80211_regdomain *, int,
		    struct ieee80211_channel []);
static void	ath_getradiocaps(struct ieee80211com *, int, int *,
		    struct ieee80211_channel []);
static int	ath_getchannels(struct ath_softc *);

static int	ath_rate_setup(struct ath_softc *, u_int mode);
static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);

static void	ath_announce(struct ath_softc *);

static void	ath_dfs_tasklet(void *, int);
#if 0
static void	ath_node_powersave(struct ieee80211_node *, int);
static void	ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *);
#endif
static int	ath_node_set_tim(struct ieee80211_node *, int);

#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/netif/ath/ath/if_ath_tdma.h>
#endif

SYSCTL_DECL(_hw_ath);

/* XXX validate sysctl values */
static int ath_longcalinterval = 30;		/* long cals every 30 secs */
SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
	    0, "long chip calibration interval (secs)");
static int ath_shortcalinterval = 100;		/* short cals every 100 ms */
SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
	    0, "short chip calibration interval (msecs)");
static int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
	    0, "reset chip calibration results (secs)");
static int ath_anicalinterval = 100;		/* ANI calibration - 100 msec */
SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
	    0, "ANI calibration (msecs)");

int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
	    0, "rx buffers allocated");
TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
	    0, "tx buffers allocated");
TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
int ath_txbuf_mgmt = ATH_MGMT_TXBUF;	/* # mgmt tx buffers to allocate */
SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
	    0, "tx (mgmt) buffers allocated");
TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);

int ath_bstuck_threshold = 4;		/* max missed beacons */
SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
	    0, "max missed beacon xmits before chip reset");

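/*
 * E.g. the TUNABLE_INT knobs above may be seeded at boot from
 * /boot/loader.conf:
 *
 *	hw.ath.rxbuf="512"
 *	hw.ath.txbuf="512"
 *
 * while the CTLFLAG_RW sysctls can be changed at runtime with
 * sysctl(8), e.g. "sysctl hw.ath.bstuck=8".
 */
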
MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");

void
ath_legacy_attach_comp_func(struct ath_softc *sc)
{

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}
}

#define	HAL_MODE_HT20	(HAL_MODE_11NG_HT20 | HAL_MODE_11NA_HT20)
#define	HAL_MODE_HT40 \
	(HAL_MODE_11NG_HT40PLUS | HAL_MODE_11NG_HT40MINUS | \
	HAL_MODE_11NA_HT40PLUS | HAL_MODE_11NA_HT40MINUS)
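
/*
 * NB: these fold the 2GHz (11NG) and 5GHz (11NA) HT modes together
 * so the HT capability checks in ath_attach() are band-agnostic.
 */
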
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];
	int rx_chainmask, tx_chainmask;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	CURVNET_SET(vnet0);
	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		CURVNET_RESTORE();
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));
	CURVNET_RESTORE();

	/* prepare sysctl tree for use in sub modules */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_hw),
		OID_AUTO,
		device_get_nameunit(sc->sc_dev),
		CTLFLAG_RD, 0, "");

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh,
	    sc->sc_eepromdata, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Setup the DMA/EDMA functions based on the current
	 * hardware support.
	 *
	 * This is required before the descriptors are allocated.
	 */
	if (ath_hal_hasedma(sc->sc_ah)) {
		sc->sc_isedma = 1;
		ath_recv_setup_edma(sc);
		ath_xmit_setup_edma(sc);
	} else {
		ath_recv_setup_legacy(sc);
		ath_xmit_setup_legacy(sc);
	}

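	/*
	 * NB: the EDMA path above covers the newer (AR93xx era) MACs
	 * with FIFO-based TX/RX descriptor queues; older MACs use the
	 * legacy linked-list descriptor scheme.
	 */
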
	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate TX descriptors and populate the lists.
	 */
	wlan_assert_serialized();
	wlan_serialize_exit();
	error = ath_desc_alloc(sc);
	wlan_serialize_enter();
	if (error != 0) {
		if_printf(ifp, "failed to allocate TX descriptors: %d\n",
		    error);
		goto bad;
	}
	error = ath_txdma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate TX descriptors: %d\n",
		    error);
		goto bad;
	}

	/*
	 * Allocate RX descriptors and populate the lists.
	 */
	error = ath_rxdma_setup(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate RX descriptors: %d\n",
		    error);
		goto bad;
	}

	callout_init_mp(&sc->sc_cal_ch);
	callout_init_mp(&sc->sc_wd_ch);

	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, TDPRI_KERN_DAEMON, -1,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, sc->sc_rx.recv_tasklet, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc);
	TASK_INIT(&sc->sc_resettask, 0, ath_reset_proc, sc);
	TASK_INIT(&sc->sc_txqtask, 0, ath_txq_sched_tasklet, sc);
	TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(sc);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Attach the TX completion function.
	 *
	 * The non-EDMA chips may have some special case optimisations;
	 * this method gives everyone a chance to attach cleanly.
	 */
	sc->sc_tx.xmit_attach_comp_func(sc);

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	/* Attach DFS module */
	if (! ath_dfs_attach(sc)) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach DFS\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach spectral module */
	if (ath_spectral_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach spectral\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach bluetooth coexistence module */
	if (ath_btcoex_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach bluetooth coexistence\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Attach LNA diversity module */
	if (ath_lna_div_attach(sc) < 0) {
		device_printf(sc->sc_dev,
		    "%s: unable to attach LNA diversity\n", __func__);
		error = EIO;
		goto bad2;
	}

	/* Start DFS processing tasklet */
	TASK_INIT(&sc->sc_dfstask, 0, ath_dfs_tasklet, sc);

	/* Configure LED state */
	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7 sec */
	callout_init_mp(&sc->sc_ledtimer);

	/*
	 * Don't setup hardware-based blinking.
	 *
	 * Although some NICs may have this configured in the
	 * default reset register values, the user may wish
	 * to alter which pins have which function.
	 *
	 * The reference driver attaches the MAC network LED to GPIO1 and
	 * the MAC power LED to GPIO2.  However, the DWA-552 cardbus
	 * NIC has these reversed.
	 */
	sc->sc_hardled = (1 == 0);
	sc->sc_led_net_pin = -1;
	sc->sc_led_pwr_pin = -1;
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	ath_led_config(sc);
	ath_hal_setledstate(ah, HAL_LED_INIT);

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
#if 0
	ifp->if_transmit = ath_transmit;
	ifp->if_qflush = ath_qflush;
#endif
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
#if 0
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);
#endif

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_MBSS		/* mesh point link mode */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
#ifndef	ATH_ENABLE_11N
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
#endif
		| IEEE80211_C_TXFRAG		/* handle tx frags */
#ifdef	ATH_ENABLE_DFS
		| IEEE80211_C_DFS		/* Enable radar detection */
#endif
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	/*
	 * Check for multicast key search support.
	 */
	if (ath_hal_hasmcastkeysearch(sc->sc_ah) &&
	    !ath_hal_getmcastkeysearch(sc->sc_ah)) {
		ath_hal_setmcastkeysearch(sc->sc_ah, 1);
	}
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
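	/*
	 * NB: for each global key index i this reserves slot i (the key)
	 * and i+64 (its MIC key); with split MIC the paired +32 slots
	 * are reserved as well, per the comment above.
	 */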
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hasbmatch = ath_hal_hasbssidmatch(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	sc->sc_rxslink = ath_hal_self_linked_final_rxdesc(ah);
	sc->sc_rxtsf32 = ath_hal_has_long_rxdesc_tsf(ah);
	sc->sc_hasenforcetxop = ath_hal_hasenforcetxop(ah);
	sc->sc_rx_lnamixer = ath_hal_hasrxlnamixer(ah);
	sc->sc_hasdivcomb = ath_hal_hasdivantcomb(ah);

	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif

	/*
	 * TODO: enforce that at least this many frames are available
	 * in the txbuf list before allowing data frames (raw or
	 * otherwise) to be transmitted.
	 */
	sc->sc_txq_data_minfree = 10;
	/*
	 * Leave this as default to maintain legacy behaviour.
	 * Shortening the cabq/mcastq may end up causing some
	 * undesirable behaviour.
	 */
	sc->sc_txq_mcastq_maxdepth = ath_txbuf;

	/*
	 * How deep can the node software TX queue get whilst it's asleep.
	 */
	sc->sc_txq_node_psq_maxdepth = 16;

	/*
	 * Default the maximum queue depth for a given node
	 * to 1/4'th the TX buffers, or 64, whichever
	 * is larger.
	 */
	sc->sc_txq_node_maxdepth = MAX(64, ath_txbuf / 4);

	/* Enable CABQ by default */
	sc->sc_cabq_enable = 1;

	/*
	 * Allow the TX and RX chainmasks to be overridden by
	 * environment variables and/or device.hints.
	 *
	 * This must be done early - before the hardware is
	 * calibrated or before the 802.11n stream calculation
	 * is done.
	 */
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "rx_chainmask",
	    &rx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting RX chainmask to 0x%x\n",
		    rx_chainmask);
		(void) ath_hal_setrxchainmask(sc->sc_ah, rx_chainmask);
	}
	if (resource_int_value(device_get_name(sc->sc_dev),
	    device_get_unit(sc->sc_dev), "tx_chainmask",
	    &tx_chainmask) == 0) {
		device_printf(sc->sc_dev, "Setting TX chainmask to 0x%x\n",
		    tx_chainmask);
		(void) ath_hal_settxchainmask(sc->sc_ah, tx_chainmask);
	}
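	/*
	 * E.g. a 1x1 configuration can be forced on unit 0 with the
	 * hints (loader tunables):
	 *
	 *	hint.ath.0.rx_chainmask="0x1"
	 *	hint.ath.0.tx_chainmask="0x1"
	 */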

	/*
	 * Query the TX/RX chainmask configuration.
	 *
	 * This is only relevant for 11n devices.
	 */
	ath_hal_getrxchainmask(ah, &sc->sc_rxchainmask);
	ath_hal_gettxchainmask(ah, &sc->sc_txchainmask);

	/*
	 * Disable MRR with protected frames by default.
	 * Only 802.11n series NICs can handle this.
	 */
	sc->sc_mrrprot = 0;	/* XXX should be a capability */

	/*
	 * Query the enterprise mode information from the HAL.
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_ENTERPRISE_MODE, 0,
	    &sc->sc_ent_cfg) == HAL_OK)
		sc->sc_use_ent = 1;

#ifdef	ATH_ENABLE_11N
	/*
	 * Query HT capabilities
	 */
	if (ath_hal_getcapability(ah, HAL_CAP_HT, 0, NULL) == HAL_OK &&
	    (wmodes & (HAL_MODE_HT20 | HAL_MODE_HT40))) {
		uint32_t rxs, txs;

		device_printf(sc->sc_dev, "[HT] enabling HT modes\n");

		sc->sc_mrrprot = 1;	/* XXX should be a capability */

		ic->ic_htcaps = IEEE80211_HTC_HT	/* HT operation */
			    | IEEE80211_HTC_AMPDU	/* A-MPDU tx/rx */
			    | IEEE80211_HTC_AMSDU	/* A-MSDU tx/rx */
			    | IEEE80211_HTCAP_MAXAMSDU_3839
							/* max A-MSDU length */
			    | IEEE80211_HTCAP_SMPS_OFF;	/* SM power save off */

		/*
		 * Enable short-GI for HT20 only if the hardware
		 * advertises support.
		 * Notably, anything earlier than the AR9287 doesn't.
		 */
		if ((ath_hal_getcapability(ah,
		    HAL_CAP_HT20_SGI, 0, NULL) == HAL_OK) &&
		    (wmodes & HAL_MODE_HT20)) {
			device_printf(sc->sc_dev,
			    "[HT] enabling short-GI in 20MHz mode\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_SHORTGI20;
		}

		if (wmodes & HAL_MODE_HT40)
			ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40
			    | IEEE80211_HTCAP_SHORTGI40;

		/*
		 * TX/RX streams need to be taken into account when
		 * negotiating which MCS rates it'll receive and
		 * what MCS rates are available for TX.
		 */
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 0, &txs);
		(void) ath_hal_getcapability(ah, HAL_CAP_STREAMS, 1, &rxs);
		ic->ic_txstream = txs;
		ic->ic_rxstream = rxs;

		/*
		 * Setup TX and RX STBC based on what the HAL allows and
		 * the currently configured chainmask set.
		 * Ie - don't enable STBC TX if only one chain is enabled.
		 * STBC RX is fine on a single RX chain; it just won't
		 * provide any real benefit.
		 */
		if (ath_hal_getcapability(ah, HAL_CAP_RX_STBC, 0,
		    NULL) == HAL_OK) {
			sc->sc_rx_stbc = 1;
			device_printf(sc->sc_dev,
			    "[HT] 1 stream STBC receive enabled\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_RXSTBC_1STREAM;
		}
		if (txs > 1 && ath_hal_getcapability(ah, HAL_CAP_TX_STBC, 0,
		    NULL) == HAL_OK) {
			sc->sc_tx_stbc = 1;
			device_printf(sc->sc_dev,
			    "[HT] 1 stream STBC transmit enabled\n");
			ic->ic_htcaps |= IEEE80211_HTCAP_TXSTBC;
		}

		(void) ath_hal_getcapability(ah, HAL_CAP_RTS_AGGR_LIMIT, 1,
		    &sc->sc_rts_aggr_limit);
		if (sc->sc_rts_aggr_limit != (64 * 1024))
			device_printf(sc->sc_dev,
			    "[HT] RTS aggregates limited to %d KiB\n",
			    sc->sc_rts_aggr_limit / 1024);

		device_printf(sc->sc_dev,
		    "[HT] %d RX streams; %d TX streams\n", rxs, txs);
	}
#endif

	/*
	 * Initial aggregation settings.
	 */
	sc->sc_hwq_limit_aggr = ATH_AGGR_MIN_QDEPTH;
	sc->sc_hwq_limit_nonaggr = ATH_NONAGGR_MIN_QDEPTH;
	sc->sc_tid_hwq_lo = ATH_AGGR_SCHED_LOW;
	sc->sc_tid_hwq_hi = ATH_AGGR_SCHED_HIGH;
	sc->sc_aggr_limit = ATH_AGGR_MAXSIZE;
	sc->sc_delim_min_pad = 0;

	/*
	 * Check if the hardware requires PCI register serialisation.
	 * Some of the Owl based MACs require this.
	 */
	if (ncpus > 1 &&
	    ath_hal_getcapability(ah, HAL_CAP_SERIALISE_WAR,
	     0, NULL) == HAL_OK) {
		sc->sc_ah->ah_config.ah_serialise_reg_war = 1;
		device_printf(sc->sc_dev,
		    "Enabling register serialisation\n");
	}

	/*
	 * Initialise the deferred completed RX buffer list.
	 */
	TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]);
	TAILQ_INIT(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]);

	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	sc->sc_node_cleanup = ic->ic_node_cleanup;
	ic->ic_node_cleanup = ath_node_cleanup;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;
#ifdef	ATH_ENABLE_11N
	/* 802.11n specific - but just override anyway */
	sc->sc_addba_request = ic->ic_addba_request;
	sc->sc_addba_response = ic->ic_addba_response;
	sc->sc_addba_stop = ic->ic_addba_stop;
	sc->sc_bar_response = ic->ic_bar_response;
	sc->sc_addba_response_timeout = ic->ic_addba_response_timeout;

	ic->ic_addba_request = ath_addba_request;
	ic->ic_addba_response = ath_addba_response;
	ic->ic_addba_response_timeout = ath_addba_response_timeout;
	ic->ic_addba_stop = ath_addba_stop;
	ic->ic_bar_response = ath_bar_response;

	ic->ic_update_chw = ath_update_chw;
#endif	/* ATH_ENABLE_11N */

#ifdef	ATH_ENABLE_RADIOTAP_VENDOR_EXT
	/*
	 * There's one vendor bitmap entry in the RX radiotap
	 * header; make sure that's taken into account.
	 */
	ieee80211_radiotap_attachv(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), 0,
	    ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), 1,
	    ATH_RX_RADIOTAP_PRESENT);
#else
	/*
	 * No vendor bitmap/extensions are present.
	 */
	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
	    ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
	    ATH_RX_RADIOTAP_PRESENT);
#endif	/* ATH_ENABLE_RADIOTAP_VENDOR_EXT */

	/*
	 * Setup the ALQ logging if required
	 */
#ifdef	ATH_DEBUG_ALQ
	if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev));
	if_ath_alq_setcfg(&sc->sc_alq,
	    sc->sc_ah->ah_macVersion,
	    sc->sc_ah->ah_macRev,
	    sc->sc_ah->ah_phyRev,
	    sc->sc_ah->ah_magic);
#endif

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);
	ath_sysctl_stats_attach(sc);
	ath_sysctl_hal_attach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
	ath_txdma_teardown(sc);
	ath_rxdma_teardown(sc);
bad:
	if (ah)
		ath_hal_detach(ah);

	/*
	 * To work around scoping issues with CURVNET_SET/CURVNET_RESTORE..
	 */
#if !defined(__DragonFly__)
	if (ifp != NULL && ifp->if_vnet) {
		CURVNET_SET(ifp->if_vnet);
		if_free(ifp);
		CURVNET_RESTORE();
	} else
#endif
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;
	return error;
}

int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);
#ifdef	ATH_DEBUG_ALQ
	if_ath_alq_tidyup(&sc->sc_alq);
#endif
	ath_lna_div_detach(sc);
	ath_btcoex_detach(sc);
	ath_spectral_detach(sc);
	ath_dfs_detach(sc);
	ath_desc_free(sc);
	ath_txdma_teardown(sc);
	ath_rxdma_teardown(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */

	CURVNET_SET(ifp->if_vnet);
	if_free(ifp);
	CURVNET_RESTORE();

	if (sc->sc_sysctl_tree) {
		sysctl_ctx_free(&sc->sc_sysctl_ctx);
		sc->sc_sysctl_tree = NULL;
	}

	return 0;
}

/*
 * MAC address handling for multiple BSS on the same radio.
 * The first vap uses the MAC address from the EEPROM.  For
 * subsequent vap's we set the U/L bit (bit 1) in the MAC
 * address and use the next six bits as an index.
 */
static void
assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
{
	int i;

	if (clone && sc->sc_hasbmask) {
		/* NB: we only do this if h/w supports multiple bssid */
		for (i = 0; i < 8; i++)
			if ((sc->sc_bssidmask & (1<<i)) == 0)
				break;
		if (i != 0)
			mac[0] |= (i << 2)|0x2;
	} else
		i = 0;
	sc->sc_bssidmask |= 1<<i;
	sc->sc_hwbssidmask[0] &= ~mac[0];
	if (i == 0)
		sc->sc_nbssid0++;
}

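/*
 * E.g. with an EEPROM address of 00:11:22:33:44:55 and i = 1 the first
 * free index, the clone gets mac[0] |= (1 << 2) | 0x2, yielding
 * 06:11:22:33:44:55; the hardware bssid mask is then narrowed so that
 * frames for either address are accepted.
 */
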
static void
reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
{
	int i = mac[0] >> 2;
	uint8_t mask;

	if (i != 0 || --sc->sc_nbssid0 == 0) {
		sc->sc_bssidmask &= ~(1<<i);
		/* recalculate bssid mask from remaining addresses */
		mask = 0xff;
		for (i = 1; i < 8; i++)
			if (sc->sc_bssidmask & (1<<i))
				mask &= ~((i<<2)|0x2);
		sc->sc_hwbssidmask[0] |= mask;
	}
}

/*
 * Assign a beacon xmit slot.  We try to space out
 * assignments so when beacons are staggered the
 * traffic coming out of the cab q has maximal time
 * to go out before the next beacon is scheduled.
 */
static int
assign_bslot(struct ath_softc *sc)
{
	u_int slot, free;

	free = 0;
	for (slot = 0; slot < ATH_BCBUF; slot++)
		if (sc->sc_bslot[slot] == NULL) {
			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
				return slot;
			free = slot;
			/* NB: keep looking for a double slot */
		}
	return free;
}

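/*
 * E.g. with ATH_BCBUF = 4 and only slot 0 occupied, slot 2 is chosen
 * since both of its neighbours (1 and 3) are free; slots 1 and 3 would
 * put the new beacon adjacent to the existing one.
 */
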
static struct ieee80211vap *
ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
    enum ieee80211_opmode opmode, int flags,
    const uint8_t bssid[IEEE80211_ADDR_LEN],
    const uint8_t mac0[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp;
	struct ieee80211vap *vap;
	uint8_t mac[IEEE80211_ADDR_LEN];
	int needbeacon, error;
	enum ieee80211_opmode ic_opmode;

	avp = (struct ath_vap *) kmalloc(sizeof(struct ath_vap),
	    M_80211_VAP, M_WAITOK | M_ZERO);
	needbeacon = 0;
	IEEE80211_ADDR_COPY(mac, mac0);

	ATH_LOCK(sc);
	ic_opmode = opmode;		/* default to opmode of new vap */
	switch (opmode) {
	case IEEE80211_M_STA:
		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
			goto bad;
		}
		if (sc->sc_nvaps) {
			/*
			 * With multiple vaps we must fall back
			 * to s/w beacon miss handling.
			 */
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		if (flags & IEEE80211_CLONE_NOBEACONS) {
			/*
			 * Station mode w/o beacons is implemented w/ AP mode.
			 */
			ic_opmode = IEEE80211_M_HOSTAP;
		}
		break;
	case IEEE80211_M_IBSS:
		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
			device_printf(sc->sc_dev,
			    "only 1 ibss vap supported\n");
			goto bad;
		}
		needbeacon = 1;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (flags & IEEE80211_CLONE_TDMA) {
			if (sc->sc_nvaps != 0) {
				device_printf(sc->sc_dev,
				    "only 1 tdma vap supported\n");
				goto bad;
			}
			needbeacon = 1;
			flags |= IEEE80211_CLONE_NOBEACONS;
		}
		/* fall thru... */
#endif
	case IEEE80211_M_MONITOR:
		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
			/*
			 * Adopt existing mode.  Adding a monitor or ahdemo
			 * vap to an existing configuration is of dubious
			 * value but should be ok.
			 */
			/* XXX not right for monitor mode */
			ic_opmode = ic->ic_opmode;
		}
		break;
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		needbeacon = 1;
		break;
	case IEEE80211_M_WDS:
		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
			device_printf(sc->sc_dev,
			    "wds not supported in sta mode\n");
			goto bad;
		}
		/*
		 * Silently remove any request for a unique
		 * bssid; WDS vap's always share the local
		 * mac address.
		 */
		flags &= ~IEEE80211_CLONE_BSSID;
		if (sc->sc_nvaps == 0)
			ic_opmode = IEEE80211_M_HOSTAP;
		else
			ic_opmode = ic->ic_opmode;
		break;
	default:
		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
		goto bad;
	}
	/*
	 * Check that a beacon buffer is available; the code below assumes it.
	 */
	if (needbeacon && TAILQ_EMPTY(&sc->sc_bbuf)) {
		device_printf(sc->sc_dev, "no beacon buffer available\n");
		goto bad;
	}

	/* STA, AHDEMO? */
	if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) {
		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
	}

	vap = &avp->av_vap;
	/* XXX can't hold mutex across if_alloc */
	ATH_UNLOCK(sc);
	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
	    bssid, mac);
	ATH_LOCK(sc);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
		    __func__, error);
		goto bad2;
	}

	/* h/w crypto support */
	vap->iv_key_alloc = ath_key_alloc;
	vap->iv_key_delete = ath_key_delete;
	vap->iv_key_set = ath_key_set;
	vap->iv_key_update_begin = ath_key_update_begin;
	vap->iv_key_update_end = ath_key_update_end;

	/* override various methods */
	avp->av_recv_mgmt = vap->iv_recv_mgmt;
	vap->iv_recv_mgmt = ath_recv_mgmt;
	vap->iv_reset = ath_reset_vap;
	vap->iv_update_beacon = ath_beacon_update;
	avp->av_newstate = vap->iv_newstate;
	vap->iv_newstate = ath_newstate;
	avp->av_bmiss = vap->iv_bmiss;
	vap->iv_bmiss = ath_bmiss_vap;

#if 0
	avp->av_node_ps = vap->iv_node_ps;
	vap->iv_node_ps = ath_node_powersave;
#endif

	avp->av_set_tim = vap->iv_set_tim;
	vap->iv_set_tim = ath_node_set_tim;

#if 0
	avp->av_recv_pspoll = vap->iv_recv_pspoll;
	vap->iv_recv_pspoll = ath_node_recv_pspoll;
#endif

	/* Set default parameters */

	/*
	 * Anything earlier than some AR9300 series MACs doesn't
	 * support a smaller MPDU density.
	 */
	vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_8;
	/*
	 * All NICs can handle the maximum size, however
	 * AR5416 based MACs can only TX aggregates w/ RTS
	 * protection when the total aggregate size is <= 8k.
	 * However, for now that's enforced by the TX path.
	 */
	vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;

	avp->av_bslot = -1;
	if (needbeacon) {
		/*
		 * Allocate beacon state and setup the q for buffered
		 * multicast frames.  We know a beacon buffer is
		 * available because we checked above.
		 */
		avp->av_bcbuf = TAILQ_FIRST(&sc->sc_bbuf);
		TAILQ_REMOVE(&sc->sc_bbuf, avp->av_bcbuf, bf_list);
		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
			/*
			 * Assign the vap to a beacon xmit slot.  As above
			 * this cannot fail to find a free one.
			 */
			avp->av_bslot = assign_bslot(sc);
			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
			    ("beacon slot %u not empty", avp->av_bslot));
			sc->sc_bslot[avp->av_bslot] = vap;
			sc->sc_nbcnvaps++;
		}
		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
			/*
			 * Multiple vaps are to transmit beacons and we
			 * have h/w support for TSF adjusting; enable
			 * use of staggered beacons.
			 */
			sc->sc_stagbeacons = 1;
		}
		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
	}

	ic->ic_opmode = ic_opmode;
	if (opmode != IEEE80211_M_WDS) {
		sc->sc_nvaps++;
		if (opmode == IEEE80211_M_STA)
			sc->sc_nstavaps++;
		if (opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps++;
	}
	switch (ic_opmode) {
	case IEEE80211_M_IBSS:
		sc->sc_opmode = HAL_M_IBSS;
		break;
	case IEEE80211_M_STA:
		sc->sc_opmode = HAL_M_STA;
		break;
	case IEEE80211_M_AHDEMO:
#ifdef IEEE80211_SUPPORT_TDMA
		if (vap->iv_caps & IEEE80211_C_TDMA) {
			sc->sc_tdma = 1;
			/* NB: disable tsf adjust */
			sc->sc_stagbeacons = 0;
		}
		/*
		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
		 * just ap mode.
		 */
		/* fall thru... */
#endif
	case IEEE80211_M_HOSTAP:
	case IEEE80211_M_MBSS:
		sc->sc_opmode = HAL_M_HOSTAP;
		break;
	case IEEE80211_M_MONITOR:
		sc->sc_opmode = HAL_M_MONITOR;
		break;
	default:
		/* XXX should not happen */
		break;
	}
	if (sc->sc_hastsfadd) {
		/*
		 * Configure whether or not TSF adjust should be done.
		 */
		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
	}
	if (flags & IEEE80211_CLONE_NOBEACONS) {
		/*
		 * Enable s/w beacon miss handling.
		 */
		sc->sc_swbmiss = 1;
	}
	ATH_UNLOCK(sc);

	/* complete setup */
	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
	return vap;
bad2:
	reclaim_address(sc, mac);
	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
bad:
	kfree(avp, M_80211_VAP);
	ATH_UNLOCK(sc);
	return NULL;
}

static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
	if (ifp->if_flags & IFF_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc, ATH_RESET_DEFAULT);	/* stop hw xmit side */
		/* XXX Do all frames from all vaps/nodes need draining here? */
		ath_stoprecv(sc, 1);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);

	/*
	 * XXX Danger Will Robinson! Danger!
	 *
	 * Because ieee80211_vap_detach() can queue a frame (the station
	 * disassociate message?) after we've drained the TXQ and
	 * flushed the software TXQ, we will end up with a frame queued
	 * to a node whose vap is about to be freed.
	 *
	 * To work around this, flush the hardware/software again.
	 * This may be racy - the ath task may be running and the packet
	 * may be being scheduled between sw->hw txq. Tsk.
	 *
	 * TODO: figure out why a new node gets allocated somewhere around
	 * here (after the ath_tx_swq() call; and after an ath_stop_locked()
	 * call!)
	 */

	ath_draintxq(sc, ATH_RESET_DEFAULT);

	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP ||
	    vap->iv_opmode == IEEE80211_M_MBSS) {
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
		if (vap->iv_opmode == IEEE80211_M_MBSS)
			sc->sc_nmeshvaps--;
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	kfree(avp, M_80211_VAP);

	if (ifp->if_flags & IFF_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma)
				ath_tdma_config(sc, NULL);
			else
#endif
				ath_beacon_config(sc, NULL);
		}
		ath_hal_intrset(ah, sc->sc_imask);
	}
	ATH_UNLOCK(sc);
}

void
ath_suspend(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;

	ieee80211_suspend_all(ic);
	/*
	 * NB: don't worry about putting the chip in low power
	 * mode; pci will power off our socket on suspend and
	 * CardBus detaches the device.
	 */

	/*
	 * XXX ensure none of the taskqueues are running
	 * XXX ensure sc_invalid is 1
	 * XXX ensure the calibration callout is disabled
	 */

	/* Disable the PCIe PHY, complete with workarounds */
	ath_hal_enablepcie(sc->sc_ah, 1, 1);
}

1570/*
1571 * Reset the key cache since some parts do not reset the
1572 * contents on resume. First we clear all entries, then
1573 * re-load keys that the 802.11 layer assumes are setup
1574 * in h/w.
1575 */
1576static void
1577ath_reset_keycache(struct ath_softc *sc)
1578{
1579 struct ifnet *ifp = sc->sc_ifp;
1580 struct ieee80211com *ic = ifp->if_l2com;
1581 struct ath_hal *ah = sc->sc_ah;
1582 int i;
1583
1584 for (i = 0; i < sc->sc_keymax; i++)
1585 ath_hal_keyreset(ah, i);
1586 ieee80211_crypto_reload_keys(ic);
193b341d
SZ
1587}
1588
/*
 * Fetch the current chainmask configuration based on the current
 * operating channel and options.
 */
static void
ath_update_chainmasks(struct ath_softc *sc, struct ieee80211_channel *chan)
{

	/*
	 * Set TX chainmask to the currently configured chainmask;
	 * the TX chainmask depends upon the current operating mode.
	 */
	sc->sc_cur_rxchainmask = sc->sc_rxchainmask;
	if (IEEE80211_IS_CHAN_HT(chan)) {
		sc->sc_cur_txchainmask = sc->sc_txchainmask;
	} else {
		sc->sc_cur_txchainmask = 1;
	}

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: TX chainmask is now 0x%x, RX is now 0x%x\n",
	    __func__,
	    sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
}

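/*
 * E.g. on a 2x2 NIC (chainmasks 0x3) tuned to a legacy 11a/b/g channel,
 * the routine above forces the TX chainmask down to 0x1 while leaving
 * the RX chainmask at 0x3.
 */
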
1615void
1616ath_resume(struct ath_softc *sc)
1617{
86877dfb
RP
1618 struct ifnet *ifp = sc->sc_ifp;
1619 struct ieee80211com *ic = ifp->if_l2com;
1620 struct ath_hal *ah = sc->sc_ah;
1621 HAL_STATUS status;
193b341d
SZ
1622
1623 DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1624 __func__, ifp->if_flags);
1625
572ff6f6
MD
1626 /* Re-enable PCIe, re-enable the PCIe bus */
1627 ath_hal_enablepcie(ah, 0, 0);
1628
86877dfb
RP
1629 /*
1630 * Must reset the chip before we reload the
1631 * keycache as we were powered down on suspend.
1632 */
572ff6f6
MD
1633 ath_update_chainmasks(sc,
1634 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan);
1635 ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
1636 sc->sc_cur_rxchainmask);
86877dfb
RP
1637 ath_hal_reset(ah, sc->sc_opmode,
1638 sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1639 AH_FALSE, &status);
1640 ath_reset_keycache(sc);
572ff6f6
MD
1641
1642 /* Let DFS at it in case it's a DFS channel */
1643 ath_dfs_radar_enable(sc, ic->ic_curchan);
1644
1645 /* Let spectral at in case spectral is enabled */
1646 ath_spectral_enable(sc, ic->ic_curchan);
1647
1648 /*
1649 * Let bluetooth coexistence at in case it's needed for this channel
1650 */
1651 ath_btcoex_enable(sc, ic->ic_curchan);
1652
1653 /*
1654 * If we're doing TDMA, enforce the TXOP limitation for chips that
1655 * support it.
1656 */
1657 if (sc->sc_hasenforcetxop && sc->sc_tdma)
1658 ath_hal_setenforcetxop(sc->sc_ah, 1);
1659 else
1660 ath_hal_setenforcetxop(sc->sc_ah, 0);
1661
1662 /* Restore the LED configuration */
1663 ath_led_config(sc);
1664 ath_hal_setledstate(ah, HAL_LED_INIT);
1665
1666 if (sc->sc_resume_up)
1667 ieee80211_resume_all(ic);
1668
1669 /* XXX beacons ? */
193b341d
SZ
1670}

void
ath_shutdown(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	ath_stop(ifp);
	/* NB: no point powering down chip as we're about to reboot */
}

/*
 * Interrupt handler.  Most of the actual processing is deferred.
 */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status = 0;
	uint32_t txqs;

	/*
	 * If we're inside a reset path, just print a warning and
	 * clear the ISR.  The reset routine will finish it for us.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		HAL_INT status;
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		DPRINTF(sc, ATH_DEBUG_ANY,
		    "%s: in reset, ignoring: status=0x%x\n",
		    __func__, status);
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		ATH_PCU_UNLOCK(sc);
		return;
	}
	if (!ath_hal_intrpend(ah)) {	/* shared irq, not for us */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_flags & IFF_RUNNING) == 0) {
		HAL_INT status;

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		ATH_PCU_UNLOCK(sc);
		return;
	}

	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to ensure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath_intr: mask=0x%.8x", status);
#ifdef	ATH_DEBUG_ALQ
	if_ath_alq_post_intr(&sc->sc_alq, status, ah->ah_intrstate,
	    ah->ah_syncstate);
#endif	/* ATH_DEBUG_ALQ */
#ifdef	ATH_KTR_INTR_DEBUG
	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 5,
	    "ath_intr: ISR=0x%.8x, ISR_S0=0x%.8x, ISR_S1=0x%.8x, ISR_S2=0x%.8x, ISR_S5=0x%.8x",
	    ah->ah_intrstate[0],
	    ah->ah_intrstate[1],
	    ah->ah_intrstate[2],
	    ah->ah_intrstate[3],
	    ah->ah_intrstate[6]);
#endif

	/* Squirrel away SYNC interrupt debugging */
	if (ah->ah_syncstate != 0) {
		int i;
		for (i = 0; i < 32; i++)
			if (ah->ah_syncstate & (1 << i))
				sc->sc_intr_stats.sync_intr[i]++;
	}

	status &= sc->sc_imask;			/* discard unasked-for bits */

	/* Short-circuit un-handled interrupts */
	if (status == 0x0) {
		ATH_PCU_UNLOCK(sc);
		return;
	}

	/*
	 * Take a note that we're inside the interrupt handler, so
	 * the reset routines know to wait.
	 */
	sc->sc_intr_cnt++;
	ATH_PCU_UNLOCK(sc);

	/*
	 * Handle the interrupt.  We won't run concurrent with the reset
	 * or channel change routines as they'll wait for sc_intr_cnt
	 * to be 0 before continuing.
	 */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma) {
				if (sc->sc_tdmaswba == 0) {
					struct ieee80211com *ic = ifp->if_l2com;
					struct ieee80211vap *vap =
					    TAILQ_FIRST(&ic->ic_vaps);
					ath_tdma_beacon_send(sc, vap);
					sc->sc_tdmaswba =
					    vap->iv_tdma->tdma_bintval;
				} else
					sc->sc_tdmaswba--;
			} else
#endif
			{
				ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
				/*
				 * Schedule the rx taskq in case there's no
				 * traffic so any frames held on the staging
				 * queue are aged and potentially flushed.
				 */
				sc->sc_rx.recv_sched(sc, 1);
#endif
			}
		}
		if (status & HAL_INT_RXEOL) {
			int imask;
			ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXEOL");
			ATH_PCU_LOCK(sc);
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			/*
			 * Disable RXEOL/RXORN - prevent an interrupt
			 * storm until the PCU logic can be reset.
			 * In case the interface is reset some other
			 * way before "sc_kickpcu" is called, don't
			 * modify sc_imask - that way if it is reset
			 * by a call to ath_reset() somehow, the
			 * interrupt mask will be correctly reprogrammed.
			 */
			imask = sc->sc_imask;
			imask &= ~(HAL_INT_RXEOL | HAL_INT_RXORN);
			ath_hal_intrset(ah, imask);
			/*
			 * Only blank sc_rxlink if we've not yet kicked
			 * the PCU.
			 *
			 * This isn't entirely correct - the correct solution
			 * would be to have a PCU lock and engage that for
			 * the duration of the PCU fiddling; which would include
			 * running the RX process.  Otherwise we could end up
			 * messing up the RX descriptor chain and making the
			 * RX desc list much shorter.
			 */
			if (! sc->sc_kickpcu)
				sc->sc_rxlink = NULL;
			sc->sc_kickpcu = 1;
			ATH_PCU_UNLOCK(sc);
			/*
			 * Enqueue an RX proc to handle whatever
			 * is in the RX queue.
			 * This will then kick the PCU.
			 */
			sc->sc_rx.recv_sched(sc, 1);
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		/*
		 * Handle both the legacy and RX EDMA interrupt bits.
		 * Note that HAL_INT_RXLP is also HAL_INT_RXDESC.
		 */
		if (status & (HAL_INT_RX | HAL_INT_RXHP | HAL_INT_RXLP)) {
			sc->sc_stats.ast_rx_intr++;
			sc->sc_rx.recv_sched(sc, 1);
		}
		if (status & HAL_INT_TX) {
			sc->sc_stats.ast_tx_intr++;
			/*
			 * Grab all the currently set bits in the HAL txq bitmap
			 * and blank them.  This is the only place we should be
			 * doing this.
			 */
			if (! sc->sc_isedma) {
				ATH_PCU_LOCK(sc);
				txqs = 0xffffffff;
				ath_hal_gettxintrtxqs(sc->sc_ah, &txqs);
				ATH_KTR(sc, ATH_KTR_INTERRUPTS, 3,
				    "ath_intr: TX; txqs=0x%08x, txq_active was 0x%08x, now 0x%08x",
				    txqs,
				    sc->sc_txq_active,
				    sc->sc_txq_active | txqs);
				sc->sc_txq_active |= txqs;
				ATH_PCU_UNLOCK(sc);
			}
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
		}
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_GTT)
			sc->sc_stats.ast_tx_timeout++;
		if (status & HAL_INT_CST)
			sc->sc_stats.ast_tx_cst++;
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			ATH_PCU_LOCK(sc);
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			/*
			 * Don't reset the interrupt if we've just
			 * kicked the PCU, or we may get a nested
			 * RXEOL before the rxproc has had a chance
			 * to run.
			 */
			if (sc->sc_kickpcu == 0)
				ath_hal_intrset(ah, sc->sc_imask);
			ATH_PCU_UNLOCK(sc);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			ATH_KTR(sc, ATH_KTR_ERROR, 0, "ath_intr: RXORN");
			sc->sc_stats.ast_rxorn++;
		}
	}
	ATH_PCU_LOCK(sc);
	sc->sc_intr_cnt--;
	ATH_PCU_UNLOCK(sc);
}
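
/*
 * For reference, ath_intr() above defers almost all processing:
 * HAL_INT_FATAL enqueues sc_fataltask, HAL_INT_TX enqueues sc_txtask,
 * HAL_INT_BMISS enqueues sc_bmisstask, and the RX bits are handed to
 * sc_rx.recv_sched(); only SWBA beacon transmission is handled inline
 * because deferring it is too slow under load.
 */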

static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t *state;
	u_int32_t len;
	void *sp;

	if_printf(ifp, "hardware error; resetting\n");
	/*
	 * Fatal errors are unrecoverable.  Typically these
	 * are caused by DMA errors.  Collect h/w state from
	 * the hal so we can diagnose what's going on.
	 */
	wlan_serialize_enter();
	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
		state = sp;
		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
		    state[0], state[1], state[2], state[3],
		    state[4], state[5]);
	}
	ath_reset(ifp, ATH_RESET_NOLOSS);
	wlan_serialize_exit();
}

static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		struct ifnet *ifp = vap->iv_ic->ic_ifp;
		struct ath_softc *sc = ifp->if_softc;
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* XXX should take a locked ref to iv_bss */
		u_int bmisstimeout =
			vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		if (tsf - lastrx <= bmisstimeout) {
			sc->sc_stats.ast_bmiss_phantom++;
			return;
		}
	}
	ATH_VAP(vap)->av_bmiss(vap);
}
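
/*
 * Worked example of the phantom-bmiss check above: with
 * iv_bmissthreshold = 10 and ni_intval = 100 TU, bmisstimeout is
 * 10 * 100 * 1024 = 1024000 usec (~1.02 s); a frame received within
 * that much of the current TSF suppresses the beacon miss.
 */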

int
ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
{
	uint32_t rsize;
	void *sp;

	if (!ath_hal_getdiagstate(ah, HAL_DIAG_CHECK_HANGS,
	    &mask, sizeof(mask), &sp, &rsize))
		return 0;
	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
	*hangs = *(uint32_t *)sp;
	return 1;
}

static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);

	/*
	 * Do a reset upon any beacon miss event.
	 *
	 * It may be a non-recognised RX clear hang which needs a reset
	 * to clear.
	 */
	wlan_serialize_enter();
	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
		ath_reset(ifp, ATH_RESET_NOLOSS);
		if_printf(ifp, "bb hang detected (0x%x), resetting\n", hangs);
	} else {
		ath_reset(ifp, ATH_RESET_NOLOSS);
		ieee80211_beacon_miss(ifp->if_l2com);
	}
	wlan_serialize_exit();
}

/*
 * Handle TKIP MIC setup to deal with hardware that doesn't do MIC
 * calcs together with WME.  If necessary disable the crypto
 * hardware and mark the 802.11 state so keys will be setup
 * with the MIC work done in software.
 */
static void
ath_settkipmic(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
		if (ic->ic_flags & IEEE80211_F_WME) {
			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
		} else {
			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		}
	}
}
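
/*
 * Net effect of the above: when WME is active on parts without
 * combined WME+TKIP MIC support, IEEE80211_CRYPTO_TKIPMIC is cleared
 * so net80211 computes the Michael MIC in software; otherwise the
 * hardware MIC engine is re-enabled.
 */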

static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	ath_update_chainmasks(sc, ic->ic_curchan);
	ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Let spectral at it in case spectral is enabled */
	ath_spectral_enable(sc, ic->ic_curchan);

	/*
	 * Let bluetooth coexistence at it in case it's needed for this
	 * channel.
	 */
	ath_btcoex_enable(sc, ic->ic_curchan);

	/*
	 * If we're doing TDMA, enforce the TXOP limitation for chips that
	 * support it.
	 */
	if (sc->sc_hasenforcetxop && sc->sc_tdma)
		ath_hal_setenforcetxop(sc->sc_ah, 1);
	else
		ath_hal_setenforcetxop(sc->sc_ah, 0);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	sc->sc_lastlongcal = 0;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;
	sc->sc_lastani = 0;
	sc->sc_lastshortcal = 0;
	sc->sc_doresetcal = AH_FALSE;
	/*
	 * Beacon timers were cleared here; give ath_newstate()
	 * a hint that the beacon timers should be poked when
	 * things transition to the RUN state.
	 */
	sc->sc_beacons = 0;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_TXURN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;

	/*
	 * Enable RX EDMA bits.  Note these overlap with
	 * HAL_INT_RX and HAL_INT_RXDESC respectively.
	 */
	if (sc->sc_isedma)
		sc->sc_imask |= (HAL_INT_RXHP | HAL_INT_RXLP);

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	/* Enable global TX timeout and carrier sense timeout if available */
	if (ath_hal_gtxto_supported(ah))
		sc->sc_imask |= HAL_INT_GTT;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: imask=0x%x\n",
		__func__, sc->sc_imask);

	ifp->if_flags |= IFF_RUNNING;
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	ath_hal_intrset(ah, sc->sc_imask);

	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}

static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_flags & IFF_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		callout_stop(&sc->sc_wd_ch);
		sc->sc_wd_timer = 0;
		ifp->if_flags &= ~IFF_RUNNING;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				callout_stop(&sc->sc_ledtimer);
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc, ATH_RESET_DEFAULT);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc, 1);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_beacon_free(sc);	/* XXX not needed */
	}
}

#define	MAX_TXRX_ITERATIONS	1000
static void
ath_txrx_stop_locked(struct ath_softc *sc)
{
	int i = MAX_TXRX_ITERATIONS;

	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_LOCK_ASSERT(sc);

	/*
	 * Sleep until all the pending operations have completed.
	 *
	 * The caller must ensure that the reset count (sc_inreset_cnt)
	 * has been incremented, or the pending operations may continue
	 * being queued.
	 */
	while (sc->sc_rxproc_cnt || sc->sc_txproc_cnt ||
	    sc->sc_txstart_cnt || sc->sc_intr_cnt) {
		if (i <= 0)
			break;
		wlan_serialize_sleep(sc, 0, "ath_txrx_stop", 1);
		i--;
	}

	if (i <= 0)
		device_printf(sc->sc_dev,
		    "%s: didn't finish after %d iterations\n",
		    __func__, MAX_TXRX_ITERATIONS);
}
#undef	MAX_TXRX_ITERATIONS

#if 0
static void
ath_txrx_stop(struct ath_softc *sc)
{
	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_UNLOCK_ASSERT(sc);

	ATH_PCU_LOCK(sc);
	ath_txrx_stop_locked(sc);
	ATH_PCU_UNLOCK(sc);
}
#endif

static void
ath_txrx_start(struct ath_softc *sc)
{

	taskqueue_unblock(sc->sc_tq);
}

/*
 * Grab the reset lock, and wait around until no one else
 * is trying to do anything with it.
 *
 * This is totally horrible but we can't hold this lock for
 * long enough to do TX/RX or we end up with net80211/ip stack
 * LORs and eventual deadlock.
 *
 * "dowait" signals whether to spin, waiting for the reset
 * lock count to reach 0.  This should (for now) only be used
 * during the reset path, as the rest of the code may not
 * be locking-reentrant enough to behave correctly.
 *
 * Another, cleaner way should be found to serialise all of
 * these operations.
 */
#define	MAX_RESET_ITERATIONS	10
static int
ath_reset_grablock(struct ath_softc *sc, int dowait)
{
	int w = 0;
	int i = MAX_RESET_ITERATIONS;

	ATH_PCU_LOCK_ASSERT(sc);
	do {
		if (sc->sc_inreset_cnt == 0) {
			w = 1;
			break;
		}
		if (dowait == 0) {
			w = 0;
			break;
		}
		ATH_PCU_UNLOCK(sc);
		wlan_serialize_sleep(sc, 0, "ath_reset_grablock", 1);
		i--;
		ATH_PCU_LOCK(sc);
	} while (i > 0);

	/*
	 * We always increment the refcounter, regardless
	 * of whether we succeeded in getting it in an exclusive
	 * way.
	 */
	sc->sc_inreset_cnt++;

	if (i <= 0)
		device_printf(sc->sc_dev,
		    "%s: didn't finish after %d iterations\n",
		    __func__, MAX_RESET_ITERATIONS);

	if (w == 0)
		device_printf(sc->sc_dev,
		    "%s: warning, recursive reset path!\n",
		    __func__);

	return w;
}
#undef	MAX_RESET_ITERATIONS
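
/*
 * Illustrative usage, mirroring ath_reset() below: grab the lock with
 * dowait=1 before stopping TX/RX, then drop the reference while
 * atomically re-enabling interrupts:
 *
 *	ATH_PCU_LOCK(sc);
 *	if (ath_reset_grablock(sc, 1) == 0)
 *		device_printf(sc->sc_dev, "%s: concurrent reset!\n",
 *		    __func__);
 *	...
 *	sc->sc_inreset_cnt--;
 *	ath_hal_intrset(ah, sc->sc_imask);
 *	ATH_PCU_UNLOCK(sc);
 */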

/*
 * XXX TODO: write ath_reset_releaselock
 */

static void
ath_stop(struct ifnet *ifp)
{
	struct ath_softc *sc __unused = ifp->if_softc;

	ATH_LOCK(sc);
	ath_stop_locked(ifp);
	ATH_UNLOCK(sc);
}

/*
 * Reset the hardware w/o losing operational state.  This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state.  Used to recover from various errors and
 * to reset or reload hardware state.
 */
int
ath_reset(struct ifnet *ifp, ATH_RESET_TYPE reset_type)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;
	int i;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);

	/* Ensure ATH_LOCK isn't held; ath_rx_proc can't be locked */
	ATH_PCU_UNLOCK_ASSERT(sc);
	ATH_UNLOCK_ASSERT(sc);

	/* Try to stop any further TX/RX from occurring */
	taskqueue_block(sc->sc_tq);

	ATH_PCU_LOCK(sc);

	/*
	 * Grab the reset lock before TX/RX is stopped.
	 *
	 * This is needed to ensure that when the TX/RX actually does finish,
	 * no further TX/RX/reset runs in parallel with this.
	 */
	if (ath_reset_grablock(sc, 1) == 0) {
		device_printf(sc->sc_dev, "%s: concurrent reset! Danger!\n",
		    __func__);
	}

	/* disable interrupts */
	ath_hal_intrset(ah, 0);

	/*
	 * Now, ensure that any in progress TX/RX completes before we
	 * continue.
	 */
	ath_txrx_stop_locked(sc);

	ATH_PCU_UNLOCK(sc);

	/*
	 * Should now wait for pending TX/RX to complete
	 * and block future ones from occurring.  This needs to be
	 * done before the TX queue is drained.
	 */
	ath_draintxq(sc, reset_type);	/* stop xmit side */

	/*
	 * Regardless of whether we're doing a no-loss flush or
	 * not, stop the PCU and handle what's in the RX queue.
	 * That way frames aren't dropped which shouldn't be.
	 */
	ath_stoprecv(sc, (reset_type != ATH_RESET_NOLOSS));
	ath_rx_flush(sc);

	ath_settkipmic(sc);		/* configure TKIP MIC handling */
	/* NB: indicate channel change so we do a full reset */
	ath_update_chainmasks(sc, ic->ic_curchan);
	ath_hal_setchainmasks(sc->sc_ah, sc->sc_cur_txchainmask,
	    sc->sc_cur_rxchainmask);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
			__func__, status);
	sc->sc_diversity = ath_hal_getdiversity(ah);

	/* Let DFS at it in case it's a DFS channel */
	ath_dfs_radar_enable(sc, ic->ic_curchan);

	/* Let spectral at it in case spectral is enabled */
	ath_spectral_enable(sc, ic->ic_curchan);

	/*
	 * Let bluetooth coexistence at it in case it's needed for this
	 * channel.
	 */
	ath_btcoex_enable(sc, ic->ic_curchan);

	/*
	 * If we're doing TDMA, enforce the TXOP limitation for chips that
	 * support it.
	 */
	if (sc->sc_hasenforcetxop && sc->sc_tdma)
		ath_hal_setenforcetxop(sc->sc_ah, 1);
	else
		ath_hal_setenforcetxop(sc->sc_ah, 0);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, ic->ic_curchan);
	if (sc->sc_beacons) {		/* restart beacons */
#ifdef IEEE80211_SUPPORT_TDMA
		if (sc->sc_tdma)
			ath_tdma_config(sc, NULL);
		else
#endif
			ath_beacon_config(sc, NULL);
	}

	/*
	 * Release the reset lock and re-enable interrupts here.
	 * If an interrupt was being processed in ath_intr(),
	 * it would disable interrupts at this point.  So we have
	 * to atomically enable interrupts and decrement the
	 * reset counter - this way ath_intr() doesn't end up
	 * disabling interrupts without a corresponding enable
	 * in the reset or channel change path.
	 */
	ATH_PCU_LOCK(sc);
	sc->sc_inreset_cnt--;
	/* XXX only do this if sc_inreset_cnt == 0? */
	ath_hal_intrset(ah, sc->sc_imask);
	ATH_PCU_UNLOCK(sc);

	/*
	 * TX and RX can be started here.  If it were started with
	 * sc_inreset_cnt > 0, the TX and RX path would abort.
	 * Thus if this is a nested call through the reset or
	 * channel change code, TX completion will occur but
	 * RX completion and ath_start / ath_tx_start will not
	 * run.
	 */

	/* Restart TX/RX as needed */
	ath_txrx_start(sc);

	/* Restart TX completion and pending TX */
	if (reset_type == ATH_RESET_NOLOSS) {
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ATH_TXQ_LOCK(&sc->sc_txq[i]);
				ath_txq_restart_dma(sc, &sc->sc_txq[i]);
				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);

				ATH_TX_LOCK(sc);
				ath_txq_sched(sc, &sc->sc_txq[i]);
				ATH_TX_UNLOCK(sc);
			}
		}
	}

	/*
	 * This may have been set during an ath_start() call which
	 * set this once it detected a concurrent TX was going on.
	 * So, clear it.
	 */
	IF_LOCK(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);
	IF_UNLOCK(&ifp->if_snd);

	/* Handle any frames in the TX queue */
	/*
	 * XXX should this be done by the caller, rather than
	 * ath_reset() ?
	 */
	ath_tx_kick(sc);		/* restart xmit */
	return 0;
}

static int
ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	switch (cmd) {
	case IEEE80211_IOC_TXPOWER:
		/*
		 * If per-packet TPC is enabled, then we have nothing
		 * to do; otherwise we need to force the global limit.
		 * All this can happen directly; no need to reset.
		 */
		if (!ath_hal_gettpc(ah))
			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
		return 0;
	}
	/* XXX? Full or NOLOSS? */
	return ath_reset(ifp, ATH_RESET_FULL);
}

struct ath_buf *
_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
{
	struct ath_buf *bf;

	ATH_TXBUF_LOCK_ASSERT(sc);

	if (btype == ATH_BUFTYPE_MGMT)
		bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
	else
		bf = TAILQ_FIRST(&sc->sc_txbuf);

	if (bf == NULL) {
		sc->sc_stats.ast_tx_getnobuf++;
	} else {
		if (bf->bf_flags & ATH_BUF_BUSY) {
			sc->sc_stats.ast_tx_getbusybuf++;
			bf = NULL;
		}
	}

	if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
		if (btype == ATH_BUFTYPE_MGMT)
			TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
		else {
			TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
			sc->sc_txbuf_cnt--;

			/*
			 * This shouldn't happen; however just to be
			 * safe print a warning and fudge the txbuf
			 * count.
			 */
			if (sc->sc_txbuf_cnt < 0) {
				device_printf(sc->sc_dev,
				    "%s: sc_txbuf_cnt < 0?\n",
				    __func__);
				sc->sc_txbuf_cnt = 0;
			}
		}
	} else
		bf = NULL;

	if (bf == NULL) {
		/* XXX should check which list, mgmt or otherwise */
		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
		    TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
		    "out of xmit buffers" : "xmit buffer busy");
		return NULL;
	}

	/* XXX TODO: should do this at buffer list initialisation */
	/* XXX (then, ensure the buffer has the right flag set) */
	bf->bf_flags = 0;
	if (btype == ATH_BUFTYPE_MGMT)
		bf->bf_flags |= ATH_BUF_MGMT;
	else
		bf->bf_flags &= (~ATH_BUF_MGMT);

	/* Valid bf here; clear some basic fields */
	bf->bf_next = NULL;	/* XXX just to be sure */
	bf->bf_last = NULL;	/* XXX again, just to be sure */
	bf->bf_comp = NULL;	/* XXX again, just to be sure */
	bzero(&bf->bf_state, sizeof(bf->bf_state));

	/*
	 * Track the descriptor ID only if doing EDMA
	 */
	if (sc->sc_isedma) {
		bf->bf_descid = sc->sc_txbuf_descid;
		sc->sc_txbuf_descid++;
	}

	return bf;
}

/*
 * When retrying a software frame, buffers marked ATH_BUF_BUSY
 * can't be thrown back on the queue as they could still be
 * in use by the hardware.
 *
 * This duplicates the buffer, or returns NULL.
 *
 * The descriptor is also copied but the link pointers and
 * the DMA segments aren't copied; this frame should thus
 * be again passed through the descriptor setup/chain routines
 * so the link is correct.
 *
 * The caller must free the buffer using ath_freebuf().
 */
struct ath_buf *
ath_buf_clone(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_buf *tbf;

	tbf = ath_getbuf(sc,
	    (bf->bf_flags & ATH_BUF_MGMT) ?
	     ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL);
	if (tbf == NULL)
		return NULL;	/* XXX failure? Why? */

	/* Copy basics */
	tbf->bf_next = NULL;
	tbf->bf_nseg = bf->bf_nseg;
	tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE;
	tbf->bf_status = bf->bf_status;
	tbf->bf_m = bf->bf_m;
	tbf->bf_node = bf->bf_node;
	/* will be setup by the chain/setup function */
	tbf->bf_lastds = NULL;
	/* for now, last == self */
	tbf->bf_last = tbf;
	tbf->bf_comp = bf->bf_comp;

	/* NOTE: DMA segments will be setup by the setup/chain functions */

	/* The caller has to re-init the descriptor + links */

	/*
	 * Free the DMA mapping here, before we NULL the mbuf.
	 * We must only call bus_dmamap_unload() once per mbuf chain
	 * or behaviour is undefined.
	 */
	if (bf->bf_m != NULL) {
		/*
		 * XXX is this POSTWRITE call required?
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
	}

	bf->bf_m = NULL;
	bf->bf_node = NULL;

	/* Copy state */
	memcpy(&tbf->bf_state, &bf->bf_state, sizeof(bf->bf_state));

	return tbf;
}

struct ath_buf *
ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
{
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	bf = _ath_getbuf_locked(sc, btype);
	/*
	 * If a mgmt buffer was requested but we're out of those,
	 * try requesting a normal one.
	 */
	if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
	ATH_TXBUF_UNLOCK(sc);
	if (bf == NULL) {
		struct ifnet *ifp = sc->sc_ifp;

		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
		sc->sc_stats.ast_tx_qstop++;
		IF_LOCK(&ifp->if_snd);
		ifq_set_oactive(&ifp->if_snd);
		IF_UNLOCK(&ifp->if_snd);
	}
	return bf;
}

#if 0
static void
ath_qflush(struct ifnet *ifp)
{

	/* XXX TODO */
}

/*
 * Transmit a single frame.
 *
 * net80211 will free the node reference if the transmit
 * fails, so don't free the node reference here.
 */
static int
ath_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ieee80211_node *ni;
	struct mbuf *next;
	struct ath_buf *bf;
	ath_bufhead frags;
	int retval = 0;

	/*
	 * Tell the reset path that we're currently transmitting.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: sc_inreset_cnt > 0; bailing\n", __func__);
		ATH_PCU_UNLOCK(sc);
		IF_LOCK(&ifp->if_snd);
		sc->sc_stats.ast_tx_qstop++;
		ifq_set_oactive(&ifp->if_snd);
		IF_UNLOCK(&ifp->if_snd);
		ATH_KTR(sc, ATH_KTR_TX, 0, "ath_start_task: OACTIVE, finish");
		return (ENOBUFS);	/* XXX should be EINVAL or? */
	}
	sc->sc_txstart_cnt++;
	ATH_PCU_UNLOCK(sc);

	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: start");
	/*
	 * Grab the TX lock - it's ok to do this here; we haven't
	 * yet started transmitting.
	 */
	ATH_TX_LOCK(sc);

	/*
	 * Node reference, if there's one.
	 */
	ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;

	/*
	 * Enforce how deep a node queue can get.
	 *
	 * XXX it would be nicer if we kept an mbuf queue per
	 * node and only whacked them into ath_bufs when we
	 * are ready to schedule some traffic from them.
	 * .. that may come later.
	 *
	 * XXX we should also track the per-node hardware queue
	 * depth so it is easy to limit the _SUM_ of the swq and
	 * hwq frames.  Since we only schedule two HWQ frames
	 * at a time, this should be OK for now.
	 */
	if ((!(m->m_flags & M_EAPOL)) &&
	    (ATH_NODE(ni)->an_swq_depth > sc->sc_txq_node_maxdepth)) {
		sc->sc_stats.ast_tx_nodeq_overflow++;
		m_freem(m);
		m = NULL;
		retval = ENOBUFS;
		goto finish;
	}

	/*
	 * Check how many TX buffers are available.
	 *
	 * If this is for non-EAPOL traffic, just leave some
	 * space free in order for buffer cloning and raw
	 * frame transmission to occur.
	 *
	 * If it's for EAPOL traffic, ignore this for now.
	 * Management traffic will be sent via the raw transmit
	 * method which bypasses this check.
	 *
	 * This is needed to ensure that EAPOL frames during
	 * (re) keying have a chance to go out.
	 *
	 * See kern/138379 for more information.
	 */
	if ((!(m->m_flags & M_EAPOL)) &&
	    (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree)) {
		sc->sc_stats.ast_tx_nobuf++;
		m_freem(m);
		m = NULL;
		retval = ENOBUFS;
		goto finish;
	}

	/*
	 * Grab a TX buffer and associated resources.
	 *
	 * If it's an EAPOL frame, allocate a MGMT ath_buf.
	 * That way temporary buffer exhaustion in the data path
	 * doesn't leave us without the ability to transmit
	 * management frames.
	 *
	 * Otherwise allocate a normal buffer.
	 */
	if (m->m_flags & M_EAPOL)
		bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
	else
		bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);

	if (bf == NULL) {
		/*
		 * If we failed to allocate a buffer, fail.
		 *
		 * We shouldn't fail normally, due to the check
		 * above.
		 */
		sc->sc_stats.ast_tx_nobuf++;
		IF_LOCK(&ifp->if_snd);
		ifq_set_oactive(&ifp->if_snd);
		IF_UNLOCK(&ifp->if_snd);
		m_freem(m);
		m = NULL;
		retval = ENOBUFS;
		goto finish;
	}

	/*
	 * At this point we have a buffer; so we need to free it
	 * if we hit any error conditions.
	 */

	/*
	 * Check for fragmentation.  If this frame
	 * has been broken up verify we have enough
	 * buffers to send all the fragments so all
	 * go out or none...
	 */
	TAILQ_INIT(&frags);
	if ((m->m_flags & M_FRAG) &&
	    !ath_txfrag_setup(sc, &frags, m, ni)) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: out of txfrag buffers\n", __func__);
		sc->sc_stats.ast_tx_nofrag++;
		ifp->if_oerrors++;
		ath_freetx(m);
		goto bad;
	}

	/*
	 * At this point if we have any TX fragments, then we will
	 * have bumped the node reference once for each of those.
	 */

	/*
	 * XXX Is there anything actually _enforcing_ that the
	 * fragments are being transmitted in one hit, rather than
	 * being interleaved with other transmissions on that
	 * hardware queue?
	 *
	 * The ATH TX output lock is the only thing serialising this
	 * right now.
	 */

	/*
	 * Calculate the "next fragment" length field in ath_buf
	 * in order to let the transmit path know enough about
	 * what to next write to the hardware.
	 */
	if (m->m_flags & M_FRAG) {
		struct ath_buf *fbf = bf;
		struct ath_buf *n_fbf = NULL;
		struct mbuf *fm = m->m_nextpkt;

		/*
		 * We need to walk the list of fragments and set
		 * the next size to the following buffer.
		 * However, the first buffer isn't in the frag
		 * list, so we have to do some gymnastics here.
		 */
		TAILQ_FOREACH(n_fbf, &frags, bf_list) {
			fbf->bf_nextfraglen = fm->m_pkthdr.len;
			fbf = n_fbf;
			fm = fm->m_nextpkt;
		}
	}
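
	/*
	 * Example of the walk above: for a frame fragmented into
	 * m0 -> m1 -> m2 (chained via m_nextpkt), bf0 gets
	 * bf_nextfraglen = len(m1) and bf1 gets len(m2); the last
	 * fragment's bf_nextfraglen is left at its initial value,
	 * as there is no following fragment.
	 */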

	/*
	 * Bump the ifp output counter.
	 *
	 * XXX should use atomics?
	 */
	ifp->if_opackets++;
nextfrag:
	/*
	 * Pass the frame to the h/w for transmission.
	 * Fragmented frames have each frag chained together
	 * with m_nextpkt.  We know there are sufficient ath_buf's
	 * to send all the frags because of work done by
	 * ath_txfrag_setup.  We leave m_nextpkt set while
	 * calling ath_tx_start so it can use it to extend the
	 * tx duration to cover the subsequent frag and
	 * so it can reclaim all the mbufs in case of an error;
	 * ath_tx_start clears m_nextpkt once it commits to
	 * handing the frame to the hardware.
	 *
	 * Note: if this fails, then the mbufs are freed but
	 * not the node reference.
	 */
	next = m->m_nextpkt;
	if (ath_tx_start(sc, ni, bf, m)) {
bad:
		ifp->if_oerrors++;
reclaim:
		bf->bf_m = NULL;
		bf->bf_node = NULL;
		ATH_TXBUF_LOCK(sc);
		ath_returnbuf_head(sc, bf);
		/*
		 * Free the rest of the node references and
		 * buffers for the fragment list.
		 */
		ath_txfrag_cleanup(sc, &frags, ni);
		ATH_TXBUF_UNLOCK(sc);
		retval = ENOBUFS;
		goto finish;
	}

	/*
	 * Check here if the node is in power save state.
	 */
	ath_tx_update_tim(sc, ni, 1);

	if (next != NULL) {
		/*
		 * Beware of state changing between frags.
		 * XXX check sta power-save state?
		 */
		if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: flush fragmented packet, state %s\n",
			    __func__,
			    ieee80211_state_name[ni->ni_vap->iv_state]);
			/* XXX dmamap */
			ath_freetx(next);
			goto reclaim;
		}
		m = next;
		bf = TAILQ_FIRST(&frags);
		KASSERT(bf != NULL, ("no buf for txfrag"));
		TAILQ_REMOVE(&frags, bf, bf_list);
		goto nextfrag;
	}

	/*
	 * Bump watchdog timer.
	 */
	sc->sc_wd_timer = 5;

finish:
	ATH_TX_UNLOCK(sc);

	/*
	 * Finished transmitting!
	 */
	ATH_PCU_LOCK(sc);
	sc->sc_txstart_cnt--;
	ATH_PCU_UNLOCK(sc);

	ATH_KTR(sc, ATH_KTR_TX, 0, "ath_transmit: finished");

	return (retval);
}
#endif

static int
ath_media_change(struct ifnet *ifp)
{
	int error = ieee80211_media_change(ifp);
	/* NB: only the fixed rate can change and that doesn't need a reset */
	return (error == ENETRESET ? 0 : error);
}

/*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
static void
ath_key_update_begin(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	taskqueue_block(sc->sc_tq);
	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
}

static void
ath_key_update_end(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	IF_UNLOCK(&ifp->if_snd);
	taskqueue_unblock(sc->sc_tq);
}

static void
ath_update_promisc(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(sc->sc_ah, rfilt);

	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
}

static void
ath_update_mcast(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t mfilt[2];

	/* calculate and install multicast filter */
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		struct ifmultiaddr *ifma;
		/*
		 * Merge multicast addresses to form the hardware filter.
		 */
		mfilt[0] = mfilt[1] = 0;
#if 0
		if_maddr_rlock(ifp);	/* XXX need some fiddling to remove? */
#endif
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			caddr_t dl;
			u_int32_t val;
			u_int8_t pos;

			/* calculate XOR of eight 6bit values */
			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			val = LE_READ_4(dl + 0);
			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			val = LE_READ_4(dl + 3);
			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			pos &= 0x3f;
			mfilt[pos / 32] |= (1 << (pos % 32));
		}
#if 0
		if_maddr_runlock(ifp);
#endif
	} else
		mfilt[0] = mfilt[1] = ~0;
	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
		__func__, mfilt[0], mfilt[1]);
}
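
/*
 * The hash above folds each 48-bit multicast address into a 6-bit
 * index (0..63) by XORing 6-bit slices of the address, then sets the
 * corresponding bit in the 64-bit mfilt[] pair.  The hardware passes
 * any frame whose destination hashes onto a set bit, so the filter is
 * approximate and may admit extra multicast frames.
 */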

void
ath_mode_init(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	u_int32_t rfilt;

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath_hal_setrxfilter(ah, rfilt);

	/* configure operational mode */
	ath_hal_setopmode(ah);

	DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE,
	    "%s: ah=%p, ifp=%p, if_addr=%p\n",
	    __func__,
	    ah,
	    ifp,
	    (ifp == NULL) ? NULL : ifp->if_addr);

	/* handle any link-level address change */
	ath_hal_setmac(ah, IF_LLADDR(ifp));

	/* calculate and install multicast filter */
	ath_update_mcast(ifp);
}

/*
 * Set the slot time based on the current setting.
 */
void
ath_setslottime(struct ath_softc *sc)
{
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	u_int usec;

	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
		usec = 13;
	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
		usec = 21;
	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
		/* honor short/long slot time only in 11g */
		/* XXX shouldn't honor on pure g or turbo g channel */
		if (ic->ic_flags & IEEE80211_F_SHSLOT)
			usec = HAL_SLOT_TIME_9;
		else
			usec = HAL_SLOT_TIME_20;
	} else
		usec = HAL_SLOT_TIME_9;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);

	ath_hal_setslottime(ah, usec);
	sc->sc_updateslot = OK;
}
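
/*
 * Resulting slot times, for reference: 13 usec on half-clocked
 * channels, 21 usec on quarter-clocked channels, 9 or 20 usec
 * (short/long) on 11g channels, and 9 usec for everything else.
 */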

/*
 * Callback from the 802.11 layer to update the
 * slot time based on the current setting.
 */
static void
ath_updateslot(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;

	/*
	 * When not coordinating the BSS, change the hardware
	 * immediately.  For other operation we defer the change
	 * until beacon updates have propagated to the stations.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP ||
	    ic->ic_opmode == IEEE80211_M_MBSS)
		sc->sc_updateslot = UPDATE;
	else
		ath_setslottime(sc);
}

/*
 * Append the contents of src to dst; both queues
 * are assumed to be locked.
 */
void
ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
{

	ATH_TXQ_LOCK_ASSERT(src);
	ATH_TXQ_LOCK_ASSERT(dst);

	TAILQ_CONCAT(&dst->axq_q, &src->axq_q, bf_list);
	dst->axq_link = src->axq_link;
	src->axq_link = NULL;
	dst->axq_depth += src->axq_depth;
	dst->axq_aggr_depth += src->axq_aggr_depth;
	src->axq_depth = 0;
	src->axq_aggr_depth = 0;
}

/*
 * Reset the hardware, with no loss.
 *
 * This can't be used for a general case reset.
 */
static void
ath_reset_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

#if 0
	if_printf(ifp, "%s: resetting\n", __func__);
#endif
	wlan_serialize_enter();
	ath_reset(ifp, ATH_RESET_NOLOSS);
	wlan_serialize_exit();
}

/*
 * Reset the hardware after detecting beacons have stopped.
 */
static void
ath_bstuck_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	uint32_t hangs = 0;

	wlan_serialize_enter();
	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0)
		if_printf(ifp, "bb hang detected (0x%x)\n", hangs);

#ifdef ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_STUCK_BEACON))
		if_ath_alq_post(&sc->sc_alq, ATH_ALQ_STUCK_BEACON, 0, NULL);
#endif

	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
		sc->sc_bmisscount);
	sc->sc_stats.ast_bstuck++;
	/*
	 * This assumes that there's no simultaneous channel mode change
	 * occurring.
	 */
	ath_reset(ifp, ATH_RESET_NOLOSS);
	wlan_serialize_exit();
}

static void
ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	KASSERT(error == 0, ("error %u on bus_dma callback", error));
	*paddr = segs->ds_addr;
}

/*
 * Allocate the descriptors and appropriate DMA tag/setup.
 *
 * For some situations (eg EDMA TX completion), there isn't a requirement
 * for the ath_buf entries to be allocated.
 */
int
ath_descdma_alloc_desc(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head,
	const char *name, int ds_size, int ndesc)
{
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define	ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
	((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
	struct ifnet *ifp = sc->sc_ifp;
	int error;

	dd->dd_descsize = ds_size;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: %s DMA: %u desc, %d bytes per descriptor\n",
	    __func__, name, ndesc, dd->dd_descsize);

	dd->dd_name = name;
	dd->dd_desc_len = dd->dd_descsize * ndesc;

	/*
	 * Merlin work-around:
	 * Descriptors that cross the 4KB boundary can't be used.
	 * Assume one skipped descriptor per 4KB page.
	 */
	if (! ath_hal_split4ktrans(sc->sc_ah)) {
		int numpages = dd->dd_desc_len / 4096;
		dd->dd_desc_len += ds_size * numpages;
	}

	/*
	 * Setup DMA descriptor area.
	 *
	 * BUS_DMA_ALLOCNOW is not used; we never use bounce
	 * buffers for the descriptors themselves.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
		       PAGE_SIZE, 0,		/* alignment, bounds */
		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		       BUS_SPACE_MAXADDR,	/* highaddr */
		       NULL, NULL,		/* filter, filterarg */
		       dd->dd_desc_len,		/* maxsize */
		       1,			/* nsegments */
		       dd->dd_desc_len,		/* maxsegsize */
		       0,			/* flags */
		       &dd->dd_dmat);
	if (error != 0) {
		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
		return error;
	}

	/* allocate descriptors */
	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
				 &dd->dd_dmamap);
	if (error != 0) {
		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
			"error %u\n", ndesc, dd->dd_name, error);
		goto fail1;
	}

	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
				dd->dd_desc, dd->dd_desc_len,
				ath_load_cb, &dd->dd_desc_paddr,
				BUS_DMA_NOWAIT);
	if (error != 0) {
		if_printf(ifp, "unable to map %s descriptors, error %u\n",
			dd->dd_name, error);
		goto fail2;
	}

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
	    __func__, dd->dd_name, (uint8_t *) dd->dd_desc,
	    (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr,
	    /*XXX*/ (u_long) dd->dd_desc_len);

	return (0);

fail2:
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
fail1:
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	return error;
#undef	DS2PHYS
#undef	ATH_DESC_4KB_BOUND_CHECK
}
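
/*
 * Worked example of the Merlin constraint handled above and in
 * ath_descdma_setup(): with a 64-byte (0x40) descriptor,
 * ATH_DESC_4KB_BOUND_CHECK(0xFE0, 0x40) evaluates to 1 because the
 * descriptor would span 0xFE0..0x101F and cross a 4KB page, so the
 * setup code skips ahead to the next page boundary instead.
 */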

int
ath_descdma_setup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head,
	const char *name, int ds_size, int nbuf, int ndesc)
{
#define	DS2PHYS(_dd, _ds) \
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define	ATH_DESC_4KB_BOUND_CHECK(_daddr, _len) \
	((((u_int32_t)(_daddr) & 0xFFF) > (0x1000 - (_len))) ? 1 : 0)
	struct ifnet *ifp = sc->sc_ifp;
	uint8_t *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	/* Allocate descriptors */
	error = ath_descdma_alloc_desc(sc, dd, head, name, ds_size,
	    nbuf * ndesc);

	/* Assume any errors during allocation were dealt with */
	if (error != 0) {
		return (error);
	}

	ds = (uint8_t *) dd->dd_desc;

	/* allocate rx buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT|M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %s buffers failed, size %u\n",
			dd->dd_name, bsize);
		goto fail3;
	}
	dd->dd_bufptr = bf;

	TAILQ_INIT(head);
	for (i = 0; i < nbuf; i++, bf++, ds += (ndesc * dd->dd_descsize)) {
		bf->bf_desc = (struct ath_desc *) ds;
		bf->bf_daddr = DS2PHYS(dd, ds);
		if (! ath_hal_split4ktrans(sc->sc_ah)) {
			/*
			 * Merlin WAR: Skip descriptor addresses which
			 * cause 4KB boundary crossing along any point
			 * in the descriptor.
			 */
			if (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr,
			    dd->dd_descsize)) {
				/* Start at the next page */
				ds += 0x1000 - (bf->bf_daddr & 0xFFF);
				bf->bf_desc = (struct ath_desc *) ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
				&bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for %s "
				"buffer %u, error %u\n", dd->dd_name, i, error);
			ath_descdma_cleanup(sc, dd, head);
			return error;
		}
		bf->bf_lastds = bf->bf_desc;	/* Just an initial value */
		TAILQ_INSERT_TAIL(head, bf, bf_list);
	}

	/*
	 * XXX TODO: ensure that ds doesn't overflow the descriptor
	 * allocation otherwise weird stuff will occur and crash your
	 * machine.
	 */
	return 0;
	/* XXX this should likely just call ath_descdma_cleanup() */
fail3:
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);
	memset(dd, 0, sizeof(*dd));
	return error;
#undef	DS2PHYS
#undef	ATH_DESC_4KB_BOUND_CHECK
}
SZ
3444}
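/*
 * Typical call, shown as a sketch only (the softc field names and the
 * ath_rxbuf count are illustrative assumptions, not taken from this file):
 *
 *	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
 *	    "rx", sizeof(struct ath_desc), ath_rxbuf, 1);
 *
 * This carves nbuf * ndesc descriptors out of one DMA-coherent allocation
 * and queues nbuf ath_buf entries, each with its own DMA map, onto head.
 */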

/*
 * Allocate ath_buf entries but no descriptor contents.
 *
 * This is for RX EDMA where the descriptors are the header part of
 * the RX buffer.
 */
int
ath_descdma_setup_rx_edma(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head,
	const char *name, int nbuf, int rx_status_len)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_buf *bf;
	int i, bsize, error;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers\n",
	    __func__, name, nbuf);

	dd->dd_name = name;
	/*
	 * This is mostly for show.  We're not allocating any actual
	 * descriptors here, since with EDMA RX the descriptor is part
	 * of the RX buffer.
	 *
	 * However, dd_desc_len is used by ath_descdma_free() to determine
	 * whether we have already freed this DMA mapping.
	 */
	dd->dd_desc_len = rx_status_len * nbuf;
	dd->dd_descsize = rx_status_len;

	/* allocate rx buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, M_ATHDEV, M_INTWAIT | M_ZERO);
	if (bf == NULL) {
		if_printf(ifp, "malloc of %s buffers failed, size %u\n",
		    dd->dd_name, bsize);
		error = ENOMEM;
		goto fail3;
	}
	dd->dd_bufptr = bf;

	TAILQ_INIT(head);
	for (i = 0; i < nbuf; i++, bf++) {
		bf->bf_desc = NULL;
		bf->bf_daddr = 0;
		bf->bf_lastds = NULL;	/* Just an initial value */

		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
		    &bf->bf_dmamap);
		if (error != 0) {
			if_printf(ifp, "unable to create dmamap for %s "
			    "buffer %u, error %u\n", dd->dd_name, i, error);
			ath_descdma_cleanup(sc, dd, head);
			return error;
		}
		TAILQ_INSERT_TAIL(head, bf, bf_list);
	}
	return 0;
fail3:
	memset(dd, 0, sizeof(*dd));
	return error;
}
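/*
 * Typical call, again a sketch (the field names, the ath_rxbuf count and
 * the sc_rx_statuslen source are assumptions, not taken from this file):
 *
 *	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
 *	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
 *
 * No descriptor memory is allocated; each ath_buf only receives a DMA
 * map, as the EDMA hardware writes its RX status into the buffer itself.
 */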

void
ath_descdma_cleanup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head)
{
	struct ath_buf *bf;
	struct ieee80211_node *ni;
	int do_warning = 0;

	if (dd->dd_dmamap != 0) {
		bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
		bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
		bus_dma_tag_destroy(dd->dd_dmat);
	}

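	/*
	 * The teardown above mirrors the setup path in reverse: unload the
	 * DMA map, free the memory obtained from bus_dmamem_alloc()
	 * (together with the map that call created), then destroy the tag.
	 */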
	if (head != NULL) {
		TAILQ_FOREACH(bf, head, bf_list) {
			if (bf->bf_m) {
				/*
				 * XXX warn if there are still buffers here;
				 * XXX they should have been freed by the
				 * XXX owner!
				 */

				if (do_warning == 0) {
					do_warning = 1;
					device_printf(sc->sc_dev,
					    "%s: %s: mbuf should've been"
					    " unmapped/freed!\n",
					    __func__,
					    dd->dd_name);
				}
				bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
				m_freem(bf->bf_m);
				bf->bf_m = NULL;
			}
			if (bf->bf_dmamap != NULL) {
				bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
				bf->bf_dmamap = NULL;
			}
			ni = bf->bf_node;
			bf->bf_node = NULL;
			if (ni != NULL) {
				/* Reclaim the node reference. */
				ieee80211_free_node(ni);
			}
		}
	}
