/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>

#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
#include <net80211/ieee80211_ht.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <dev/ath/if_athvar.h>
#include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
#include <dev/ath/ath_hal/ah_diagcodes.h>

#include <dev/ath/if_ath_debug.h>

#ifdef ATH_TX99_DIAG
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/ath/if_ath_misc.h>
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif
/*
 * How many retries to perform in software
 */
#define	SWMAX_RETRIES		10

/*
 * What queue to throw the non-QoS TID traffic into
 */
#define	ATH_NONQOS_TID_AC	WME_AC_VO
static int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
static int ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an,
    int tid);
static int ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an,
    int tid);
static ieee80211_seq ath_tx_tid_seqno_assign(struct ath_softc *sc,
    struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0);
static int ath_tx_action_frame_override_queue(struct ath_softc *sc,
    struct ieee80211_node *ni, struct mbuf *m0, int *tid);
static struct ath_buf *
ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
    struct ath_tid *tid, struct ath_buf *bf);
#ifdef ATH_DEBUG_ALQ
static void
ath_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf;
	int i, n;
	const char *ds;

	/* XXX we should skip out early if debugging isn't enabled! */
	for (bf = bf_first; bf != NULL; bf = bf->bf_next) {
		/* XXX should ensure bf_nseg > 0! */
		if (bf->bf_nseg == 0)
			break;
		n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1;
		for (i = 0, ds = (const char *) bf->bf_desc;
		    i < n;
		    i++, ds += sc->sc_tx_desclen) {
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC,
			    sc->sc_tx_desclen, ds);
		}
	}
}
#endif /* ATH_DEBUG_ALQ */
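/*
 * NB: an illustrative descriptor count (assumed values, not from the
 * original source): with bf_nseg = 5 and sc_tx_nmaps = 4 buffer
 * pointers per descriptor (EDMA), n = ((5 - 1) / 4) + 1 = 2, so two
 * sc_tx_desclen sized descriptor records are posted to ALQ for that
 * buffer.
 */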
/*
 * Whether to use the 11n rate scenario functions or not
 */
static inline int
ath_tx_is_11n(struct ath_softc *sc)
{
	return ((sc->sc_ah->ah_magic == 0x20065416) ||
	    (sc->sc_ah->ah_magic == 0x19741014));
}
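/*
 * NB: the two ah_magic values above are assumed to identify the
 * 11n-capable HAL families (the AR5416-era HAL and the AR9300-era
 * EDMA HAL respectively); any other magic is treated as a pre-11n
 * chip and the legacy rate setup paths are used instead.
 */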
/*
 * Obtain the current TID from the given frame.
 *
 * Non-QoS frames need to go into TID 16 (IEEE80211_NONQOS_TID.)
 * This has implications for which AC/priority the packet is placed
 * in.
 */
static int
ath_tx_gettid(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		return IEEE80211_NONQOS_TID;
	else
		return WME_AC_TO_TID(pri);
}
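/*
 * NB: a worked example using the (assumed) net80211 WME_AC_TO_TID
 * mapping: a QoS data frame classified WME_AC_VO maps to TID 6,
 * WME_AC_VI to TID 5, WME_AC_BK to TID 1 and WME_AC_BE to TID 0;
 * any non-QoS frame bypasses the classifier entirely and lands in
 * IEEE80211_NONQOS_TID (16).
 */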
static void
ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;

	wh = mtod(bf->bf_m, struct ieee80211_frame *);
	/* Only update/resync if needed */
	if (bf->bf_state.bfs_isretried == 0) {
		wh->i_fc[1] |= IEEE80211_FC1_RETRY;
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);
	}
	bf->bf_state.bfs_isretried = 1;
	bf->bf_state.bfs_retries++;
}
/*
 * Determine what the correct AC queue for the given frame
 * should be.
 *
 * This code assumes that the TIDs map consistently to
 * the underlying hardware (or software) ath_txq.
 * Since the sender may try to set an AC which is
 * arbitrary, non-QoS TIDs may end up being put on
 * completely different ACs. There's no way to put a
 * TID into multiple ath_txq's for scheduling, so
 * for now we override the AC/TXQ selection and set
 * non-QoS TID frames onto the ATH_NONQOS_TID_AC queue
 * (currently WME_AC_VO).
 *
 * This may be completely incorrect - specifically,
 * some management frames may end up out of order
 * compared to the QoS traffic they're controlling.
 * I'll look into this later.
 */
static int
ath_tx_getac(struct ath_softc *sc, const struct mbuf *m0)
{
	const struct ieee80211_frame *wh;
	int pri = M_WME_GETAC(m0);

	wh = mtod(m0, const struct ieee80211_frame *);
	if (IEEE80211_QOS_HAS_SEQ(wh))
		return pri;

	return ATH_NONQOS_TID_AC;
}
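/*
 * NB: a hypothetical example of the override above: a non-QoS
 * management frame that the classifier marked WME_AC_BE is still
 * returned as ATH_NONQOS_TID_AC here, so all non-QoS traffic ends
 * up on one predictable TXQ regardless of what M_WME_GETAC() says.
 */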
void
ath_txfrag_cleanup(struct ath_softc *sc,
    ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	TAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		TAILQ_REMOVE(frags, bf, bf_list);
		ath_returnbuf_head(sc, bf);
		ieee80211_node_decref(ni);
	}
}
/*
 * Setup xmit of a fragmented frame.  Allocate a buffer
 * for each frag and bump the node reference count to
 * reflect the held reference to be setup by ath_tx_start.
 */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
    struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		/* XXX non-management? */
		bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
		if (bf == NULL) {	/* out of buffers, cleanup */
			DPRINTF(sc, ATH_DEBUG_XMIT, "%s: no buffer?\n",
			    __func__);
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		ieee80211_node_incref(ni);
		TAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	return !TAILQ_EMPTY(frags);
}
/*
 * Reclaim mbuf resources.  For fragmented frames we
 * need to claim each frag chained with m_nextpkt.
 */
static void
ath_freetx(struct mbuf *m)
{
	struct mbuf *next;

	do {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m_freem(m);
	} while ((m = next) != NULL);
}
static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
	    bf->bf_segs, &bf->bf_nseg,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_MAX_SCATTER + 1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_MAX_SCATTER) {	/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_NOWAIT, ATH_MAX_SCATTER);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
		    bf->bf_segs, &bf->bf_nseg,
		    BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
	    __func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
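/*
 * NB: a sketch of the recovery path above (illustrative only): an
 * mbuf chain needing more than ATH_MAX_SCATTER segments fails the
 * first bus_dmamap_load_mbuf_sg() with EFBIG; m_collapse() then
 * rewrites the chain into at most ATH_MAX_SCATTER contiguous
 * segments and the load is retried once. Only if the second load
 * also fails is the frame dropped and counted in ast_tx_busdma.
 */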
/*
 * Chain together segments+descriptors for a frame - 11n or otherwise.
 *
 * For aggregates, this is called on each frame in the aggregate.
 */
static void
ath_tx_chaindesclist(struct ath_softc *sc, struct ath_desc *ds0,
    struct ath_buf *bf, int is_aggr, int is_first_subframe,
    int is_last_subframe)
{
	struct ath_hal *ah = sc->sc_ah;
	char *ds;
	int i, bp, dsp;
	HAL_DMA_ADDR bufAddrList[4];
	uint32_t segLenList[4];
	int numTxMaps = 1;
	int isFirstDesc = 1;

	/*
	 * XXX There's txdma and txdma_mgmt; the descriptor
	 * sizes must match.
	 */
	struct ath_descdma *dd = &sc->sc_txdma;

	/*
	 * Fill in the remainder of the descriptor info.
	 */

	/*
	 * We need the number of TX data pointers in each descriptor.
	 * EDMA and later chips support 4 TX buffers per descriptor;
	 * previous chips just support one.
	 */
	numTxMaps = sc->sc_tx_nmaps;

	/*
	 * For EDMA and later chips ensure the TX map is fully populated
	 * before advancing to the next descriptor.
	 */
	ds = (char *) bf->bf_desc;
	bp = dsp = 0;
	bzero(bufAddrList, sizeof(bufAddrList));
	bzero(segLenList, sizeof(segLenList));
	for (i = 0; i < bf->bf_nseg; i++) {
		bufAddrList[bp] = bf->bf_segs[i].ds_addr;
		segLenList[bp] = bf->bf_segs[i].ds_len;
		bp++;

		/*
		 * Go to the next segment if this isn't the last segment
		 * and there's space in the current TX map.
		 */
		if ((i != bf->bf_nseg - 1) && (bp < numTxMaps))
			continue;

		/*
		 * Last segment or we're out of buffer pointers.
		 */
		bp = 0;

		if (i == bf->bf_nseg - 1)
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds, 0);
		else
			ath_hal_settxdesclink(ah, (struct ath_desc *) ds,
			    bf->bf_daddr + dd->dd_descsize * (dsp + 1));

		/*
		 * XXX This assumes that bfs_txq is the actual destination
		 * hardware queue at this point.  It may not have been
		 * assigned, it may actually be pointing to the multicast
		 * software TXQ id.  These must be fixed!
		 */
		ath_hal_filltxdesc(ah, (struct ath_desc *) ds
			, bufAddrList
			, segLenList
			, bf->bf_descid		/* XXX desc id */
			, bf->bf_state.bfs_tx_queue
			, isFirstDesc		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, (struct ath_desc *) ds0	/* first descriptor */
		);

		/*
		 * Make sure the 11n aggregate fields are cleared.
		 *
		 * XXX TODO: this doesn't need to be called for
		 * aggregate frames; as it'll be called on all
		 * sub-frames.  Since the descriptors are in
		 * non-cacheable memory, this leads to some
		 * rather slow writes on MIPS/ARM platforms.
		 */
		if (ath_tx_is_11n(sc))
			ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds);

		/*
		 * If 11n is enabled, set it up as if it's an aggregate
		 * frame.
		 */
		if (is_last_subframe) {
			ath_hal_set11n_aggr_last(sc->sc_ah,
			    (struct ath_desc *) ds);
		} else if (is_aggr) {
			/*
			 * This clears the aggrlen field; so
			 * the caller needs to call set_aggr_first()!
			 *
			 * XXX TODO: don't call this for the first
			 * descriptor in the first frame in an
			 * aggregate!
			 */
			ath_hal_set11n_aggr_middle(sc->sc_ah,
			    (struct ath_desc *) ds,
			    bf->bf_state.bfs_ndelim);
		}
		isFirstDesc = 0;
		bf->bf_lastds = (struct ath_desc *) ds;

		/*
		 * Don't forget to skip to the next descriptor.
		 */
		ds += sc->sc_tx_desclen;
		dsp++;

		/*
		 * .. and don't forget to blank these out!
		 */
		bzero(bufAddrList, sizeof(bufAddrList));
		bzero(segLenList, sizeof(segLenList));
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
}
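/*
 * NB: an illustrative walk of the packing above (assumed values):
 * with bf_nseg = 5 and sc_tx_nmaps = 4, descriptor 0 carries
 * segments 0-3 and is linked to bf_daddr + dd_descsize * 1;
 * descriptor 1 carries segment 4 and, being the final descriptor,
 * gets a NULL link pointer.
 */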
/*
 * Set the rate control fields in the given descriptor based on
 * the bf_state fields and node state.
 *
 * The bfs fields should already be set with the relevant rate
 * control information, including whether MRR is to be enabled.
 *
 * Since the FreeBSD HAL currently sets up the first TX rate
 * in ath_hal_setuptxdesc(), this will setup the MRR
 * conditionally for the pre-11n chips, and call ath_buf_set_rate
 * unconditionally for 11n chips. These require the 11n rate
 * scenario to be set if MCS rates are enabled, so it's easier
 * to just always call it. The caller can then only set rates 2, 3
 * and 4 if multi-rate retry is needed.
 */
static void
ath_tx_set_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf)
{
	struct ath_rc_series *rc = bf->bf_state.bfs_rc;

	/* If mrr is disabled, blank tries 1, 2, 3 */
	if (! bf->bf_state.bfs_ismrr)
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
	/*
	 * If NOACK is set, just set ntries=1.
	 */
	else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) {
		rc[1].tries = rc[2].tries = rc[3].tries = 0;
		rc[0].tries = 1;
	}

	/*
	 * Always call - that way a retried descriptor will
	 * have the MRR fields overwritten.
	 *
	 * XXX TODO: see if this is really needed - setting up
	 * the first descriptor should set the MRR fields to 0
	 * for us anyway.
	 */
	if (ath_tx_is_11n(sc)) {
		ath_buf_set_rate(sc, ni, bf);
	} else {
		ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
			, rc[1].ratecode, rc[1].tries
			, rc[2].ratecode, rc[2].tries
			, rc[3].ratecode, rc[3].tries
		);
	}
}
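/*
 * NB: a hypothetical MRR series resulting from the above: rc[0] =
 * {MCS 7, 4 tries}, rc[1] = {MCS 4, 4 tries}, rc[2] = {MCS 1,
 * 4 tries}, rc[3] = {6 Mbps, 4 tries}. The hardware walks the
 * series in order until a transmission succeeds or all tries are
 * exhausted; with bfs_ismrr clear only rc[0] is ever attempted.
 */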
/*
 * Setup segments+descriptors for an 11n aggregate.
 * bf_first is the first buffer in the aggregate.
 * The descriptor list must already be linked together using
 * bf->bf_next.
 */
static void
ath_tx_setds_11n(struct ath_softc *sc, struct ath_buf *bf_first)
{
	struct ath_buf *bf, *bf_prev = NULL;
	struct ath_desc *ds0 = bf_first->bf_desc;

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: nframes=%d, al=%d\n",
	    __func__, bf_first->bf_state.bfs_nframes,
	    bf_first->bf_state.bfs_al);

	bf = bf_first;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, txrate0=%d\n",
		    __func__, bf, 0);
	if (bf->bf_state.bfs_rc[0].ratecode == 0)
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: bf=%p, rix0=%d\n",
		    __func__, bf, 0);

	/*
	 * Setup all descriptors of all subframes - this will
	 * call ath_hal_set11naggrmiddle() on every frame.
	 */
	while (bf != NULL) {
		DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
		    "%s: bf=%p, nseg=%d, pktlen=%d, seqno=%d\n",
		    __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen,
		    SEQNO(bf->bf_state.bfs_seqno));

		/*
		 * Setup the initial fields for the first descriptor - all
		 * the non-11n specific stuff.
		 */
		ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc
			, bf->bf_state.bfs_pktlen	/* packet length */
			, bf->bf_state.bfs_hdrlen	/* header length */
			, bf->bf_state.bfs_atype	/* Atheros packet type */
			, bf->bf_state.bfs_txpower	/* txpower */
			, bf->bf_state.bfs_txrate0
			, bf->bf_state.bfs_try0		/* series 0 rate/tries */
			, bf->bf_state.bfs_keyix	/* key cache index */
			, bf->bf_state.bfs_txantenna	/* antenna mode */
			, bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ	/* flags */
			, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
			, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
		);

		/*
		 * First descriptor? Setup the rate control and initial
		 * aggregate header information.
		 */
		if (bf == bf_first) {
			/*
			 * setup first desc with rate and aggr info
			 */
			ath_tx_set_ratectrl(sc, bf->bf_node, bf);
		}

		/*
		 * Setup the descriptors for a multi-descriptor frame.
		 * This is both aggregate and non-aggregate aware.
		 */
		ath_tx_chaindesclist(sc, ds0, bf,
		    1, /* is_aggr */
		    !! (bf == bf_first), /* is_first_subframe */
		    !! (bf->bf_next == NULL) /* is_last_subframe */
		    );

		if (bf == bf_first) {
			/*
			 * Initialise the first 11n aggregate with the
			 * aggregate length and aggregate enable bits.
			 */
			ath_hal_set11n_aggr_first(sc->sc_ah,
			    bf->bf_desc,
			    bf->bf_state.bfs_al,
			    bf->bf_state.bfs_ndelim);
		}

		/*
		 * Link the last descriptor of the previous frame
		 * to the beginning descriptor of this frame.
		 */
		if (bf_prev != NULL)
			ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds,
			    bf->bf_daddr);

		/* Save a copy so we can link the next descriptor in */
		bf_prev = bf;
		bf = bf->bf_next;
	}

	/*
	 * Set the first descriptor bf_lastds field to point to
	 * the last descriptor in the last subframe, that's where
	 * the status update will occur.
	 */
	bf_first->bf_lastds = bf_prev->bf_lastds;

	/*
	 * And bf_last in the first descriptor points to the end of
	 * the aggregate list.
	 */
	bf_first->bf_last = bf_prev;

	/*
	 * For non-AR9300 NICs, which require the rate control
	 * in the final descriptor - let's set that up now.
	 *
	 * This is because the filltxdesc() HAL call doesn't
	 * populate the last segment with rate control information
	 * if firstSeg is also true. For non-aggregate frames
	 * that is fine, as the first frame already has rate control
	 * info. But if the last frame in an aggregate has one
	 * descriptor, both firstseg and lastseg will be true and
	 * the rate info isn't copied.
	 *
	 * This is inefficient on MIPS/ARM platforms that have
	 * non-cacheable memory for TX descriptors, but we'll just
	 * do it this way for now.
	 *
	 * As to why the rate table is stashed in the last descriptor
	 * rather than the first descriptor? Because proctxdesc()
	 * is called on the final descriptor in an MPDU or A-MPDU -
	 * ie, the one that gets updated by the hardware upon
	 * completion. That way proctxdesc() doesn't need to know
	 * about the first _and_ last TX descriptor.
	 */
	ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0);

	DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: end\n", __func__);
}
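/*
 * NB: an illustrative picture of the result for a three-subframe
 * aggregate (A -> B -> C via bf_next):
 *
 *   A descs --link--> B descs --link--> C descs --link--> NULL
 *
 * A's first descriptor carries the aggregate-first fields (total
 * length, delimiters); every descriptor is marked aggregate-middle
 * except C's last, which is marked aggregate-last. bf_first's
 * bf_lastds points at C's final descriptor - where the completion
 * status lands - and bf_first->bf_last points at C itself.
 */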
/*
 * Hand-off a frame to the multicast TX queue.
 *
 * This is a software TXQ which will be appended to the CAB queue
 * during the beacon setup code.
 *
 * XXX TODO: since the AR9300 EDMA TX queue support wants the QCU ID
 * as part of the TX descriptor, bf_state.bfs_tx_queue must be updated
 * with the actual hardware txq, or all of this will fall apart.
 *
 * XXX It may not be a bad idea to just stuff the QCU ID into bf_state
 * and retire bfs_tx_queue; then make sure the CABQ QCU ID is populated
 * correctly.
 */
static void
ath_tx_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * Ensure that the tx queue is the cabq, so things get
	 * mapped correctly.
	 */
	if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/* mark previous frame */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
}
/*
 * Hand-off packet to a hardware queue.
 */
static void
ath_tx_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf_first;

	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
	KASSERT(txq->axq_qnum != ATH_TXQ_SWQ,
	    ("ath_tx_handoff_hw called for mcast queue"));

	/*
	 * XXX racy, should hold the PCU lock when checking this,
	 * and also should ensure that the TX counter is >0!
	 */
	KASSERT((sc->sc_inreset_cnt == 0),
	    ("%s: TX during reset?\n", __func__));

#if 0
	/*
	 * This causes a LOR. Find out where the PCU lock is being
	 * held whilst the TXQ lock is grabbed - that shouldn't
	 * be occurring.
	 */
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt) {
		ATH_PCU_UNLOCK(sc);
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: called with sc_in_reset != 0\n",
		    __func__);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: queued: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		/* XXX axq_link needs to be set and updated! */
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth++;
		return;
	}
	ATH_PCU_UNLOCK(sc);
#endif

	/*
	 * XXX TODO: if there's a holdingbf, then
	 * ATH_TXQ_PUTRUNNING should be clear.
	 *
	 * If there is a holdingbf and the list is empty,
	 * then axq_link should be pointing to the holdingbf.
	 *
	 * Otherwise it should point to the last descriptor
	 * in the last ath_buf.
	 *
	 * In any case, we should really ensure that we
	 * update the previous descriptor link pointer to
	 * this descriptor, regardless of all of the above state.
	 *
	 * For now this is captured by having axq_link point
	 * to either the holdingbf (if the TXQ list is empty)
	 * or the end of the list (if the TXQ list isn't empty.)
	 * I'd rather just kill axq_link here and do it as above.
	 */

	/*
	 * Append the frame to the TX queue.
	 */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_KTR(sc, ATH_KTR_TX, 3,
	    "ath_tx_handoff: non-tdma: txq=%u, add bf=%p "
	    "depth=%d",
	    txq->axq_qnum, bf, txq->axq_depth);

	/*
	 * If there's a link pointer, update it.
	 *
	 * XXX we should replace this with the above logic, just
	 * to kill axq_link with fire.
	 */
	if (txq->axq_link != NULL) {
		*txq->axq_link = bf->bf_daddr;
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) "
		    "depth %d",
		    txq->axq_qnum, txq->axq_link,
		    (caddr_t)bf->bf_daddr, bf->bf_desc,
		    txq->axq_depth);
	}

	/*
	 * If we've not pushed anything into the hardware yet,
	 * push the head of the queue into the TxDP.
	 *
	 * Once we've started DMA, there's no guarantee that
	 * updating the TxDP with a new value will actually work.
	 * So we just don't do that - if we hit the end of the list,
	 * we keep that buffer around (the "holding buffer") and
	 * re-start DMA by updating the link pointer of _that_
	 * descriptor and then restart DMA.
	 */
	if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) {
		bf_first = TAILQ_FIRST(&txq->axq_q);
		txq->axq_flags |= ATH_TXQ_PUTRUNNING;
		ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr);
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: TXDP[%u] = %p (%p) depth %d\n",
		    __func__, txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    txq->axq_depth);
		ATH_KTR(sc, ATH_KTR_TX, 5,
		    "ath_tx_handoff: TXDP[%u] = %p (%p) "
		    "lastds=%p depth %d",
		    txq->axq_qnum,
		    (caddr_t)bf_first->bf_daddr, bf_first->bf_desc,
		    bf_first->bf_lastds,
		    txq->axq_depth);
	}

	/*
	 * Ensure that the bf TXQ matches this TXQ, so later
	 * checking and holding buffer manipulation is sane.
	 */
	if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, bfs_tx_queue=%d, axq_qnum=%d\n",
		    __func__, bf, bf->bf_state.bfs_tx_queue,
		    txq->axq_qnum);
	}

	/*
	 * Track aggregate queue depth.
	 */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/*
	 * Update the link pointer.
	 */
	ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link);

	/*
	 * Start DMA.
	 *
	 * If we wrote a TxDP above, DMA will start from here.
	 *
	 * If DMA is running, it'll do nothing.
	 *
	 * If the DMA engine hit the end of the QCU list (ie LINK=NULL,
	 * or VEOL) then it stops at the last transmitted write.
	 * We then append a new frame by updating the link pointer
	 * in that descriptor and then kick TxE here; it will re-read
	 * that last descriptor and find the new descriptor to transmit.
	 *
	 * This is why we keep the holding descriptor around.
	 */
	ath_hal_txstart(ah, txq->axq_qnum);

	ATH_KTR(sc, ATH_KTR_TX, 1,
	    "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum);
}
/*
 * Restart TX DMA for the given TXQ.
 *
 * This must be called whether the queue is empty or not.
 */
static void
ath_legacy_tx_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;

	ATH_TXQ_LOCK_ASSERT(txq);

	/* XXX make this ATH_TXQ_FIRST */
	bf = TAILQ_FIRST(&txq->axq_q);
	bf_last = ATH_TXQ_LAST(txq, axq_q_s);

	if (bf == NULL)
		return;

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%d: bf=%p, bf_last=%p, daddr=0x%08x\n",
	    __func__,
	    txq->axq_qnum,
	    bf,
	    bf_last,
	    (uint32_t) bf->bf_daddr);

#ifdef	ATH_DEBUG
	if (sc->sc_debug & ATH_DEBUG_RESET)
		ath_tx_dump(sc, txq);
#endif

	/*
	 * This is called from a restart, so DMA is known to be
	 * completely stopped.
	 */
	KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)),
	    ("%s: Q%d: called with PUTRUNNING=1\n",
	    __func__,
	    txq->axq_qnum));

	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
	txq->axq_flags |= ATH_TXQ_PUTRUNNING;

	ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds,
	    &txq->axq_link);
	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}
/*
 * Hand off a packet to the hardware (or mcast queue.)
 *
 * The relevant hardware txq should be locked.
 */
static void
ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	ATH_TX_LOCK_ASSERT(sc);

#ifdef	ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif

	if (txq->axq_qnum == ATH_TXQ_SWQ)
		ath_tx_handoff_mcast(sc, txq, bf);
	else
		ath_tx_handoff_hw(sc, txq, bf);
}
static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
    struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
    int *keyix)
{
	DPRINTF(sc, ATH_DEBUG_XMIT,
	    "%s: hdrlen=%d, pktlen=%d, isfrag=%d, iswep=%d, m0=%p\n",
	    __func__, *hdrlen, *pktlen, isfrag, iswep, m0);

	if (iswep) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (0);
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		(*hdrlen) += cip->ic_header;
		(*pktlen) += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
			(*pktlen) += cip->ic_miclen;
		(*keyix) = k->wk_keyix;
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		(*keyix) = ni->ni_ucastkey.wk_keyix;
		if ((*keyix) == IEEE80211_KEYIX_NONE)
			(*keyix) = HAL_TXKEYIX_INVALID;
	} else
		(*keyix) = HAL_TXKEYIX_INVALID;

	return (1);
}
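/*
 * NB: a worked example with assumed net80211 cipher constants
 * (TKIP: ic_header 8, ic_trailer 4, ic_miclen 8): hdrlen grows by
 * 8 and pktlen by 12 for every TKIP frame, plus a further 8 bytes
 * of Michael MIC in pktlen only when the MIC is done in hardware
 * (IEEE80211_KEY_SWMIC clear) and the frame isn't a fragment.
 */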
/*
 * Calculate whether interoperability protection is required for
 * this frame.
 *
 * This requires the rate control information be filled in,
 * as the protection requirement depends upon the current
 * operating mode / PHY.
 */
static void
ath_tx_calc_protection(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * If 802.11g protection is enabled, determine whether
	 * to use RTS/CTS or just CTS.  Note that this is only
	 * done for OFDM unicast frames.
	 */
	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
	    rt->info[rix].phy == IEEE80211_T_OFDM &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		bf->bf_state.bfs_doprot = 1;
		/* XXX fragments must use CCK rates w/ protection */
		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) {
			flags |= HAL_TXDESC_RTSENA;
		} else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) {
			flags |= HAL_TXDESC_CTSENA;
		}
		/*
		 * For frags it would be desirable to use the
		 * highest CCK rate for RTS/CTS.  But stations
		 * farther away may detect it at a lower CCK rate
		 * so use the configured protection rate instead
		 * (for now).
		 */
		sc->sc_stats.ast_tx_protect++;
	}

	/*
	 * If 11n protection is enabled and it's a HT frame,
	 * enable RTS.
	 *
	 * XXX ic_htprotmode or ic_curhtprotmode?
	 * XXX should ic_htprotmode only matter if ic_curhtprotmode
	 * XXX indicates it's not a HT pure environment?
	 */
	if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
	    rt->info[rix].phy == IEEE80211_T_HT &&
	    (flags & HAL_TXDESC_NOACK) == 0) {
		flags |= HAL_TXDESC_RTSENA;
		sc->sc_stats.ast_tx_htprotect++;
	}

	bf->bf_state.bfs_txflags = flags;
}
/*
 * Update the frame duration given the currently selected rate.
 *
 * This also updates the frame duration value, so it will require
 * a DMA flush.
 */
static void
ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ieee80211_frame *wh;
	uint8_t rix;
	uint16_t flags;
	int shortPreamble;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	int isfrag = bf->bf_m->m_flags & M_FRAG;

	flags = bf->bf_state.bfs_txflags;
	rix = bf->bf_state.bfs_rc[0].rix;
	shortPreamble = bf->bf_state.bfs_shpream;
	wh = mtod(bf->bf_m, struct ieee80211_frame *);

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & HAL_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
		u_int16_t dur;
		if (shortPreamble)
			dur = rt->info[rix].spAckDuration;
		else
			dur = rt->info[rix].lpAckDuration;
		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
			dur += dur;		/* additional SIFS+ACK */
			/*
			 * Include the size of next fragment so NAV is
			 * updated properly.  The last fragment uses only
			 * the ACK duration.
			 *
			 * XXX TODO: ensure that the rate lookup for each
			 * fragment is the same as the rate used by the
			 * first fragment!
			 */
			dur += ath_hal_computetxtime(ah,
			    rt,
			    bf->bf_m->m_nextpkt->m_pkthdr.len,
			    rix, shortPreamble);
		}
		if (isfrag) {
			/*
			 * Force hardware to use computed duration for next
			 * fragment by disabling multi-rate retry which updates
			 * duration based on the multi-rate duration table.
			 */
			bf->bf_state.bfs_ismrr = 0;
			bf->bf_state.bfs_try0 = ATH_TXMGTTRY;
			/* XXX update bfs_rc[0].try? */
		}

		/* Update the duration field itself */
		*(u_int16_t *)wh->i_dur = htole16(dur);
	}
}
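/*
 * NB: a hypothetical worked example of the duration math above:
 * with lpAckDuration = 304us for the chosen rate and no further
 * fragments, i_dur is simply 304us (SIFS + ACK). With MORE_FRAG
 * set it becomes 2 * 304us plus the computed airtime of the next
 * fragment, so the NAV protects the entire fragment exchange.
 */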
static uint8_t
ath_tx_get_rtscts_rate(struct ath_hal *ah, const HAL_RATE_TABLE *rt,
    int cix, int shortPreamble)
{
	uint8_t ctsrate;

	/*
	 * CTS transmit rate is derived from the transmit rate
	 * by looking in the h/w rate table.  We must also factor
	 * in whether or not a short preamble is to be used.
	 */
	/* NB: cix is set above where RTS/CTS is enabled */
	KASSERT(cix != 0xff, ("cix not setup"));
	ctsrate = rt->info[cix].rateCode;

	/* XXX this should only matter for legacy rates */
	if (shortPreamble)
		ctsrate |= rt->info[cix].shortPreamble;

	return (ctsrate);
}
/*
 * Calculate the RTS/CTS duration for legacy frames.
 */
static int
ath_tx_calc_ctsduration(struct ath_hal *ah, int rix, int cix,
    int shortPreamble, int pktlen, const HAL_RATE_TABLE *rt,
    int flags)
{
	int ctsduration = 0;

	/* This mustn't be called for HT modes */
	if (rt->info[cix].phy == IEEE80211_T_HT) {
		printf("%s: HT rate where it shouldn't be (0x%x)\n",
		    __func__, rt->info[cix].rateCode);
		return (-1);
	}

	/*
	 * Compute the transmit duration based on the frame
	 * size and the size of an ACK frame.  We call into the
	 * HAL to do the computation since it depends on the
	 * characteristics of the actual PHY being used.
	 *
	 * NB: CTS is assumed the same size as an ACK so we can
	 * use the precalculated ACK durations.
	 */
	if (shortPreamble) {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].spAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_TRUE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].spAckDuration;
	} else {
		if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
			ctsduration += rt->info[cix].lpAckDuration;
		ctsduration += ath_hal_computetxtime(ah,
		    rt, pktlen, rix, AH_FALSE);
		if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
			ctsduration += rt->info[rix].lpAckDuration;
	}

	return (ctsduration);
}
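/*
 * NB: in equation form, for the RTS case with an ACK expected:
 *
 *   ctsduration = AckDur(cix)          (SIFS + CTS, control rate)
 *               + TxTime(pktlen, rix)  (the data frame itself)
 *               + AckDur(rix)          (SIFS + ACK, data rate)
 *
 * where AckDur is spAckDuration or lpAckDuration depending on the
 * preamble; CTS-only protection skips the first term since
 * HAL_TXDESC_RTSENA is clear.
 */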
/*
 * Update the given ath_buf with updated rts/cts setup and duration
 * values.
 *
 * To support rate lookups for each software retry, the rts/cts rate
 * and cts duration must be re-calculated.
 *
 * This function assumes the RTS/CTS flags have been set as needed;
 * mrr has been disabled; and the rate control lookup has been done.
 *
 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
 * XXX The 11n NICs support per-rate RTS/CTS configuration.
 */
static void
ath_tx_set_rtscts(struct ath_softc *sc, struct ath_buf *bf)
{
	uint16_t ctsduration = 0;
	uint8_t ctsrate = 0;
	uint8_t rix = bf->bf_state.bfs_rc[0].rix;
	uint8_t cix = 0;
	const HAL_RATE_TABLE *rt = sc->sc_currates;

	/*
	 * No RTS/CTS enabled? Don't bother.
	 */
	if ((bf->bf_state.bfs_txflags &
	    (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) == 0) {
		/* XXX is this really needed? */
		bf->bf_state.bfs_ctsrate = 0;
		bf->bf_state.bfs_ctsduration = 0;
		return;
	}

	/*
	 * If protection is enabled, use the protection rix control
	 * rate. Otherwise use the rate0 control rate.
	 */
	if (bf->bf_state.bfs_doprot)
		rix = sc->sc_protrix;
	else
		rix = bf->bf_state.bfs_rc[0].rix;

	/*
	 * If the raw path has hard-coded ctsrate0 to something,
	 * use it.
	 */
	if (bf->bf_state.bfs_ctsrate0 != 0)
		cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0);
	else
		/* Control rate from above */
		cix = rt->info[rix].controlRate;

	/* Calculate the rtscts rate for the given cix */
	ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix,
	    bf->bf_state.bfs_shpream);

	/* The 11n chipsets do ctsduration calculations for you */
	if (! ath_tx_is_11n(sc))
		ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix,
		    bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen,
		    rt, bf->bf_state.bfs_txflags);

	/* Squirrel away in ath_buf */
	bf->bf_state.bfs_ctsrate = ctsrate;
	bf->bf_state.bfs_ctsduration = ctsduration;

	/*
	 * Must disable multi-rate retry when using RTS/CTS.
	 */
	if (! sc->sc_mrrprot) {
		bf->bf_state.bfs_ismrr = 0;
		bf->bf_state.bfs_try0 =
		    bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY;	/* XXX ew */
	}
}
/*
 * Setup the descriptor chain for a normal or fast-frame
 * frame.
 *
 * XXX TODO: extend to include the destination hardware QCU ID.
 * Make sure that is correct.  Make sure that when being added
 * to the mcastq, the CABQ QCUID is set or things will get a bit
 * odd.
 */
static void
ath_tx_setds(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_desc *ds = bf->bf_desc;
	struct ath_hal *ah = sc->sc_ah;

	if (bf->bf_state.bfs_txrate0 == 0)
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: bf=%p, txrate0=%d\n", __func__, bf, 0);

	ath_hal_setuptxdesc(ah, ds
		, bf->bf_state.bfs_pktlen	/* packet length */
		, bf->bf_state.bfs_hdrlen	/* header length */
		, bf->bf_state.bfs_atype	/* Atheros packet type */
		, bf->bf_state.bfs_txpower	/* txpower */
		, bf->bf_state.bfs_txrate0
		, bf->bf_state.bfs_try0		/* series 0 rate/tries */
		, bf->bf_state.bfs_keyix	/* key cache index */
		, bf->bf_state.bfs_txantenna	/* antenna mode */
		, bf->bf_state.bfs_txflags	/* flags */
		, bf->bf_state.bfs_ctsrate	/* rts/cts rate */
		, bf->bf_state.bfs_ctsduration	/* rts/cts duration */
	);

	/*
	 * This will be overridden when the descriptor chain is written.
	 */
	bf->bf_lastds = ds;
	bf->bf_last = bf;

	/* Set rate control and descriptor chain for this frame */
	ath_tx_set_ratectrl(sc, bf->bf_node, bf);
	ath_tx_chaindesclist(sc, ds, bf, 0, 0, 0);
}
/*
 * This performs a rate lookup for the given ath_buf only if it's required.
 * Non-data frames and raw frames don't require it.
 *
 * This populates the primary and MRR entries; MRR values are
 * then disabled later on if something requires it (eg RTS/CTS on
 * pre-11n chipsets.)
 *
 * This needs to be done before the RTS/CTS fields are calculated
 * as they may depend upon the rate chosen.
 */
static void
ath_tx_do_ratelookup(struct ath_softc *sc, struct ath_buf *bf)
{
	uint8_t rate, rix;
	int try0;

	if (! bf->bf_state.bfs_doratelookup)
		return;

	/* Get rid of any previous state */
	bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	ATH_NODE_LOCK(ATH_NODE(bf->bf_node));
	ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream,
	    bf->bf_state.bfs_pktlen, &rix, &try0, &rate);

	/* In case MRR is disabled, make sure rc[0] is setup correctly */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].ratecode = rate;
	bf->bf_state.bfs_rc[0].tries = try0;

	if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY)
		ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix,
		    bf->bf_state.bfs_rc);
	ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node));

	sc->sc_txrix = rix;		/* for LED blinking */
	sc->sc_lastdatarix = rix;	/* for fast frames */
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_txrate0 = rate;
}
/*
 * Update the CLRDMASK bit in the ath_buf if it needs to be set.
 */
static void
ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);

	ATH_TX_LOCK_ASSERT(sc);

	if (an->clrdmask == 1) {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		an->clrdmask = 0;
	}
}
/*
 * Return whether this frame should be software queued or
 * direct dispatched.
 *
 * When doing powersave, BAR frames should be queued but other management
 * frames should be directly sent.
 *
 * When not doing powersave, stick BAR frames into the hardware queue
 * so it goes out even though the queue is paused.
 *
 * For now, management frames are also software queued by default.
 */
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
    struct mbuf *m0, int *queue_to_head)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ieee80211_frame *wh;
	uint8_t type, subtype;

	wh = mtod(m0, struct ieee80211_frame *);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	(*queue_to_head) = 0;

	/* If it's not in powersave - direct-dispatch BAR */
	if ((ATH_NODE(ni)->an_is_powersave == 0)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: BAR: TX'ing direct\n", __func__);
		return (0);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR) {
		/* BAR TX whilst asleep; queue */
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: swq: TX'ing\n", __func__);
		(*queue_to_head) = 1;
		return (1);
	} else if ((ATH_NODE(ni)->an_is_powersave == 1)
	    && (type == IEEE80211_FC0_TYPE_MGT ||
	        type == IEEE80211_FC0_TYPE_CTL)) {
		/*
		 * Other control/mgmt frame; bypass software queuing
		 * for now!
		 */
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: %6D: Node is asleep; sending mgmt "
		    "(type=%d, subtype=%d)\n",
		    __func__, ni->ni_macaddr, ":", type, subtype);
		return (0);
	} else
		return (1);
}
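/*
 * NB: the decision matrix above, summarised (illustrative only):
 *
 *   node awake  + BAR            -> direct dispatch
 *   node asleep + BAR            -> software queue, at the head
 *   node asleep + other mgmt/ctl -> direct dispatch (for now)
 *   everything else              -> software queue, at the tail
 */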
/*
 * Transmit the given frame to the hardware.
 *
 * The frame must already be setup; rate control must already have
 * been done.
 *
 * XXX since the TXQ lock is being held here (and I dislike holding
 * it for this long when not doing software aggregation), later on
 * break this function into "setup_normal" and "xmit_normal". The
 * lock only needs to be held for the ath_tx_handoff call.
 *
 * XXX we don't update the leak count here - if we're doing
 * direct frame dispatch, we need to be able to do it without
 * decrementing the leak count (eg multicast queue frames.)
 */
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{
	struct ath_node *an = ATH_NODE(bf->bf_node);
	struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * For now, just enable CLRDMASK. ath_tx_xmit_normal() does
	 * set a completion handler however it doesn't (yet) properly
	 * handle the strict ordering requirements needed for normal,
	 * non-aggregate session frames.
	 *
	 * Once this is implemented, only set CLRDMASK like this for
	 * frames that must go out - eg management/raw frames.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;

	/* Setup the descriptor before handoff */
	ath_tx_do_ratelookup(sc, bf);
	ath_tx_calc_duration(sc, bf);
	ath_tx_calc_protection(sc, bf);
	ath_tx_set_rtscts(sc, bf);
	ath_tx_rate_fill_rcflags(sc, bf);
	ath_tx_setds(sc, bf);

	/* Track per-TID hardware queue depth correctly */
	tid->hwq_depth++;

	/* Assign the completion handler */
	bf->bf_comp = ath_tx_normal_comp;

	/* Hand off to hardware */
	ath_tx_handoff(sc, txq, bf);
}
/*
 * Do the basic frame setup stuff that's required before the frame
 * is added to a software queue.
 *
 * All frames get mostly the same treatment and it's done once.
 * Retransmits fiddle with things like the rate control setup,
 * setting the retransmit bit in the packet; doing relevant DMA/bus
 * syncing and relinking it (back) into the hardware TX queue.
 *
 * Note that this may cause the mbuf to be reallocated, so
 * m0 may not be valid.
 */
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0, struct ath_txq *txq)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
	int error, iswep, ismcast, isfrag, ismrr;
	int keyix, hdrlen, pktlen, try0 = 0;
	u_int8_t rix = 0, txrate = 0;
	struct ath_desc *ds;
	struct ieee80211_frame *wh;
	u_int subtype, flags;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	HAL_BOOL shortPreamble;
	struct ath_node *an;
	u_int pri;

	/*
	 * To ensure that both sequence numbers and the CCMP PN handling
	 * is "correct", make sure that the relevant TID queue is locked.
	 * Otherwise the CCMP PN and seqno may appear out of order, causing
	 * re-ordered frames to have out of order CCMP PN's, resulting
	 * in many, many frame drops.
	 */
	ATH_TX_LOCK_ASSERT(sc);

	wh = mtod(m0, struct ieee80211_frame *);
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	isfrag = m0->m_flags & M_FRAG;
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3);
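	/*
	 * NB: a worked example of the pad arithmetic above (assumed
	 * net80211 behaviour with IEEE80211_F_DATAPAD): a QoS data
	 * header is 26 bytes, so net80211 inserts 2 bytes of pad to
	 * align the payload; hdrlen & 3 == 2 recovers exactly that
	 * pad, so pktlen reflects the over-the-air length (before
	 * the CRC is added below).
	 */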
	/* Handle encryption twiddling if needed */
	if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
	    &pktlen, &keyix)) {
		ath_freetx(m0);
		return EIO;
	}

	/* packet header may have moved, reset our local pointer */
	wh = mtod(m0, struct ieee80211_frame *);

	pktlen += IEEE80211_CRC_LEN;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	bf->bf_node = ni;			/* NB: held reference */
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));

	/*
	 * NB: the 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = AH_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = AH_FALSE;
	}

	an = ATH_NODE(ni);
	//flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags = 0;
	ismrr = 0;				/* default no multi-rate retry*/
	pri = M_WME_GETAC(m0);			/* honor classification */
	/* XXX use txparams instead of fixed values */
	/*
	 * Calculate Atheros packet type from IEEE80211 packet header,
	 * setup for rate calculations, and select h/w transmit queue.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = HAL_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = HAL_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = HAL_PKT_TYPE_ATIM;
		else
			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
		rix = an->an_mgmtrix;
		txrate = rt->info[rix].rateCode;
		if (shortPreamble)
			txrate |= rt->info[rix].shortPreamble;
		try0 = ATH_TXMGTTRY;
		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = HAL_PKT_TYPE_NORMAL;	/* default */
		/*
		 * Data frames: multicast frames go out at a fixed rate,
		 * EAPOL frames use the mgmt frame rate; otherwise consult
		 * the rate control module for the rate to use.
		 */
		if (ismcast) {
			rix = an->an_mcastrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = 1;
		} else if (m0->m_flags & M_EAPOL) {
			/* XXX? maybe always use long preamble? */
			rix = an->an_mgmtrix;
			txrate = rt->info[rix].rateCode;
			if (shortPreamble)
				txrate |= rt->info[rix].shortPreamble;
			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
		} else {
			/*
			 * Do rate lookup on each TX, rather than using
			 * the hard-coded TX information decided here.
			 */
			ismrr = 1;
			bf->bf_state.bfs_doratelookup = 1;
		}
		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
			flags |= HAL_TXDESC_NOACK;
		break;
	default:
		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
		    wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
		/* XXX statistic */
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}

	/*
	 * There are two known scenarios where the frame AC doesn't match
	 * what the destination TXQ is.
	 *
	 * + non-QoS frames (eg management?) that the net80211 stack has
	 *   assigned a higher AC to, but since it's a non-QoS TID, it's
	 *   being thrown into TID 16.  TID 16 gets the AC_BE queue.
	 *   It's quite possible that management frames should just be
	 *   direct dispatched to hardware rather than go via the software
	 *   queue; that should be investigated in the future.  There are
	 *   some specific scenarios where this doesn't make sense, mostly
	 *   surrounding ADDBA request/response - hence why that is special
	 *   cased.
	 *
	 * + Multicast frames going into the VAP mcast queue.  That shows up
	 *   dynamically and can't really be "checked" here.
	 *
	 * This driver should eventually support separate TID and TXQ locking,
	 * allowing for arbitrary AC frames to appear on arbitrary software
	 * queues, being queued to the "correct" hardware queue when needed.
	 */
	if (txq != sc->sc_ac2q[pri]) {
		DPRINTF(sc, ATH_DEBUG_XMIT,
		    "%s: txq=%p (%d), pri=%d, pri txq=%p (%d)\n",
		    __func__,
		    txq,
		    txq->axq_qnum,
		    pri,
		    sc->sc_ac2q[pri],
		    sc->sc_ac2q[pri]->axq_qnum);
	}

	/*
	 * Calculate miscellaneous flags.
	 */
	if (ismcast) {
		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
	} else if (pktlen > vap->iv_rtsthreshold &&
	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}
	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
		sc->sc_stats.ast_tx_noack++;
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
		sc->sc_stats.ast_tdma_ack++;
		/* XXX free tx dmamap */
		ath_freetx(m0);
		return EIO;
	}
#endif

	/*
	 * Determine if a tx interrupt should be generated for
	 * this descriptor.  We take a tx interrupt to reap
	 * descriptors when the h/w hits an EOL condition or
	 * when the descriptor is specifically marked to generate
	 * an interrupt.  We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames.  Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done, but if done too aggressively it can cause senders
	 * to back up.
	 *
	 * NB: use >= to deal with sc_txintrperiod changing
	 *     dynamically through sysctl.
	 */
	if (flags & HAL_TXDESC_INTREQ) {
		txq->axq_intrcnt = 0;
	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
		flags |= HAL_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}
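	/*
	 * NB: a hypothetical run of the logic above: with
	 * sc_txintrperiod = 5 and no INTREQ already set, every fifth
	 * frame on this TXQ gets HAL_TXDESC_INTREQ, so completion
	 * interrupts (and thus descriptor reaping) happen at most
	 * once per five frames on an otherwise idle queue.
	 */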
	/* This point forward is actual TX bits */

	/*
	 * At this point we are committed to sending the frame
	 * and we don't need to look at m_nextpkt; clear it in
	 * case this frame is part of frag chain.
	 */
	m0->m_nextpkt = NULL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	if (ieee80211_radiotap_active_vap(vap)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (iswep)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (isfrag)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni);
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/* Blank the legacy rate array */
	bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));

	/*
	 * ath_buf_set_rate needs at least one rate/try to setup
	 * the rate scenario.
	 */
	bf->bf_state.bfs_rc[0].rix = rix;
	bf->bf_state.bfs_rc[0].tries = try0;
	bf->bf_state.bfs_rc[0].ratecode = txrate;

	/* Store the decided rate index values away */
	bf->bf_state.bfs_pktlen = pktlen;
	bf->bf_state.bfs_hdrlen = hdrlen;
	bf->bf_state.bfs_atype = atype;
	bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni);
	bf->bf_state.bfs_txrate0 = txrate;
	bf->bf_state.bfs_try0 = try0;
	bf->bf_state.bfs_keyix = keyix;
	bf->bf_state.bfs_txantenna = sc->sc_txantenna;
	bf->bf_state.bfs_txflags = flags;
	bf->bf_state.bfs_shpream = shortPreamble;

	/* XXX this should be done in ath_tx_setrate() */
	bf->bf_state.bfs_ctsrate0 = 0;	/* ie, no hard-coded ctsrate */
	bf->bf_state.bfs_ctsrate = 0;	/* calculated later */
	bf->bf_state.bfs_ctsduration = 0;
	bf->bf_state.bfs_ismrr = ismrr;

	return 0;
}
/*
 * Queue a frame to the hardware or software queue.
 *
 * This can be called by the net80211 code.
 *
 * XXX what about locking? Or, push the seqno assign into the
 * XXX aggregate scheduler so its serialised?
 *
 * XXX When sending management frames via ath_raw_xmit(),
 * should CLRDMASK be set unconditionally?
 */
int
ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
    struct ath_buf *bf, struct mbuf *m0)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_vap *avp = ATH_VAP(vap);
	int r = 0;
	u_int pri;
	int tid;
	struct ath_txq *txq;
	int ismcast;
	const struct ieee80211_frame *wh;
	int is_ampdu, is_ampdu_tx, is_ampdu_pending;
	ieee80211_seq seqno;
	uint8_t type, subtype;
	int queue_to_head;

	ATH_TX_LOCK_ASSERT(sc);

	/*
	 * Determine the target hardware queue.
	 *
	 * For multicast frames, the txq gets overridden appropriately
	 * depending upon the state of PS.
	 *
	 * For any other frame, we do a TID/QoS lookup inside the frame
	 * to see what the TID should be. If it's a non-QoS frame, the
	 * AC and TID are overridden. The TID/TXQ code assumes the
	 * TID is on a predictable hardware TXQ, so we don't support
	 * having a node TID queued to multiple hardware TXQs.
	 * This may change in the future but would require some locking
	 * fudgery.
	 */
	pri = ath_tx_getac(sc, m0);
	tid = ath_tx_gettid(sc, m0);

	txq = sc->sc_ac2q[pri];
	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	/*
	 * Enforce how deep the multicast queue can grow.
	 *
	 * XXX duplicated in ath_raw_xmit().
	 */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
		    > sc->sc_txq_mcastq_maxdepth) {
			sc->sc_stats.ast_tx_mcastq_overflow++;
			m_freem(m0);
			return (ENOBUFS);
		}
	}

	/*
	 * Enforce how deep the unicast queue can grow.
	 *
	 * If the node is in power save then we don't want
	 * the software queue to grow too deep, or a node may
	 * end up consuming all of the ath_buf entries.
	 *
	 * For now, only do this for DATA frames.
	 *
	 * We will want to cap how many management/control
	 * frames get punted to the software queue so it doesn't
	 * fill up. But the correct solution isn't yet obvious.
	 * In any case, this check should at least let frames pass
	 * that we are direct-dispatching.
	 *
	 * XXX TODO: duplicate this to the raw xmit path!
	 */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    ATH_NODE(ni)->an_is_powersave &&
	    ATH_NODE(ni)->an_swq_depth >
	    sc->sc_txq_node_psq_maxdepth) {
		sc->sc_stats.ast_tx_node_psq_overflow++;
		m_freem(m0);
		return (ENOBUFS);
	}

	/* A-MPDU TX */
	is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
	is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
	is_ampdu = is_ampdu_tx | is_ampdu_pending;

	DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ac=%d, is_ampdu=%d\n",
	    __func__, tid, pri, is_ampdu);

	/* Set local packet state, used to queue packets to hardware */
	bf->bf_state.bfs_tid = tid;
	bf->bf_state.bfs_tx_queue = txq->axq_qnum;
	bf->bf_state.bfs_pri = pri;

	/*
	 * When servicing one or more stations in power-save mode
	 * (or) if there is some mcast data waiting on the mcast
	 * queue (to prevent out of order delivery) multicast frames
	 * must be buffered until after the beacon.
	 *
	 * TODO: we should lock the mcastq before we check the length.
	 */
	if (sc->sc_cabq_enable && ismcast &&
	    (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) {
		txq = &avp->av_mcastq;
		/*
		 * Mark the frame as eventually belonging on the CAB
		 * queue, so the descriptor setup functions will
		 * correctly initialise the descriptor 'qcuId' field.
		 */
		bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum;
	}

	/* Do the generic frame setup */
	/* XXX should just bzero the bf_state? */
	bf->bf_state.bfs_dobaw = 0;

	/* A-MPDU TX? Manually set sequence number */
	/*
	 * Don't do it whilst pending; the net80211 layer still
	 * assigns them.
	 */
	if (is_ampdu_tx) {
		/*
		 * Always call; this function will
		 * handle making sure that null data frames
		 * don't get a sequence number from the current
		 * TID and thus mess with the BAW.
		 */
		seqno = ath_tx_tid_seqno_assign(sc, ni, bf, m0);

		/*
		 * Don't add QoS NULL frames to the BAW.
		 */
		if (IEEE80211_QOS_HAS_SEQ(wh) &&
		    subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL) {
			bf->bf_state.bfs_dobaw = 1;
		}
	}

	/*
	 * If needed, the sequence number has been assigned.
	 * Squirrel it away somewhere easy to get to.
	 */
	bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT;

	/* Is ampdu pending? fetch the seqno and print it out */
	if (is_ampdu_pending)
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: tid %d: ampdu pending, seqno %d\n",
		    __func__, tid, M_SEQNO_GET(m0));

	/* This also sets up the DMA map */
	r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
	if (r != 0)
		return (r);

	/* At this point m0 could have changed! */
	m0 = bf->bf_m;

	/*
	 * If it's a multicast frame, do a direct-dispatch to the
	 * destination hardware queue. Don't bother software
	 * queuing it.
	 */
	/*
	 * If it's a BAR frame, do a direct dispatch to the
	 * destination hardware queue. Don't bother software
	 * queuing it, as the TID will now be paused.
	 * Sending a BAR frame can occur from the net80211 txa timer
	 * (ie, retries) or from the ath txtask (completion call.)
	 * It queues directly to hardware because the TID is paused
	 * at this point (and won't be unpaused until the BAR has
	 * either been TXed successfully or max retries has been
	 * reached.)
	 */
	/*
	 * Until things are better debugged - if this node is asleep
	 * and we're sending it a non-BAR frame, direct dispatch it.
	 * Why? Because we need to figure out what's actually being
	 * sent - eg, during reassociation/reauthentication after
	 * the node (last) disappeared whilst asleep, the driver should
	 * have unpaused/unsleep'ed the node. So until that is
	 * sorted out, use this workaround.
	 */
#if 1
	if (txq == &avp->av_mcastq) {
		DPRINTF(sc, ATH_DEBUG_SW_TX,
		    "%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		ath_tx_xmit_normal(sc, txq, bf);
	} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
	    &queue_to_head)) {
		ath_tx_swq(sc, ni, txq, queue_to_head, bf);
	} else {
		bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
		ath_tx_xmit_normal(sc, txq, bf);
	}
#else
	/*
	 * For now, since there's no software queue,
	 * direct-dispatch to the hardware.
	 */
	bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
	/*
	 * Update the current leak count if
	 * we're leaking frames; and set the
	 * MORE flag as appropriate.
	 */
	ath_tx_leak_count_update(sc, tid, bf);
	ath_tx_xmit_normal(sc, txq, bf);
#endif

	return (0);
}
2088 ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
2089 struct ath_buf *bf, struct mbuf *m0,
2090 const struct ieee80211_bpf_params *params)
2092 struct ifnet *ifp = sc->sc_ifp;
2093 struct ieee80211com *ic = ifp->if_l2com;
2094 struct ath_hal *ah = sc->sc_ah;
2095 struct ieee80211vap *vap = ni->ni_vap;
2096 int error, ismcast, ismrr;
2097 int keyix, hdrlen, pktlen, try0, txantenna;
2098 u_int8_t rix, txrate;
2099 struct ieee80211_frame *wh;
2102 const HAL_RATE_TABLE *rt;
2103 struct ath_desc *ds;
2107 uint8_t type, subtype;
2110 ATH_TX_LOCK_ASSERT(sc);
2112 wh = mtod(m0, struct ieee80211_frame *);
2113 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
2114 hdrlen = ieee80211_anyhdrsize(wh);
2116 * Packet length must not include any
2117 * pad bytes; deduct them here.
2119 /* XXX honor IEEE80211_BPF_DATAPAD */
2120 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;
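/*
 * Worked example (illustrative): a QoS data header is 26 bytes
 * and is carried padded to 28 for 32-bit alignment, so
 * (hdrlen & 3) == 2 recovers the pad bytes to subtract, while
 * IEEE80211_CRC_LEN (4) is added back since the FCS goes out
 * on air.
 */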
2122 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2123 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2125 ATH_KTR(sc, ATH_KTR_TX, 2,
2126 "ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);
2128 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: ismcast=%d\n",
2131 pri = params->ibp_pri & 3;
2132 /* Override pri if the frame isn't a QoS one */
2133 if (! IEEE80211_QOS_HAS_SEQ(wh))
2134 pri = ath_tx_getac(sc, m0);
2136 /* XXX If it's an ADDBA, override the correct queue */
2137 do_override = ath_tx_action_frame_override_queue(sc, ni, m0, &o_tid);
2139 /* Map ADDBA to the correct priority */
2142 DPRINTF(sc, ATH_DEBUG_XMIT,
2143 "%s: overriding tid %d pri %d -> %d\n",
2144 __func__, o_tid, pri, TID_TO_WME_AC(o_tid));
2146 pri = TID_TO_WME_AC(o_tid);
2149 /* Handle encryption twiddling if needed */
2150 if (! ath_tx_tag_crypto(sc, ni,
2151 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
2152 &hdrlen, &pktlen, &keyix)) {
2156 /* packet header may have moved, reset our local pointer */
2157 wh = mtod(m0, struct ieee80211_frame *);
2159 /* Do the generic frame setup */
2160 /* XXX should just bzero the bf_state? */
2161 bf->bf_state.bfs_dobaw = 0;
2163 error = ath_tx_dmasetup(sc, bf, m0);
2166 m0 = bf->bf_m; /* NB: may have changed */
2167 wh = mtod(m0, struct ieee80211_frame *);
2168 bf->bf_node = ni; /* NB: held reference */
2170 /* Always enable CLRDMASK for raw frames for now.. */
2171 flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */
2172 flags |= HAL_TXDESC_INTREQ; /* force interrupt */
2173 if (params->ibp_flags & IEEE80211_BPF_RTS)
2174 flags |= HAL_TXDESC_RTSENA;
2175 else if (params->ibp_flags & IEEE80211_BPF_CTS) {
2176 /* XXX assume 11g/11n protection? */
2177 bf->bf_state.bfs_doprot = 1;
2178 flags |= HAL_TXDESC_CTSENA;
2180 /* XXX leave ismcast to injector? */
2181 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
2182 flags |= HAL_TXDESC_NOACK;
2184 rt = sc->sc_currates;
2185 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
2186 rix = ath_tx_findrix(sc, params->ibp_rate0);
2187 txrate = rt->info[rix].rateCode;
2188 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
2189 txrate |= rt->info[rix].shortPreamble;
2191 try0 = params->ibp_try0;
2192 ismrr = (params->ibp_try1 != 0);
2193 txantenna = params->ibp_pri >> 2;
2194 if (txantenna == 0) /* XXX? */
2195 txantenna = sc->sc_txantenna;
2198 * Since ctsrate is fixed, store it away for later
2199 * use when the descriptor fields are being set.
2201 if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA))
2202 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate;
2205 * NB: we mark all packets as type PSPOLL so the h/w won't
2206 * set the sequence number, duration, etc.
2208 atype = HAL_PKT_TYPE_PSPOLL;
2210 if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
2211 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
2212 sc->sc_hwmap[rix].ieeerate, -1);
2214 if (ieee80211_radiotap_active_vap(vap)) {
2215 u_int64_t tsf = ath_hal_gettsf64(ah);
2217 sc->sc_tx_th.wt_tsf = htole64(tsf);
2218 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
2219 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
2220 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2221 if (m0->m_flags & M_FRAG)
2222 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
2223 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
2224 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power,
2225 ieee80211_get_node_txpower(ni));
2226 sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
2228 ieee80211_radiotap_tx(vap, m0);
2232 * Formulate first tx descriptor with tx controls.
2235 /* XXX check return value? */
2237 /* Store the decided rate index values away */
2238 bf->bf_state.bfs_pktlen = pktlen;
2239 bf->bf_state.bfs_hdrlen = hdrlen;
2240 bf->bf_state.bfs_atype = atype;
2241 bf->bf_state.bfs_txpower = MIN(params->ibp_power,
2242 ieee80211_get_node_txpower(ni));
2243 bf->bf_state.bfs_txrate0 = txrate;
2244 bf->bf_state.bfs_try0 = try0;
2245 bf->bf_state.bfs_keyix = keyix;
2246 bf->bf_state.bfs_txantenna = txantenna;
2247 bf->bf_state.bfs_txflags = flags;
2248 bf->bf_state.bfs_shpream =
2249 !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE);
2251 /* Set local packet state, used to queue packets to hardware */
2252 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri);
2253 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum;
2254 bf->bf_state.bfs_pri = pri;
2256 /* XXX this should be done in ath_tx_setrate() */
2257 bf->bf_state.bfs_ctsrate = 0;
2258 bf->bf_state.bfs_ctsduration = 0;
2259 bf->bf_state.bfs_ismrr = ismrr;
2261 /* Blank the legacy rate array */
2262 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc));
2264 bf->bf_state.bfs_rc[0].rix =
2265 ath_tx_findrix(sc, params->ibp_rate0);
2266 bf->bf_state.bfs_rc[0].tries = try0;
2267 bf->bf_state.bfs_rc[0].ratecode = txrate;
2272 rix = ath_tx_findrix(sc, params->ibp_rate1);
2273 bf->bf_state.bfs_rc[1].rix = rix;
2274 bf->bf_state.bfs_rc[1].tries = params->ibp_try1;
2276 rix = ath_tx_findrix(sc, params->ibp_rate2);
2277 bf->bf_state.bfs_rc[2].rix = rix;
2278 bf->bf_state.bfs_rc[2].tries = params->ibp_try2;
2280 rix = ath_tx_findrix(sc, params->ibp_rate3);
2281 bf->bf_state.bfs_rc[3].rix = rix;
2282 bf->bf_state.bfs_rc[3].tries = params->ibp_try3;
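/*
 * Worked example (illustrative values only): with ibp_rate0..3 of
 * {54M, 24M, 6M, 1M} and ibp_try0..3 of {2, 2, 2, 4}, the hardware
 * walks the series in order for up to ten transmit attempts in
 * total before the frame is reported as failed.
 */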
2285 * All the required rate control decisions have been made;
2286 * fill in the rc flags.
2288 ath_tx_rate_fill_rcflags(sc, bf);
2290 /* NB: no buffered multicast in power save support */
2293 * If we're overriding the ADDBA destination, dump directly
2294 * into the hardware queue, right after any pending
2295 * frames to that node are queued.
2297 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: dooverride=%d\n",
2298 __func__, do_override);
2302 * Put addba frames in the right place in the right TID/HWQ.
2305 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2307 * XXX if it's addba frames, should we be leaking
2308 * them out via the frame leak method?
2309 * XXX for now let's not risk it; but we may wish
2310 * to investigate this later.
2312 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2313 } else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
2315 /* Queue to software queue */
2316 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
2318 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2319 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2322 /* Direct-dispatch to the hardware */
2323 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
2325 * Update the current leak count if
2326 * we're leaking frames; and set the
2327 * MORE flag as appropriate.
2329 ath_tx_leak_count_update(sc, tid, bf);
2330 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
2338 * This can be called by net80211.
2341 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
2342 const struct ieee80211_bpf_params *params)
2344 struct ieee80211com *ic = ni->ni_ic;
2345 struct ifnet *ifp = ic->ic_ifp;
2346 struct ath_softc *sc = ifp->if_softc;
2348 struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
2352 if (sc->sc_inreset_cnt > 0) {
2353 DPRINTF(sc, ATH_DEBUG_XMIT,
2354 "%s: sc_inreset_cnt > 0; bailing\n", __func__);
2359 sc->sc_txstart_cnt++;
2364 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
2365 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__,
2366 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
2367 "!running" : "invalid");
2374 * Enforce how deep the multicast queue can grow.
2376 * XXX duplicated in ath_tx_start().
2378 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2379 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth
2380 > sc->sc_txq_mcastq_maxdepth) {
2381 sc->sc_stats.ast_tx_mcastq_overflow++;
2392 * Grab a TX buffer and associated resources.
2394 bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
2396 sc->sc_stats.ast_tx_nobuf++;
2401 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: m=%p, params=%p, bf=%p\n",
2404 if (params == NULL) {
2406 * Legacy path; interpret frame contents to decide
2407 * precisely how to send the frame.
2409 if (ath_tx_start(sc, ni, bf, m)) {
2410 error = EIO; /* XXX */
2415 * Caller supplied explicit parameters to use in
2416 * sending the frame.
2418 if (ath_tx_raw_start(sc, ni, bf, m, params)) {
2419 error = EIO; /* XXX */
2423 sc->sc_wd_timer = 5;
2425 sc->sc_stats.ast_tx_raw++;
2428 * Update the TIM - if there's anything queued to the
2429 * software queue and power save is enabled, we should
2432 ath_tx_update_tim(sc, ni, 1);
2437 sc->sc_txstart_cnt--;
2442 ATH_KTR(sc, ATH_KTR_TX, 3, "ath_raw_xmit: bad2: m=%p, params=%p, "
2448 ath_returnbuf_head(sc, bf);
2449 ATH_TXBUF_UNLOCK(sc);
2455 sc->sc_txstart_cnt--;
2458 ATH_KTR(sc, ATH_KTR_TX, 2, "ath_raw_xmit: bad0: m=%p, params=%p",
2461 sc->sc_stats.ast_tx_raw_fail++;
2462 ieee80211_free_node(ni);
2467 /* Some helper functions */
2470 * ADDBA (and potentially others) need to be placed in the same
2471 * hardware queue as the TID/node it's relating to. This is so
2472 * it goes out after any pending non-aggregate frames to the
2475 * If this isn't done, the ADDBA can go out before the frames
2476 * queued in hardware. Even though these frames have sequence
2477 * numbers -earlier- than the ADDBA (and no frames whose
2478 * sequence numbers are after the ADDBA should be queued
2479 * before it!), they'll arrive after the ADDBA - and the receiving end
2480 * will simply drop them as being out of the BAW.
2482 * The frames can't be appended to the TID software queue - it'll
2483 * never be sent out. So these frames have to be directly
2484 * dispatched to the hardware, rather than queued in software.
2485 * So if this function returns true, the TXQ has to be
2486 * overridden and it has to be directly dispatched.
2488 * It's a dirty hack, but someone's gotta do it.
2492 * XXX doesn't belong here!
2495 ieee80211_is_action(struct ieee80211_frame *wh)
2497 /* Type: Management frame? */
2498 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
2499 IEEE80211_FC0_TYPE_MGT)
2502 /* Subtype: Action frame? */
2503 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
2504 IEEE80211_FC0_SUBTYPE_ACTION)
2510 #define MS(_v, _f) (((_v) & _f) >> _f##_S)
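/*
 * Usage sketch (illustrative): MS() pairs a mask constant with its
 * matching "_S" shift constant, so
 *
 *	MS(baparamset, IEEE80211_BAPS_TID)
 *
 * expands to
 * ((baparamset & IEEE80211_BAPS_TID) >> IEEE80211_BAPS_TID_S),
 * yielding just the TID bits of the ADDBA parameter set.
 */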
2512 * Return an alternate TID for ADDBA request frames.
2514 * Yes, this likely should be done in the net80211 layer.
2517 ath_tx_action_frame_override_queue(struct ath_softc *sc,
2518 struct ieee80211_node *ni,
2519 struct mbuf *m0, int *tid)
2521 struct ieee80211_frame *wh = mtod(m0, struct ieee80211_frame *);
2522 struct ieee80211_action_ba_addbarequest *ia;
2524 uint16_t baparamset;
2526 /* Not action frame? Bail */
2527 if (! ieee80211_is_action(wh))
2530 /* XXX Not needed for frames we send? */
2532 /* Correct length? */
2533 if (! ieee80211_parse_action(ni, m0))
2537 /* Extract out action frame */
2538 frm = (u_int8_t *)&wh[1];
2539 ia = (struct ieee80211_action_ba_addbarequest *) frm;
2541 /* Not ADDBA? Bail */
2542 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA)
2544 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST)
2547 /* Extract TID, return it */
2548 baparamset = le16toh(ia->rq_baparamset);
2549 *tid = (int) MS(baparamset, IEEE80211_BAPS_TID);
2555 /* Per-node software queue operations */
2558 * Add the current packet to the given BAW.
2559 * It is assumed that the current packet
2561 * + fits inside the BAW;
2562 * + already has had a sequence number allocated.
2564 * Since the BAW status may be modified by both the ath task and
2565 * the net80211/ifnet contexts, the TID must be locked.
2568 ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
2569 struct ath_tid *tid, struct ath_buf *bf)
2572 struct ieee80211_tx_ampdu *tap;
2574 ATH_TX_LOCK_ASSERT(sc);
2576 if (bf->bf_state.bfs_isretried)
2579 tap = ath_tx_get_tx_tid(an, tid->tid);
2581 if (! bf->bf_state.bfs_dobaw) {
2582 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2583 "%s: dobaw=0, seqno=%d, window %d:%d\n",
2584 __func__, SEQNO(bf->bf_state.bfs_seqno),
2585 tap->txa_start, tap->txa_wnd);
2588 if (bf->bf_state.bfs_addedbaw)
2589 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2590 "%s: re-added? tid=%d, seqno %d; window %d:%d; "
2591 "baw head=%d tail=%d\n",
2592 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2593 tap->txa_start, tap->txa_wnd, tid->baw_head,
2597 * Verify that the given sequence number is not outside of the
2598 * BAW. Complain loudly if that's the case.
2600 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2601 SEQNO(bf->bf_state.bfs_seqno))) {
2602 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2603 "%s: bf=%p: outside of BAW?? tid=%d, seqno %d; window %d:%d; "
2604 "baw head=%d tail=%d\n",
2605 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2606 tap->txa_start, tap->txa_wnd, tid->baw_head,
2611 * ni->ni_txseqs[] is the currently allocated seqno.
2612 * the txa state contains the current baw start.
2614 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
2615 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2616 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2617 "%s: tid=%d, seqno %d; window %d:%d; index=%d cindex=%d "
2618 "baw head=%d tail=%d\n",
2619 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno),
2620 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head,
2625 assert(tid->tx_buf[cindex] == NULL);
2627 if (tid->tx_buf[cindex] != NULL) {
2628 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2629 "%s: ba packet dup (index=%d, cindex=%d, "
2630 "head=%d, tail=%d)\n",
2631 __func__, index, cindex, tid->baw_head, tid->baw_tail);
2632 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2633 "%s: BA bf: %p; seqno=%d ; new bf: %p; seqno=%d\n",
2635 tid->tx_buf[cindex],
2636 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno),
2638 SEQNO(bf->bf_state.bfs_seqno)
2641 tid->tx_buf[cindex] = bf;
2643 if (index >= ((tid->baw_tail - tid->baw_head) &
2644 (ATH_TID_MAX_BUFS - 1))) {
2645 tid->baw_tail = cindex;
2646 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
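/*
 * Worked example (illustrative): with txa_start=100 and seqno=103,
 * ATH_BA_INDEX() yields 3; with baw_head=10 the slot used is
 * cindex = (10 + 3) & (ATH_TID_MAX_BUFS - 1) = 13, so tx_buf[]
 * behaves as a circular map from in-BAW sequence numbers to
 * ath_buf pointers.
 */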
2651 * Flip the BAW buffer entry over from the existing one to the new one.
2653 * When software retransmitting a (sub-)frame, it is entirely possible that
2654 * the frame ath_buf is marked as BUSY and can't be immediately reused.
2655 * In that instance the buffer is cloned and the new buffer is used for
2656 * retransmit. We thus need to update the ath_buf slot in the BAW buf
2657 * tracking array to maintain consistency.
2660 ath_tx_switch_baw_buf(struct ath_softc *sc, struct ath_node *an,
2661 struct ath_tid *tid, struct ath_buf *old_bf, struct ath_buf *new_bf)
2664 struct ieee80211_tx_ampdu *tap;
2665 int seqno = SEQNO(old_bf->bf_state.bfs_seqno);
2667 ATH_TX_LOCK_ASSERT(sc);
2669 tap = ath_tx_get_tx_tid(an, tid->tid);
2670 index = ATH_BA_INDEX(tap->txa_start, seqno);
2671 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2674 * Just warn for now; if it happens then we should find out
2675 * about it. It's highly likely the aggregation session will
2678 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) {
2679 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2680 "%s: retransmitted buffer"
2681 " has mismatching seqno's, BA session may hang.\n",
2683 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2684 "%s: old seqno=%d, new_seqno=%d\n", __func__,
2685 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno);
2688 if (tid->tx_buf[cindex] != old_bf) {
2689 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2690 "%s: ath_buf pointer incorrect; "
2691 " has m BA session may hang.\n", __func__);
2692 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2693 "%s: old bf=%p, new bf=%p\n", __func__, old_bf, new_bf);
2696 tid->tx_buf[cindex] = new_bf;
2700 * seq_start - left edge of BAW
2701 * seq_next - current/next sequence number to allocate
2703 * Since the BAW status may be modified by both the ath task and
2704 * the net80211/ifnet contexts, the TID must be locked.
2707 ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
2708 struct ath_tid *tid, const struct ath_buf *bf)
2711 struct ieee80211_tx_ampdu *tap;
2712 int seqno = SEQNO(bf->bf_state.bfs_seqno);
2714 ATH_TX_LOCK_ASSERT(sc);
2716 tap = ath_tx_get_tx_tid(an, tid->tid);
2717 index = ATH_BA_INDEX(tap->txa_start, seqno);
2718 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
2720 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2721 "%s: tid=%d, baw=%d:%d, seqno=%d, index=%d, cindex=%d, "
2722 "baw head=%d, tail=%d\n",
2723 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index,
2724 cindex, tid->baw_head, tid->baw_tail);
2727 * If this occurs then we have a big problem - something else
2728 * has slid tap->txa_start along without updating the BAW
2729 * tracking start/end pointers. Thus the TX BAW state is now
2730 * completely busted.
2732 * But for now, since I haven't yet fixed TDMA and buffer cloning,
2733 * it's quite possible that a cloned buffer is making its way
2734 * here and causing it to fire off. Disable TDMA for now.
2736 if (tid->tx_buf[cindex] != bf) {
2737 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2738 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n",
2739 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
2740 tid->tx_buf[cindex],
2741 (tid->tx_buf[cindex] != NULL) ?
2742 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1);
2745 tid->tx_buf[cindex] = NULL;
2747 while (tid->baw_head != tid->baw_tail &&
2748 !tid->tx_buf[tid->baw_head]) {
2749 INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
2750 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
2752 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
2753 "%s: baw is now %d:%d, baw head=%d\n",
2754 __func__, tap->txa_start, tap->txa_wnd, tid->baw_head);
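/*
 * Worked example (illustrative): if this completion freed the
 * baw_head slot and the next two slots were already NULL, the loop
 * above advances txa_start and baw_head by 3 (modulo
 * IEEE80211_SEQ_RANGE and ATH_TID_MAX_BUFS respectively), sliding
 * the BAW left edge past the whole run of completed frames at once.
 */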
2758 ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
2761 struct ieee80211_frame *wh;
2763 ATH_TX_LOCK_ASSERT(sc);
2765 if (tid->an->an_leak_count > 0) {
2766 wh = mtod(bf->bf_m, struct ieee80211_frame *);
2769 * Update MORE based on the software/net80211 queue states.
2771 if ((tid->an->an_stack_psq > 0)
2772 || (tid->an->an_swq_depth > 0))
2773 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
2775 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;
2777 DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
2778 "%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
2780 tid->an->an_node.ni_macaddr,
2782 tid->an->an_leak_count,
2783 tid->an->an_stack_psq,
2784 tid->an->an_swq_depth,
2785 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));
2788 * Re-sync the underlying buffer.
2790 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
2791 BUS_DMASYNC_PREWRITE);
2793 tid->an->an_leak_count--;
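/*
 * Example (illustrative): when leaking a PS-POLL response while
 * three frames still sit in the software queue, MORE_DATA is set so
 * the station keeps polling; the final leaked frame (stack psq and
 * swq both empty) goes out with MORE_DATA clear.
 */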
2798 ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
2801 ATH_TX_LOCK_ASSERT(sc);
2803 if (tid->an->an_leak_count > 0) {
2812 * Mark the current node/TID as ready to TX.
2814 * This is done to make it easy for the software scheduler to
2815 * find which nodes have data to send.
2817 * The TXQ lock must be held.
2820 ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
2822 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2824 ATH_TX_LOCK_ASSERT(sc);
2827 * If we are leaking out a frame to this destination
2828 * for PS-POLL, ensure that we allow scheduling to
2831 if (! ath_tx_tid_can_tx_or_sched(sc, tid))
2832 return; /* paused, can't schedule yet */
2835 return; /* already scheduled */
2841 * If this is a sleeping node we're leaking to, give
2842 * it a higher priority. This is so bad for QoS it hurts.
2844 if (tid->an->an_leak_count) {
2845 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
2847 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2852 * We can't do the above - it'll confuse the TXQ software
2853 * scheduler which will keep checking the _head_ TID
2854 * in the list to see if it has traffic. If we queue
2855 * a TID to the head of the list and it doesn't transmit,
2856 * we'll check it again.
2858 * So, get the rest of this leaking frames support working
2859 * and reliable first and _then_ optimise it so they're
2860 * pushed out in front of any other pending software
2861 * queued frames.
2863 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
2867 * Mark the current node as no longer needing to be polled for
2870 * The TXQ lock must be held.
2873 ath_tx_tid_unsched(struct ath_softc *sc, struct ath_tid *tid)
2875 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
2877 ATH_TX_LOCK_ASSERT(sc);
2879 if (tid->sched == 0)
2883 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem);
2887 * Assign a sequence number manually to the given frame.
2889 * This should only be called for A-MPDU TX frames.
2891 static ieee80211_seq
2892 ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
2893 struct ath_buf *bf, struct mbuf *m0)
2895 struct ieee80211_frame *wh;
2897 ieee80211_seq seqno;
2901 wh = mtod(m0, struct ieee80211_frame *);
2902 pri = M_WME_GETAC(m0); /* honor classification */
2903 tid = WME_AC_TO_TID(pri);
2904 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pri=%d, tid=%d, qos has seq=%d\n",
2905 __func__, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
2907 /* XXX Is it a control frame? Ignore */
2909 /* Does the packet require a sequence number? */
2910 if (! IEEE80211_QOS_HAS_SEQ(wh))
2913 ATH_TX_LOCK_ASSERT(sc);
2916 * Is it a QOS NULL Data frame? Give it a sequence number from
2917 * the default TID (IEEE80211_NONQOS_TID.)
2919 * The RX path of everything I've looked at doesn't include the NULL
2920 * data frame sequence number in the aggregation state updates, so
2921 * assigning it a sequence number there will cause a BAW hole on the
2924 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2925 if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
2926 /* XXX no locking for this TID? This is a bit of a problem. */
2927 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
2928 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
2930 /* Manually assign sequence number */
2931 seqno = ni->ni_txseqs[tid];
2932 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE);
2934 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
2935 M_SEQNO_SET(m0, seqno);
2937 /* Return so caller can do something with it if needed */
2938 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: -> seqno=%d\n", __func__, seqno);
2943 * Attempt to direct dispatch an aggregate frame to hardware.
2944 * If the frame is out of BAW, queue.
2945 * Otherwise, schedule it as a single frame.
2948 ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
2949 struct ath_txq *txq, struct ath_buf *bf)
2951 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid];
2952 struct ieee80211_tx_ampdu *tap;
2954 ATH_TX_LOCK_ASSERT(sc);
2956 tap = ath_tx_get_tx_tid(an, tid->tid);
2959 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
2960 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2961 /* XXX don't sched - we're paused! */
2965 /* outside baw? queue */
2966 if (bf->bf_state.bfs_dobaw &&
2967 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
2968 SEQNO(bf->bf_state.bfs_seqno)))) {
2969 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
2970 ath_tx_tid_sched(sc, tid);
2975 * This is a temporary check and should be removed once
2976 * all the relevant code paths have been fixed.
2978 * During aggregate retries, it's possible that the head
2979 * frame will fail (which has the bfs_aggr and bfs_nframes
2980 * fields set for said aggregate) and will be retried as
2981 * a single frame. In this instance, the values should
2982 * be reset or the completion code will get upset with you.
2984 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) {
2985 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
2986 "%s: bfs_aggr=%d, bfs_nframes=%d\n", __func__,
2987 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes);
2988 bf->bf_state.bfs_aggr = 0;
2989 bf->bf_state.bfs_nframes = 1;
2992 /* Update CLRDMASK just before this frame is queued */
2993 ath_tx_update_clrdmask(sc, tid, bf);
2995 /* Direct dispatch to hardware */
2996 ath_tx_do_ratelookup(sc, bf);
2997 ath_tx_calc_duration(sc, bf);
2998 ath_tx_calc_protection(sc, bf);
2999 ath_tx_set_rtscts(sc, bf);
3000 ath_tx_rate_fill_rcflags(sc, bf);
3001 ath_tx_setds(sc, bf);
3004 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++;
3006 /* Track per-TID hardware queue depth correctly */
3010 if (bf->bf_state.bfs_dobaw) {
3011 ath_tx_addto_baw(sc, an, tid, bf);
3012 bf->bf_state.bfs_addedbaw = 1;
3015 /* Set completion handler, multi-frame aggregate or not */
3016 bf->bf_comp = ath_tx_aggr_comp;
3019 * Update the current leak count if
3020 * we're leaking frames; and set the
3021 * MORE flag as appropriate.
3023 ath_tx_leak_count_update(sc, tid, bf);
3025 /* Hand off to hardware */
3026 ath_tx_handoff(sc, txq, bf);
3030 * Attempt to send the packet.
3031 * If the queue isn't busy, direct-dispatch.
3032 * If the queue is busy enough, queue the given packet on the
3033 * relevant software queue.
3036 ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
3037 struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
3039 struct ath_node *an = ATH_NODE(ni);
3040 struct ieee80211_frame *wh;
3041 struct ath_tid *atid;
3043 struct mbuf *m0 = bf->bf_m;
3045 ATH_TX_LOCK_ASSERT(sc);
3047 /* Fetch the TID - non-QoS frames get assigned to TID 16 */
3048 wh = mtod(m0, struct ieee80211_frame *);
3049 pri = ath_tx_getac(sc, m0);
3050 tid = ath_tx_gettid(sc, m0);
3051 atid = &an->an_tid[tid];
3053 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p, pri=%d, tid=%d, qos=%d\n",
3054 __func__, bf, pri, tid, IEEE80211_QOS_HAS_SEQ(wh));
3056 /* Set local packet state, used to queue packets to hardware */
3057 /* XXX potentially duplicate info, re-check */
3058 bf->bf_state.bfs_tid = tid;
3059 bf->bf_state.bfs_tx_queue = txq->axq_qnum;
3060 bf->bf_state.bfs_pri = pri;
3063 * If the hardware queue isn't busy, queue it directly.
3064 * If the hardware queue is busy, queue it.
3065 * If the TID is paused or the traffic is outside the BAW, software
3068 * If the node is in power-save and we're leaking a frame,
3069 * leak a single frame.
3071 if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
3072 /* TID is paused, queue */
3073 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
3075 * If the caller requested that it be sent at a high
3076 * priority, queue it at the head of the list.
3079 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
3081 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3082 } else if (ath_tx_ampdu_pending(sc, an, tid)) {
3083 /* AMPDU pending; queue */
3084 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
3085 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3087 } else if (ath_tx_ampdu_running(sc, an, tid)) {
3088 /* AMPDU running, attempt direct dispatch if possible */
3091 * Always queue the frame to the tail of the list.
3093 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3096 * If the hardware queue isn't busy, direct dispatch
3097 * the head frame in the list. Don't schedule the
3098 * TID - let it build some more frames first?
3100 * When running A-MPDU, always just check the hardware
3101 * queue depth against the aggregate frame limit.
3102 * We don't want to burst a large number of single frames
3103 * out to the hardware; we want to aggressively hold back.
3105 * Otherwise, schedule the TID.
3107 /* XXX TXQ locking */
3108 if (txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_aggr) {
3109 bf = ATH_TID_FIRST(atid);
3110 ATH_TID_REMOVE(atid, bf, bf_list);
3113 * Ensure it's definitely treated as a non-AMPDU
3114 * frame - this information may have been left
3115 * over from a previous attempt.
3117 bf->bf_state.bfs_aggr = 0;
3118 bf->bf_state.bfs_nframes = 1;
3120 /* Queue to the hardware */
3121 ath_tx_xmit_aggr(sc, an, txq, bf);
3122 DPRINTF(sc, ATH_DEBUG_SW_TX,
3126 DPRINTF(sc, ATH_DEBUG_SW_TX,
3127 "%s: ampdu; swq'ing\n",
3130 ath_tx_tid_sched(sc, atid);
3133 * If we're not doing A-MPDU, be prepared to direct dispatch
3134 * up to both limits if possible. This particular corner
3135 * case may end up with packet starvation between aggregate
3136 * traffic and non-aggregate traffic: we want to ensure
3137 * that non-aggregate stations get a few frames queued to the
3138 * hardware before the aggregate station(s) get their chance.
3140 * So if you only ever see a couple of frames direct dispatched
3141 * to the hardware from a non-AMPDU client, check both here
3142 * and in the software queue dispatcher to ensure that those
3143 * non-AMPDU stations get a fair chance to transmit.
3145 /* XXX TXQ locking */
3146 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) &&
3147 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) {
3148 /* AMPDU not running, attempt direct dispatch */
3149 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
3150 /* See if clrdmask needs to be set */
3151 ath_tx_update_clrdmask(sc, atid, bf);
3154 * Update the current leak count if
3155 * we're leaking frames; and set the
3156 * MORE flag as appropriate.
3158 ath_tx_leak_count_update(sc, atid, bf);
3161 * Dispatch the frame.
3163 ath_tx_xmit_normal(sc, txq, bf);
3166 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: swq'ing\n", __func__);
3167 ATH_TID_INSERT_TAIL(atid, bf, bf_list);
3168 ath_tx_tid_sched(sc, atid);
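/*
 * Decision summary for ath_tx_swq() above, as a standalone sketch
 * (illustrative only; the names and parameters here are simplified
 * stand-ins, not driver API):
 */
#if 0
enum swq_verdict { DIRECT_DISPATCH, SOFTWARE_QUEUE };

static enum swq_verdict
ath_tx_swq_example(int paused, int ampdu_pending, int ampdu_running,
    int hwq_depth, int hwq_aggr_depth, int limit_aggr, int limit_nonaggr)
{
	if (paused || ampdu_pending)
		return (SOFTWARE_QUEUE);
	if (ampdu_running)
		/* Hold back aggressively: compare against the aggregate limit */
		return (hwq_depth < limit_aggr ?
		    DIRECT_DISPATCH : SOFTWARE_QUEUE);
	/* Non-A-MPDU: allow the larger limit, but don't starve aggregates */
	if (hwq_depth < limit_nonaggr && hwq_aggr_depth < limit_aggr)
		return (DIRECT_DISPATCH);
	return (SOFTWARE_QUEUE);
}
#endif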
3173 * Only set the clrdmask bit if none of the nodes are currently
3174 * filtered.
3176 * XXX TODO: go through all the callers and check to see
3177 * which are being called in the context of looping over all
3178 * TIDs (eg, if all tids are being paused, resumed, etc.)
3179 * That'll avoid O(n^2) complexity here.
3182 ath_tx_set_clrdmask(struct ath_softc *sc, struct ath_node *an)
3186 ATH_TX_LOCK_ASSERT(sc);
3188 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3189 if (an->an_tid[i].isfiltered == 1)
3196 * Configure the per-TID node state.
3198 * This likely belongs in if_ath_node.c but I can't think of anywhere
3199 * else to put it just yet.
3201 * This sets up the SLISTs and the mutex as appropriate.
3204 ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an)
3207 struct ath_tid *atid;
3209 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
3210 atid = &an->an_tid[i];
3212 /* XXX now with this bzero(), is the field 0'ing needed? */
3213 bzero(atid, sizeof(*atid));
3215 TAILQ_INIT(&atid->tid_q);
3216 TAILQ_INIT(&atid->filtq.tid_q);
3219 for (j = 0; j < ATH_TID_MAX_BUFS; j++)
3220 atid->tx_buf[j] = NULL;
3221 atid->baw_head = atid->baw_tail = 0;
3224 atid->hwq_depth = 0;
3225 atid->cleanup_inprogress = 0;
3226 if (i == IEEE80211_NONQOS_TID)
3227 atid->ac = ATH_NONQOS_TID_AC;
3229 atid->ac = TID_TO_WME_AC(i);
3231 an->clrdmask = 1; /* Always start by setting this bit */
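/*
 * Mapping note (illustrative): TID_TO_WME_AC() buckets QoS TIDs into
 * access categories (eg TIDs 0/3 -> WME_AC_BE, 1/2 -> WME_AC_BK,
 * 4/5 -> WME_AC_VI, 6/7 -> WME_AC_VO), while the non-QoS TID (16)
 * is pinned to ATH_NONQOS_TID_AC (WME_AC_VO) above.
 */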
3235 * Pause the current TID. This stops packets from being transmitted
3238 * Since this is also called from upper layers as well as the driver,
3239 * it will get the TID lock.
3242 ath_tx_tid_pause(struct ath_softc *sc, struct ath_tid *tid)
3245 ATH_TX_LOCK_ASSERT(sc);
3247 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: paused = %d\n",
3248 __func__, tid->paused);
3252 * Unpause the current TID, and schedule it if needed.
3255 ath_tx_tid_resume(struct ath_softc *sc, struct ath_tid *tid)
3257 ATH_TX_LOCK_ASSERT(sc);
3260 * There are some odd places where ath_tx_tid_resume() is called
3261 * when it shouldn't be; this works around that particular issue
3262 * until it's actually resolved.
3264 if (tid->paused == 0) {
3265 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3266 "%s: %6D: paused=0?\n", __func__,
3267 tid->an->an_node.ni_macaddr, ":");
3272 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: unpaused = %d\n",
3273 __func__, tid->paused);
3279 * Override the clrdmask configuration for the next frame
3280 * from this TID, just to get the ball rolling.
3282 ath_tx_set_clrdmask(sc, tid->an);
3284 if (tid->axq_depth == 0)
3287 /* XXX isfiltered shouldn't ever be 0 at this point */
3288 if (tid->isfiltered == 1) {
3289 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: filtered?!\n",
3294 ath_tx_tid_sched(sc, tid);
3297 * Queue the software TX scheduler.
3299 ath_tx_swq_kick(sc);
3303 * Add the given ath_buf to the TID filtered frame list.
3304 * This requires the TID be filtered.
3307 ath_tx_tid_filt_addbuf(struct ath_softc *sc, struct ath_tid *tid,
3311 ATH_TX_LOCK_ASSERT(sc);
3313 if (!tid->isfiltered)
3314 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: not filtered?!\n",
3317 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: bf=%p\n", __func__, bf);
3319 /* Set the retry bit and bump the retry counter */
3320 ath_tx_set_retry(sc, bf);
3321 sc->sc_stats.ast_tx_swfiltered++;
3323 ATH_TID_FILT_INSERT_TAIL(tid, bf, bf_list);
3327 * Handle a completed filtered frame from the given TID.
3328 * This just enables/pauses the filtered frame state if required
3329 * and appends the filtered frame to the filtered queue.
3332 ath_tx_tid_filt_comp_buf(struct ath_softc *sc, struct ath_tid *tid,
3336 ATH_TX_LOCK_ASSERT(sc);
3338 if (! tid->isfiltered) {
3339 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: filter transition\n",
3341 tid->isfiltered = 1;
3342 ath_tx_tid_pause(sc, tid);
3345 /* Add the frame to the filter queue */
3346 ath_tx_tid_filt_addbuf(sc, tid, bf);
3350 * Complete the filtered frame TX completion.
3352 * If there are no more frames in the hardware queue, unpause/unfilter
3353 * the TID if applicable. Otherwise we will wait for a node PS transition
3357 ath_tx_tid_filt_comp_complete(struct ath_softc *sc, struct ath_tid *tid)
3361 ATH_TX_LOCK_ASSERT(sc);
3363 if (tid->hwq_depth != 0)
3366 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT, "%s: hwq=0, transition back\n",
3368 tid->isfiltered = 0;
3369 /* XXX ath_tx_tid_resume() also calls ath_tx_set_clrdmask()! */
3370 ath_tx_set_clrdmask(sc, tid->an);
3372 /* XXX this is really quite inefficient */
3373 while ((bf = ATH_TID_FILT_LAST(tid, ath_bufhead_s)) != NULL) {
3374 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3375 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
3378 ath_tx_tid_resume(sc, tid);
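/*
 * Lifecycle summary (illustrative): a filtered completion flips the
 * TID to isfiltered=1 and pauses it; further filtered frames stack
 * up on the filtered queue; once hwq_depth drains to zero the
 * filtered frames are prepended back onto the software queue in
 * order, isfiltered is cleared and the TID resumes with CLRDMASK
 * forced for the next frame.
 */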
3382 * Called when a single (aggregate or otherwise) frame is completed.
3384 * Returns 1 if the buffer could be added to the filtered list
3385 * (cloned or otherwise), 0 if the buffer couldn't be added to the
3386 * filtered list (failed clone; expired retry) and the caller should
3387 * free it and handle it like a failure (eg by sending a BAR.)
3390 ath_tx_tid_filt_comp_single(struct ath_softc *sc, struct ath_tid *tid,
3393 struct ath_buf *nbf;
3396 ATH_TX_LOCK_ASSERT(sc);
3399 * Don't allow a filtered frame to live forever.
3401 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3402 sc->sc_stats.ast_tx_swretrymax++;
3403 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3404 "%s: bf=%p, seqno=%d, exceeded retries\n",
4407 SEQNO(bf->bf_state.bfs_seqno));
3412 * A busy buffer can't be added to the retry list.
3413 * It needs to be cloned.
3415 if (bf->bf_flags & ATH_BUF_BUSY) {
3416 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3417 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3418 "%s: busy buffer clone: %p -> %p\n",
3425 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3426 "%s: busy buffer couldn't be cloned (%p)!\n",
3430 ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3433 ath_tx_tid_filt_comp_complete(sc, tid);
3439 ath_tx_tid_filt_comp_aggr(struct ath_softc *sc, struct ath_tid *tid,
3440 struct ath_buf *bf_first, ath_bufhead *bf_q)
3442 struct ath_buf *bf, *bf_next, *nbf;
3444 ATH_TX_LOCK_ASSERT(sc);
3448 bf_next = bf->bf_next;
3449 bf->bf_next = NULL; /* Remove it from the aggr list */
3452 * Don't allow a filtered frame to live forever.
3454 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) {
3455 sc->sc_stats.ast_tx_swretrymax++;
3456 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3457 "%s: bf=%p, seqno=%d, exceeded retries\n",
4460 SEQNO(bf->bf_state.bfs_seqno));
3461 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3465 if (bf->bf_flags & ATH_BUF_BUSY) {
3466 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf);
3467 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3468 "%s: busy buffer cloned: %p -> %p",
3475 * If the buffer couldn't be cloned, add it to bf_q;
3476 * the caller will free the buffer(s) as required.
3479 DPRINTF(sc, ATH_DEBUG_SW_TX_FILT,
3480 "%s: buffer couldn't be cloned! (%p)\n",
3482 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
3484 ath_tx_tid_filt_comp_buf(sc, tid, nbf);
3490 ath_tx_tid_filt_comp_complete(sc, tid);
3494 * Suspend the queue because we need to TX a BAR.
3497 ath_tx_tid_bar_suspend(struct ath_softc *sc, struct ath_tid *tid)
3500 ATH_TX_LOCK_ASSERT(sc);
3502 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3503 "%s: tid=%d, bar_wait=%d, bar_tx=%d, called\n",
3509 /* We shouldn't be called when bar_tx is 1 */
3511 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3512 "%s: bar_tx is 1?!\n", __func__);
3515 /* If we've already been called, just be patient. */
3522 /* Only one pause, no matter how many frames fail */
3523 ath_tx_tid_pause(sc, tid);
3527 * We've finished with BAR handling - either we succeeded or
3528 * failed. Either way, unsuspend TX.
3531 ath_tx_tid_bar_unsuspend(struct ath_softc *sc, struct ath_tid *tid)
3534 ATH_TX_LOCK_ASSERT(sc);
3536 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3537 "%s: %6D: TID=%d, called\n",
3539 tid->an->an_node.ni_macaddr,
3543 if (tid->bar_tx == 0 || tid->bar_wait == 0) {
3544 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3545 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3546 __func__, tid->an->an_node.ni_macaddr, ":",
3547 tid->tid, tid->bar_tx, tid->bar_wait);
3550 tid->bar_tx = tid->bar_wait = 0;
3551 ath_tx_tid_resume(sc, tid);
3555 * Return whether we're ready to TX a BAR frame.
3557 * Requires the TID lock be held.
3560 ath_tx_tid_bar_tx_ready(struct ath_softc *sc, struct ath_tid *tid)
3563 ATH_TX_LOCK_ASSERT(sc);
3565 if (tid->bar_wait == 0 || tid->hwq_depth > 0)
3568 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3569 "%s: %6D: TID=%d, bar ready\n",
3571 tid->an->an_node.ni_macaddr,
3579 * Check whether the current TID is ready to have a BAR
3580 * TXed and if so, do the TX.
3582 * Since the TID/TXQ lock can't be held during a call to
3583 * ieee80211_send_bar(), we have to do the dirty thing of unlocking it,
3584 * sending the BAR and locking it again.
3586 * Eventually, the code to send the BAR should be broken out
3587 * from this routine so the lock doesn't have to be reacquired
3588 * just to be immediately dropped by the caller.
3591 ath_tx_tid_bar_tx(struct ath_softc *sc, struct ath_tid *tid)
3593 struct ieee80211_tx_ampdu *tap;
3595 ATH_TX_LOCK_ASSERT(sc);
3597 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3598 "%s: %6D: TID=%d, called\n",
3600 tid->an->an_node.ni_macaddr,
3604 tap = ath_tx_get_tx_tid(tid->an, tid->tid);
3607 * This is an error condition!
3609 if (tid->bar_wait == 0 || tid->bar_tx == 1) {
3610 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3611 "%s: %6D: TID=%d, bar_tx=%d, bar_wait=%d: ?\n",
3612 __func__, tid->an->an_node.ni_macaddr, ":",
3613 tid->tid, tid->bar_tx, tid->bar_wait);
3617 /* Don't do anything if we still have pending frames */
3618 if (tid->hwq_depth > 0) {
3619 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3620 "%s: %6D: TID=%d, hwq_depth=%d, waiting\n",
3622 tid->an->an_node.ni_macaddr,
3629 /* We're now about to TX */
3633 * Override the clrdmask configuration for the next frame,
3634 * just to get the ball rolling.
3636 ath_tx_set_clrdmask(sc, tid->an);
3639 * Calculate new BAW left edge, now that all frames have either
3640 * succeeded or failed.
3642 * XXX verify this is _actually_ the valid value to begin at!
3644 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3645 "%s: %6D: TID=%d, new BAW left edge=%d\n",
3647 tid->an->an_node.ni_macaddr,
3652 /* Try sending the BAR frame */
3653 /* We can't hold the lock here! */
3656 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) {
3657 /* Success? Now we wait for notification that it's done */
3662 /* Failure? For now, warn loudly and continue */
3664 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
3665 "%s: %6D: TID=%d, failed to TX BAR, continue!\n",
3666 __func__, tid->an->an_node.ni_macaddr, ":",
3668 ath_tx_tid_bar_unsuspend(sc, tid);
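/*
 * BAR flow summary (illustrative): bar_suspend() pauses the TID once
 * regardless of how many frames failed; bar_tx_ready() holds off
 * until hwq_depth reaches zero; bar_tx() then fires a BAR at the new
 * txa_start left edge; and bar_unsuspend() drops the pause either on
 * BAR completion or, as above, when the send itself fails.
 */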
3672 ath_tx_tid_drain_pkt(struct ath_softc *sc, struct ath_node *an,
3673 struct ath_tid *tid, ath_bufhead *bf_cq, struct ath_buf *bf)
3676 ATH_TX_LOCK_ASSERT(sc);
3679 * If the current TID is running AMPDU, update
3682 if (ath_tx_ampdu_running(sc, an, tid->tid) &&
3683 bf->bf_state.bfs_dobaw) {
3685 * Only remove the frame from the BAW if it's
3686 * been transmitted at least once; this means
3687 * the frame was in the BAW to begin with.
3689 if (bf->bf_state.bfs_retries > 0) {
3690 ath_tx_update_baw(sc, an, tid, bf);
3691 bf->bf_state.bfs_dobaw = 0;
3695 * This has become a non-fatal error now
3697 if (! bf->bf_state.bfs_addedbaw)
3698 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
3699 "%s: wasn't added: seqno %d\n",
3700 __func__, SEQNO(bf->bf_state.bfs_seqno));
3704 /* Strip it out of an aggregate list if it was in one */
3707 /* Insert on the free queue to be freed by the caller */
3708 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
3712 ath_tx_tid_drain_print(struct ath_softc *sc, struct ath_node *an,
3713 const char *pfx, struct ath_tid *tid, struct ath_buf *bf)
3715 struct ieee80211_node *ni = &an->an_node;
3716 struct ath_txq *txq;
3717 struct ieee80211_tx_ampdu *tap;
3719 txq = sc->sc_ac2q[tid->ac];
3720 tap = ath_tx_get_tx_tid(an, tid->tid);
3722 DPRINTF(sc, ATH_DEBUG_SW_TX,
3723 "%s: %s: %6D: bf=%p: addbaw=%d, dobaw=%d, "
3724 "seqno=%d, retry=%d\n",
3730 bf->bf_state.bfs_addedbaw,
3731 bf->bf_state.bfs_dobaw,
3732 SEQNO(bf->bf_state.bfs_seqno),
3733 bf->bf_state.bfs_retries);
3734 DPRINTF(sc, ATH_DEBUG_SW_TX,
3735 "%s: %s: %6D: bf=%p: txq[%d] axq_depth=%d, axq_aggr_depth=%d\n",
3743 txq->axq_aggr_depth);
3744 DPRINTF(sc, ATH_DEBUG_SW_TX,
3745 "%s: %s: %6D: bf=%p: tid txq_depth=%d hwq_depth=%d, bar_wait=%d, "
3756 DPRINTF(sc, ATH_DEBUG_SW_TX,
3757 "%s: %s: %6D: tid %d: "
3758 "sched=%d, paused=%d, "
3759 "incomp=%d, baw_head=%d, "
3760 "baw_tail=%d txa_start=%d, ni_txseqs=%d\n",
3766 tid->sched, tid->paused,
3767 tid->incomp, tid->baw_head,
3768 tid->baw_tail, tap == NULL ? -1 : tap->txa_start,
3769 ni->ni_txseqs[tid->tid]);
3771 /* XXX Dump the frame, see what it is? */
3772 ieee80211_dump_pkt(ni->ni_ic,
3773 mtod(bf->bf_m, const uint8_t *),
3774 bf->bf_m->m_len, 0, -1);
3778 * Free any packets currently pending in the software TX queue.
3780 * This will be called when a node is being deleted.
3782 * It can also be called on an active node during an interface
3783 * reset or state transition.
3785 * (From Linux/reference):
3787 * TODO: For frame(s) that are in the retry state, we will reuse the
3788 * sequence number(s) without setting the retry bit. The
3789 * alternative is to give up on these and BAR the receiver's window
3793 ath_tx_tid_drain(struct ath_softc *sc, struct ath_node *an,
3794 struct ath_tid *tid, ath_bufhead *bf_cq)
3797 struct ieee80211_tx_ampdu *tap;
3798 struct ieee80211_node *ni = &an->an_node;
3801 tap = ath_tx_get_tx_tid(an, tid->tid);
3803 ATH_TX_LOCK_ASSERT(sc);
3805 /* Walk the queue, free frames */
3808 bf = ATH_TID_FIRST(tid);
3814 ath_tx_tid_drain_print(sc, an, "norm", tid, bf);
3818 ATH_TID_REMOVE(tid, bf, bf_list);
3819 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3822 /* And now, drain the filtered frame queue */
3825 bf = ATH_TID_FILT_FIRST(tid);
3830 ath_tx_tid_drain_print(sc, an, "filt", tid, bf);
3834 ATH_TID_FILT_REMOVE(tid, bf, bf_list);
3835 ath_tx_tid_drain_pkt(sc, an, tid, bf_cq, bf);
3839 * Override the clrdmask configuration for the next frame
3840 * in case there is some future transmission, just to get
3843 * This won't hurt things if the TID is about to be freed.
3845 ath_tx_set_clrdmask(sc, tid->an);
3848 * Now that it's completed, grab the TID lock and update
3849 * the sequence number and BAW window.
3850 * Because sequence numbers have been assigned to frames
3851 * that haven't been sent yet, it's entirely possible
3852 * we'll be called with some pending frames that have not
3853 * yet been transmitted.
3855 * The cleaner solution is to do the sequence number allocation
3856 * when the packet is first transmitted - and thus the "retries"
3857 * check above would be enough to update the BAW/seqno.
3860 /* But don't do it for non-QoS TIDs */
3863 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
3864 "%s: %6D: node %p: TID %d: sliding BAW left edge to %d\n",
3872 ni->ni_txseqs[tid->tid] = tap->txa_start;
3873 tid->baw_tail = tid->baw_head;
3878 * Reset the TID state. This must be only called once the node has
3879 * had its frames flushed from this TID, to ensure that no other
3880 * pause / unpause logic can kick in.
3883 ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
3887 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0;
3888 tid->paused = tid->sched = tid->addba_tx_pending = 0;
3889 tid->incomp = tid->cleanup_inprogress = 0;
3893 * If we have a bar_wait set, we need to unpause the TID
3894 * here. Otherwise once cleanup has finished, the TID won't
3895 * have the right paused counter.
3897 * XXX I'm not going through resume here - I don't want the
3898 * node to be rescheduled just yet. This however should be
3901 if (tid->bar_wait) {
3902 if (tid->paused > 0) {
3908 * XXX same with a currently filtered TID.
3910 * Since this is being called during a flush, we assume that
3911 * the filtered frame list is actually empty.
3913 * XXX TODO: add in a check to ensure that the filtered queue
3914 * depth is actually 0!
3916 if (tid->isfiltered) {
3917 if (tid->paused > 0) {
3923 * Clear BAR, filtered frames, scheduled and ADDBA pending.
3924 * The TID may be going through cleanup from the last association
3925 * where things in the BAW are still in the hardware queue.
3929 tid->isfiltered = 0;
3931 tid->addba_tx_pending = 0;
3934 * XXX TODO: it may just be enough to walk the HWQs and mark
3935 * frames for that node as non-aggregate; or mark the ath_node
3936 * with something that indicates that aggregation is no longer
3937 * occurring. Then we can just toss the BAW complaints and
3938 * do a complete hard reset of state here - no pause, no
3939 * complete counter, etc.
3945 * Flush all software queued packets for the given node.
3947 * This occurs when a completion handler frees the last buffer
3948 * for a node, and the node is thus freed. This causes the node
3949 * to be cleaned up, which ends up calling ath_tx_node_flush.
3952 ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
3960 ATH_KTR(sc, ATH_KTR_NODE, 1, "ath_tx_node_flush: flush node; ni=%p",
3964 DPRINTF(sc, ATH_DEBUG_NODE,
3965 "%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
3966 "swq_depth=%d, clrdmask=%d, leak_count=%d\n",
3968 an->an_node.ni_macaddr,
3970 an->an_is_powersave,
3977 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
3978 struct ath_tid *atid = &an->an_tid[tid];
3981 ath_tx_tid_drain(sc, an, atid, &bf_cq);
3983 /* Remove this tid from the list of active tids */
3984 ath_tx_tid_unsched(sc, atid);
3986 /* Reset the per-TID pause, BAR, etc state */
3987 ath_tx_tid_reset(sc, atid);
3991 * Clear global leak count
3993 an->an_leak_count = 0;
3996 /* Handle completed frames */
3997 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
3998 TAILQ_REMOVE(&bf_cq, bf, bf_list);
3999 ath_tx_default_comp(sc, bf, 0);
4004 * Drain all the software TXQs currently with traffic queued.
4007 ath_tx_txq_drain(struct ath_softc *sc, struct ath_txq *txq)
4009 struct ath_tid *tid;
4017 * Iterate over all active tids for the given txq,
4018 * flushing and unsched'ing them
4020 while (! TAILQ_EMPTY(&txq->axq_tidq)) {
4021 tid = TAILQ_FIRST(&txq->axq_tidq);
4022 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq);
4023 ath_tx_tid_unsched(sc, tid);
4028 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4029 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4030 ath_tx_default_comp(sc, bf, 0);
4035 * Handle completion of non-aggregate session frames.
4037 * This (currently) doesn't implement software retransmission of
4038 * non-aggregate frames!
4040 * Software retransmission of non-aggregate frames needs to obey
4041 * the strict sequence number ordering, and drop any frames that
4044 * For now, filtered frames and frame transmission will cause
4045 * all kinds of issues. So we don't support them.
4047 * So anyone queuing frames via ath_tx_normal_xmit() or
4048 * ath_tx_hw_queue_norm() must override and set CLRDMASK.
4051 ath_tx_normal_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
4053 struct ieee80211_node *ni = bf->bf_node;
4054 struct ath_node *an = ATH_NODE(ni);
4055 int tid = bf->bf_state.bfs_tid;
4056 struct ath_tid *atid = &an->an_tid[tid];
4057 struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
4059 /* The TID state is protected behind the TXQ lock */
4062 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bf=%p: fail=%d, hwq_depth now %d\n",
4063 __func__, bf, fail, atid->hwq_depth - 1);
4069 * If the frame was filtered, stick it on the filter frame
4070 * queue and complain about it. It shouldn't happen!
4072 if ((ts->ts_status & HAL_TXERR_FILT) ||
4073 (ts->ts_status != 0 && atid->isfiltered)) {
4074 DPRINTF(sc, ATH_DEBUG_SW_TX,
4075 "%s: isfiltered=%d, ts_status=%d: huh?\n",
4079 ath_tx_tid_filt_comp_buf(sc, atid, bf);
4082 if (atid->isfiltered)
4083 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: filtered?!\n", __func__);
4084 if (atid->hwq_depth < 0)
4085 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4086 __func__, atid->hwq_depth);
4089 * If the queue is filtered, potentially mark it as complete
4090 * and reschedule it as needed.
4092 * This is required as there may be a subsequent TX descriptor
4093 * for this end-node that has CLRDMASK set, so it's quite possible
4094 * that a filtered frame will be followed by a non-filtered
4095 * (complete or otherwise) frame.
4097 * XXX should we do this before we complete the frame?
4099 if (atid->isfiltered)
4100 ath_tx_tid_filt_comp_complete(sc, atid);
4104 * punt to rate control if we're not being cleaned up
4105 * during a hw queue drain and the frame wanted an ACK.
4107 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4108 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4109 ts, bf->bf_state.bfs_pktlen,
4110 1, (ts->ts_status == 0) ? 0 : 1);
4112 ath_tx_default_comp(sc, bf, fail);
4116 * Handle cleanup of aggregate session packets that aren't
4119 * There's no need to update the BAW here - the session is being
4123 ath_tx_comp_cleanup_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4125 struct ieee80211_node *ni = bf->bf_node;
4126 struct ath_node *an = ATH_NODE(ni);
4127 int tid = bf->bf_state.bfs_tid;
4128 struct ath_tid *atid = &an->an_tid[tid];
4130 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: TID %d: incomp=%d\n",
4131 __func__, tid, atid->incomp);
4135 if (atid->incomp == 0) {
4136 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4137 "%s: TID %d: cleaned up! resume!\n",
4139 atid->cleanup_inprogress = 0;
4140 ath_tx_tid_resume(sc, atid);
4144 ath_tx_default_comp(sc, bf, 0);
4148 * Performs transmit side cleanup when TID changes from aggregated to
4151 * - Discard all retry frames from the s/w queue.
4152 * - Fix the tx completion function for all buffers in s/w queue.
4153 * - Count the number of unacked frames, and let transmit completion
4156 * The caller is responsible for pausing the TID and unpausing the
4157 * TID if no cleanup was required. Otherwise the cleanup path will
4158 * unpause the TID once the last hardware queued frame is completed.
4161 ath_tx_tid_cleanup(struct ath_softc *sc, struct ath_node *an, int tid,
4164 struct ath_tid *atid = &an->an_tid[tid];
4165 struct ieee80211_tx_ampdu *tap;
4166 struct ath_buf *bf, *bf_next;
4168 ATH_TX_LOCK_ASSERT(sc);
4170 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4171 "%s: TID %d: called\n", __func__, tid);
4174 * Move the filtered frames to the TX queue, before
4175 * we run off and discard/process things.
4177 /* XXX this is really quite inefficient */
4178 while ((bf = ATH_TID_FILT_LAST(atid, ath_bufhead_s)) != NULL) {
4179 ATH_TID_FILT_REMOVE(atid, bf, bf_list);
4180 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4184 * Update the frames in the software TX queue:
4186 * + Discard retry frames in the queue
4187 * + Fix the completion function to be non-aggregate
4189 bf = ATH_TID_FIRST(atid);
4191 if (bf->bf_state.bfs_isretried) {
4192 bf_next = TAILQ_NEXT(bf, bf_list);
4193 ATH_TID_REMOVE(atid, bf, bf_list);
4194 if (bf->bf_state.bfs_dobaw) {
4195 ath_tx_update_baw(sc, an, atid, bf);
4196 if (!bf->bf_state.bfs_addedbaw)
4197 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4198 "%s: wasn't added: seqno %d\n",
4200 SEQNO(bf->bf_state.bfs_seqno));
4202 bf->bf_state.bfs_dobaw = 0;
4204 * Call the default completion handler with "fail" just
4205 * so upper levels are suitably notified about this.
4207 TAILQ_INSERT_TAIL(bf_cq, bf, bf_list);
4211 /* Give these the default completion handler */
4212 bf->bf_comp = ath_tx_normal_comp;
4213 bf = TAILQ_NEXT(bf, bf_list);
4217 * Calculate what hardware-queued frames exist based
4218 * on the current BAW size. Ie, what frames have been
4219 * added to the TX hardware queue for this TID but
4222 tap = ath_tx_get_tx_tid(an, tid);
4223 /* Need the lock - fiddling with BAW */
4224 while (atid->baw_head != atid->baw_tail) {
4225 if (atid->tx_buf[atid->baw_head]) {
4227 atid->cleanup_inprogress = 1;
4228 atid->tx_buf[atid->baw_head] = NULL;
4230 INCR(atid->baw_head, ATH_TID_MAX_BUFS);
4231 INCR(tap->txa_start, IEEE80211_SEQ_RANGE);
4234 if (atid->cleanup_inprogress)
4235 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4236 "%s: TID %d: cleanup needed: %d packets\n",
4237 __func__, tid, atid->incomp);
4239 /* Owner now must free completed frames */
4242 static struct ath_buf *
4243 ath_tx_retry_clone(struct ath_softc *sc, struct ath_node *an,
4244 struct ath_tid *tid, struct ath_buf *bf)
4246 struct ath_buf *nbf;
4250 * Clone the buffer. This will handle the dma unmap and
4251 * copy the node reference to the new buffer. If this
4252 * works out, 'bf' will have no DMA mapping, no mbuf
4253 * pointer and no node reference.
4255 nbf = ath_buf_clone(sc, bf);
4258 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: ATH_BUF_BUSY; cloning\n",
4263 /* Failed to clone */
4264 DPRINTF(sc, ATH_DEBUG_XMIT,
4265 "%s: failed to clone a busy buffer\n",
4270 /* Setup the dma for the new buffer */
4271 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m);
4273 DPRINTF(sc, ATH_DEBUG_XMIT,
4274 "%s: failed to setup dma for clone\n",
4277 * Put this at the head of the list, not tail;
4278 * that way it doesn't interfere with the
4279 * busy buffer logic (which uses the tail of
4283 ath_returnbuf_head(sc, nbf);
4284 ATH_TXBUF_UNLOCK(sc);
4288 /* Update BAW if required, before we free the original buf */
4289 if (bf->bf_state.bfs_dobaw)
4290 ath_tx_switch_baw_buf(sc, an, tid, bf, nbf);
4292 /* Free original buffer; return new buffer */
4293 ath_freebuf(sc, bf);
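/*
 * Usage note (illustrative): callers treat this as an atomic swap -
 * on success the original 'bf' is gone (DMA unmapped, BAW slot
 * switched to 'nbf', buffer freed) and only 'nbf' may be referenced;
 * on failure 'bf' is left untouched for the caller to dispose of.
 */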
4299 * Handle retrying an unaggregate frame in an aggregate
4302 * If too many retries occur, pause the TID, wait for
4303 * any further retransmits (as there's no reason why
4304 * non-aggregate frames in an aggregate session are
4305 * transmitted in-order; they just have to be in-BAW)
4306 * and then queue a BAR.
4309 ath_tx_aggr_retry_unaggr(struct ath_softc *sc, struct ath_buf *bf)
4311 struct ieee80211_node *ni = bf->bf_node;
4312 struct ath_node *an = ATH_NODE(ni);
4313 int tid = bf->bf_state.bfs_tid;
4314 struct ath_tid *atid = &an->an_tid[tid];
4315 struct ieee80211_tx_ampdu *tap;
4319 tap = ath_tx_get_tx_tid(an, tid);
4322 * If the buffer is marked as busy, we can't directly
4323 * reuse it. Instead, try to clone the buffer.
4324 * If the clone is successful, recycle the old buffer.
4325 * If the clone is unsuccessful, set bfs_retries to max
4326 * to force the next bit of code to free the buffer
4329 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4330 (bf->bf_flags & ATH_BUF_BUSY)) {
4331 struct ath_buf *nbf;
4332 nbf = ath_tx_retry_clone(sc, an, atid, bf);
4334 /* bf has been freed at this point */
4337 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4340 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4341 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4342 "%s: exceeded retries; seqno %d\n",
4343 __func__, SEQNO(bf->bf_state.bfs_seqno));
4344 sc->sc_stats.ast_tx_swretrymax++;
4346 /* Update BAW anyway */
4347 if (bf->bf_state.bfs_dobaw) {
4348 ath_tx_update_baw(sc, an, atid, bf);
4349 if (! bf->bf_state.bfs_addedbaw)
4350 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4351 "%s: wasn't added: seqno %d\n",
4352 __func__, SEQNO(bf->bf_state.bfs_seqno));
4354 bf->bf_state.bfs_dobaw = 0;
4356 /* Suspend the TX queue and get ready to send the BAR */
4357 ath_tx_tid_bar_suspend(sc, atid);
4359 /* Send the BAR if there are no other frames waiting */
4360 if (ath_tx_tid_bar_tx_ready(sc, atid))
4361 ath_tx_tid_bar_tx(sc, atid);
4365 /* Free buffer, bf is free after this call */
4366 ath_tx_default_comp(sc, bf, 0);
4371 * This increments the retry counter as well as
4372 * sets the retry flag in the ath_buf and packet
4375 ath_tx_set_retry(sc, bf);
4376 sc->sc_stats.ast_tx_swretries++;
4379 * Insert this at the head of the queue, so it's
4380 * retried before any current/subsequent frames.
4382 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4383 ath_tx_tid_sched(sc, atid);
4384 /* Send the BAR if there are no other frames waiting */
4385 if (ath_tx_tid_bar_tx_ready(sc, atid))
4386 ath_tx_tid_bar_tx(sc, atid);
4392 * Common code for aggregate excessive retry/subframe retry.
* If retrying, queues buffers to bf_q. If not, frees the buffers.
4396 * XXX should unify this with ath_tx_aggr_retry_unaggr()
4399 ath_tx_retry_subframe(struct ath_softc *sc, struct ath_buf *bf,
4402 struct ieee80211_node *ni = bf->bf_node;
4403 struct ath_node *an = ATH_NODE(ni);
4404 int tid = bf->bf_state.bfs_tid;
4405 struct ath_tid *atid = &an->an_tid[tid];
4407 ATH_TX_LOCK_ASSERT(sc);
4409 /* XXX clr11naggr should be done for all subframes */
4410 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
4411 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0);
4413 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */
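/*
 * The descriptor was originally built as an A-MPDU subframe;
 * the HAL calls above clear the aggregate and burst duration
 * fields so the frame can be retried as a standalone frame.
 */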
4416 * If the buffer is marked as busy, we can't directly
4417 * reuse it. Instead, try to clone the buffer.
4418 * If the clone is successful, recycle the old buffer.
4419 * If the clone is unsuccessful, set bfs_retries to max
* to force the next bit of code to free the buffer for us.
4423 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) &&
4424 (bf->bf_flags & ATH_BUF_BUSY)) {
4425 struct ath_buf *nbf;
4426 nbf = ath_tx_retry_clone(sc, an, atid, bf);
if (nbf)
	/* bf has been freed at this point */
	bf = nbf;
else
	bf->bf_state.bfs_retries = SWMAX_RETRIES + 1;
4434 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) {
4435 sc->sc_stats.ast_tx_swretrymax++;
4436 DPRINTF(sc, ATH_DEBUG_SW_TX_RETRIES,
4437 "%s: max retries: seqno %d\n",
4438 __func__, SEQNO(bf->bf_state.bfs_seqno));
4439 ath_tx_update_baw(sc, an, atid, bf);
4440 if (!bf->bf_state.bfs_addedbaw)
4441 DPRINTF(sc, ATH_DEBUG_SW_TX_BAW,
4442 "%s: wasn't added: seqno %d\n",
4443 __func__, SEQNO(bf->bf_state.bfs_seqno));
4444 bf->bf_state.bfs_dobaw = 0;
4448 ath_tx_set_retry(sc, bf);
4449 sc->sc_stats.ast_tx_swretries++;
4450 bf->bf_next = NULL; /* Just to make sure */
4452 /* Clear the aggregate state */
4453 bf->bf_state.bfs_aggr = 0;
bf->bf_state.bfs_ndelim = 0; /* XXX needed? delimiters only matter for aggregates */
4455 bf->bf_state.bfs_nframes = 1;
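/*
 * Tail insertion keeps retried subframes in their original
 * relative order when the caller re-queues bf_q.
 */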
4457 TAILQ_INSERT_TAIL(bf_q, bf, bf_list);
* Error packet completion for an aggregate destination.
4465 ath_tx_comp_aggr_error(struct ath_softc *sc, struct ath_buf *bf_first,
4466 struct ath_tid *tid)
4468 struct ieee80211_node *ni = bf_first->bf_node;
4469 struct ath_node *an = ATH_NODE(ni);
4470 struct ath_buf *bf_next, *bf;
4473 struct ieee80211_tx_ampdu *tap;
4480 * Update rate control - all frames have failed.
4482 * XXX use the length in the first frame in the series;
4483 * XXX just so things are consistent for now.
4485 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc,
4486 &bf_first->bf_status.ds_txstat,
4487 bf_first->bf_state.bfs_pktlen,
4488 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes);
4491 tap = ath_tx_get_tx_tid(an, tid->tid);
4492 sc->sc_stats.ast_tx_aggr_failall++;
4494 /* Retry all subframes */
4497 bf_next = bf->bf_next;
4498 bf->bf_next = NULL; /* Remove it from the aggr list */
4499 sc->sc_stats.ast_tx_aggr_fail++;
4500 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4503 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
4508 /* Prepend all frames to the beginning of the queue */
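/*
 * Note: walking bf_q from its tail and inserting at the TID
 * head preserves the original frame ordering.
 */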
4509 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4510 TAILQ_REMOVE(&bf_q, bf, bf_list);
4511 ATH_TID_INSERT_HEAD(tid, bf, bf_list);
4515 * Schedule the TID to be re-tried.
4517 ath_tx_tid_sched(sc, tid);
* Send a BAR if we dropped any frames.
*
* Keep the txq lock held for now, as we need to ensure
* that ni_txseqs[] is consistent (as it's being updated
* in the ifnet TX context or raw TX context).
4527 /* Suspend the TX queue and get ready to send the BAR */
4528 ath_tx_tid_bar_suspend(sc, tid);
4532 * Send BAR if required
4534 if (ath_tx_tid_bar_tx_ready(sc, tid))
4535 ath_tx_tid_bar_tx(sc, tid);
4539 /* Complete frames which errored out */
4540 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4541 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4542 ath_tx_default_comp(sc, bf, 0);
4547 * Handle clean-up of packets from an aggregate list.
* There's no need to update the BAW here - the session is being torn down.
4553 ath_tx_comp_cleanup_aggr(struct ath_softc *sc, struct ath_buf *bf_first)
4555 struct ath_buf *bf, *bf_next;
4556 struct ieee80211_node *ni = bf_first->bf_node;
4557 struct ath_node *an = ATH_NODE(ni);
4558 int tid = bf_first->bf_state.bfs_tid;
4559 struct ath_tid *atid = &an->an_tid[tid];
4570 if (atid->incomp == 0) {
4571 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
4572 "%s: TID %d: cleaned up! resume!\n",
4574 atid->cleanup_inprogress = 0;
4575 ath_tx_tid_resume(sc, atid);
4578 /* Send BAR if required */
4579 /* XXX why would we send a BAR when transitioning to non-aggregation? */
4581 * XXX TODO: we should likely just tear down the BAR state here,
4582 * rather than sending a BAR.
4584 if (ath_tx_tid_bar_tx_ready(sc, atid))
4585 ath_tx_tid_bar_tx(sc, atid);
4589 /* Handle frame completion */
4592 bf_next = bf->bf_next;
4593 ath_tx_default_comp(sc, bf, 1);
* Handle completion of a set of aggregate frames.
*
* Note: the completion status is taken from the last descriptor
* in the aggregate, not the last descriptor in the first frame.
4605 ath_tx_aggr_comp_aggr(struct ath_softc *sc, struct ath_buf *bf_first,
/* struct ath_desc *ds = bf->bf_lastds; */
4609 struct ieee80211_node *ni = bf_first->bf_node;
4610 struct ath_node *an = ATH_NODE(ni);
4611 int tid = bf_first->bf_state.bfs_tid;
4612 struct ath_tid *atid = &an->an_tid[tid];
4613 struct ath_tx_status ts;
4614 struct ieee80211_tx_ampdu *tap;
4620 struct ath_buf *bf, *bf_next;
4623 int nframes = 0, nbad = 0, nf;
4625 /* XXX there's too much on the stack? */
4626 struct ath_rc_series rc[ATH_RC_NUM];
4629 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: called; hwq_depth=%d\n",
4630 __func__, atid->hwq_depth);
4633 * Take a copy; this may be needed -after- bf_first
4634 * has been completed and freed.
4636 ts = bf_first->bf_status.ds_txstat;
4641 /* The TID state is kept behind the TXQ lock */
4645 if (atid->hwq_depth < 0)
4646 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: hwq_depth < 0: %d\n",
4647 __func__, atid->hwq_depth);
4650 * If the TID is filtered, handle completing the filter
* transition before potentially kicking it to the cleanup function.
4654 * XXX this is duplicate work, ew.
4656 if (atid->isfiltered)
4657 ath_tx_tid_filt_comp_complete(sc, atid);
4660 * Punt cleanup to the relevant function, not our problem now
4662 if (atid->cleanup_inprogress) {
4663 if (atid->isfiltered)
4664 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4665 "%s: isfiltered=1, normal_comp?\n",
4668 ath_tx_comp_cleanup_aggr(sc, bf_first);
4673 * If the frame is filtered, transition to filtered frame
4674 * mode and add this to the filtered frame list.
4676 * XXX TODO: figure out how this interoperates with
4677 * BAR, pause and cleanup states.
4679 if ((ts.ts_status & HAL_TXERR_FILT) ||
4680 (ts.ts_status != 0 && atid->isfiltered)) {
4682 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4683 "%s: isfiltered=1, fail=%d\n", __func__, fail);
4684 ath_tx_tid_filt_comp_aggr(sc, atid, bf_first, &bf_cq);
4686 /* Remove from BAW */
4687 TAILQ_FOREACH_SAFE(bf, &bf_cq, bf_list, bf_next) {
if (bf->bf_state.bfs_addedbaw)
	drops++;
4690 if (bf->bf_state.bfs_dobaw) {
4691 ath_tx_update_baw(sc, an, atid, bf);
4692 if (!bf->bf_state.bfs_addedbaw)
4693 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4694 "%s: wasn't added: seqno %d\n",
4696 SEQNO(bf->bf_state.bfs_seqno));
4698 bf->bf_state.bfs_dobaw = 0;
4701 * If any intermediate frames in the BAW were dropped when
* handling the filtered frames, send a BAR.
4705 ath_tx_tid_bar_suspend(sc, atid);
4708 * Finish up by sending a BAR if required and freeing
4709 * the frames outside of the TX lock.
4711 goto finish_send_bar;
4715 * XXX for now, use the first frame in the aggregate for
4716 * XXX rate control completion; it's at least consistent.
4718 pktlen = bf_first->bf_state.bfs_pktlen;
4721 * Handle errors first!
* Here, handle _any_ error as an "exceeded retries" error.
4724 * Later on (when filtered frames are to be specially handled)
4725 * it'll have to be expanded.
if (ts.ts_status != 0) {
4732 ath_tx_comp_aggr_error(sc, bf_first, atid);
4736 tap = ath_tx_get_tx_tid(an, tid);
* Extract the starting sequence number and the block-ack bitmap.
4741 /* XXX endian-ness of seq_st, ba? */
4742 seq_st = ts.ts_seqnum;
4743 hasba = !! (ts.ts_flags & HAL_TX_BA);
4744 tx_ok = (ts.ts_status == 0);
4745 isaggr = bf_first->bf_state.bfs_aggr;
4746 ba[0] = ts.ts_ba_low;
4747 ba[1] = ts.ts_ba_high;
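/*
 * ba[0]/ba[1] together form the 64-bit block-ack bitmap.
 * For each subframe, ATH_BA_INDEX(seq_st, seqno) gives its
 * bit offset relative to the starting sequence number; eg
 * with seq_st=100, seqno 103 maps to bit 3. ATH_BA_ISSET()
 * then says whether that subframe was ACKed.
 */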
4750 * Copy the TX completion status and the rate control
4751 * series from the first descriptor, as it may be freed
* before the rate control code can get its grubby fingers into it.
4755 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4757 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4758 "%s: txa_start=%d, tx_ok=%d, status=%.8x, flags=%.8x, "
4759 "isaggr=%d, seq_st=%d, hasba=%d, ba=%.8x, %.8x\n",
4760 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4761 isaggr, seq_st, hasba, ba[0], ba[1]);
4764 * The reference driver doesn't do this; it simply ignores
4765 * this check in its entirety.
4767 * I've seen this occur when using iperf to send traffic
4768 * out tid 1 - the aggregate frames are all marked as TID 1,
* but the TXSTATUS has TID=0. So, let's just ignore this check.
4773 /* Occasionally, the MAC sends a tx status for the wrong TID. */
4774 if (tid != ts.ts_tid) {
4775 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR, "%s: tid %d != hw tid %d\n",
4776 __func__, tid, ts.ts_tid);
4781 /* AR5416 BA bug; this requires an interface reset */
4782 if (isaggr && tx_ok && (! hasba)) {
4783 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4784 "%s: AR5416 bug: hasba=%d; txok=%d, isaggr=%d, "
4786 __func__, hasba, tx_ok, isaggr, seq_st);
4787 /* XXX TODO: schedule an interface reset */
4789 ath_printtxbuf(sc, bf_first,
4790 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
4795 * Walk the list of frames, figure out which ones were correctly
4796 * sent and which weren't.
4799 nf = bf_first->bf_state.bfs_nframes;
4801 /* bf_first is going to be invalid once this list is walked */
4805 * Walk the list of completed frames and determine
4806 * which need to be completed and which need to be
4809 * For completed frames, the completion functions need
4810 * to be called at the end of this function as the last
4811 * node reference may free the node.
4813 * Finally, since the TXQ lock can't be held during the
4814 * completion callback (to avoid lock recursion),
* the completion calls have to be done outside of the lock.
4820 ba_index = ATH_BA_INDEX(seq_st,
4821 SEQNO(bf->bf_state.bfs_seqno));
4822 bf_next = bf->bf_next;
4823 bf->bf_next = NULL; /* Remove it from the aggr list */
4825 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4826 "%s: checking bf=%p seqno=%d; ack=%d\n",
4827 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
4828 ATH_BA_ISSET(ba, ba_index));
4830 if (tx_ok && ATH_BA_ISSET(ba, ba_index)) {
4831 sc->sc_stats.ast_tx_aggr_ok++;
4832 ath_tx_update_baw(sc, an, atid, bf);
4833 bf->bf_state.bfs_dobaw = 0;
4834 if (!bf->bf_state.bfs_addedbaw)
4835 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4836 "%s: wasn't added: seqno %d\n",
4837 __func__, SEQNO(bf->bf_state.bfs_seqno));
4839 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
} else {
	sc->sc_stats.ast_tx_aggr_fail++;
4842 if (ath_tx_retry_subframe(sc, bf, &bf_q)) {
4845 TAILQ_INSERT_TAIL(&bf_cq, bf, bf_list);
* Now that the BAW updates have been done, unlock.
*
* txseq is grabbed before the lock is released so we
* have a consistent view of what -was- in the BAW.
* Anything after this point will not yet have been
* TXed.
4860 txseq = tap->txa_start;
4864 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4865 "%s: num frames seen=%d; bf nframes=%d\n",
4866 __func__, nframes, nf);
* Now we know how many frames were bad; call the rate control code.
ath_tx_update_ratectrl(sc, ni, rc, &ts, pktlen, nframes, nbad);
* Send a BAR if we dropped any frames.
4880 /* Suspend the TX queue and get ready to send the BAR */
4882 ath_tx_tid_bar_suspend(sc, atid);
4886 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
4887 "%s: txa_start now %d\n", __func__, tap->txa_start);
4891 /* Prepend all frames to the beginning of the queue */
4892 while ((bf = TAILQ_LAST(&bf_q, ath_bufhead_s)) != NULL) {
4893 TAILQ_REMOVE(&bf_q, bf, bf_list);
4894 ATH_TID_INSERT_HEAD(atid, bf, bf_list);
4898 * Reschedule to grab some further frames.
4900 ath_tx_tid_sched(sc, atid);
4903 * If the queue is filtered, re-schedule as required.
4905 * This is required as there may be a subsequent TX descriptor
4906 * for this end-node that has CLRDMASK set, so it's quite possible
4907 * that a filtered frame will be followed by a non-filtered
4908 * (complete or otherwise) frame.
4910 * XXX should we do this before we complete the frame?
4912 if (atid->isfiltered)
4913 ath_tx_tid_filt_comp_complete(sc, atid);
4918 * Send BAR if required
4920 if (ath_tx_tid_bar_tx_ready(sc, atid))
4921 ath_tx_tid_bar_tx(sc, atid);
4925 /* Do deferred completion */
4926 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
4927 TAILQ_REMOVE(&bf_cq, bf, bf_list);
4928 ath_tx_default_comp(sc, bf, 0);
* Handle completion of unaggregated frames in an ADDBA session.
4936 * Fail is set to 1 if the entry is being freed via a call to
4937 * ath_tx_draintxq().
4940 ath_tx_aggr_comp_unaggr(struct ath_softc *sc, struct ath_buf *bf, int fail)
4942 struct ieee80211_node *ni = bf->bf_node;
4943 struct ath_node *an = ATH_NODE(ni);
4944 int tid = bf->bf_state.bfs_tid;
4945 struct ath_tid *atid = &an->an_tid[tid];
4946 struct ath_tx_status ts;
* Take a copy of this; filtering/cloning the frame may free the original buffer.
4953 ts = bf->bf_status.ds_txstat;
4956 * Update rate control status here, before we possibly
4957 * punt to retry or cleanup.
4959 * Do it outside of the TXQ lock.
4961 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
4962 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
4963 &bf->bf_status.ds_txstat,
4964 bf->bf_state.bfs_pktlen,
4965 1, (ts.ts_status == 0) ? 0 : 1);
4968 * This is called early so atid->hwq_depth can be tracked.
4969 * This unfortunately means that it's released and regrabbed
4970 * during retry and cleanup. That's rather inefficient.
4974 if (tid == IEEE80211_NONQOS_TID)
4975 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16!\n", __func__);
4977 DPRINTF(sc, ATH_DEBUG_SW_TX,
4978 "%s: bf=%p: tid=%d, hwq_depth=%d, seqno=%d\n",
4979 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
4980 SEQNO(bf->bf_state.bfs_seqno));
4983 if (atid->hwq_depth < 0)
4984 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: hwq_depth < 0: %d\n",
4985 __func__, atid->hwq_depth);
4988 * If the TID is filtered, handle completing the filter
* transition before potentially kicking it to the cleanup function.
4992 if (atid->isfiltered)
4993 ath_tx_tid_filt_comp_complete(sc, atid);
* If a cleanup is in progress, punt to comp_cleanup
* rather than handling it here. It's then responsible
* for cleaning up, calling the completion function
* in net80211, etc.
5001 if (atid->cleanup_inprogress) {
5002 if (atid->isfiltered)
5003 DPRINTF(sc, ATH_DEBUG_SW_TX,
5004 "%s: isfiltered=1, normal_comp?\n",
5007 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: cleanup_unaggr\n",
5009 ath_tx_comp_cleanup_unaggr(sc, bf);
* XXX TODO: how does cleanup, BAR and filtered frame handling interact?
5017 * If the frame is filtered OR if it's any failure but
5018 * the TID is filtered, the frame must be added to the
5019 * filtered frame list.
5021 * However - a busy buffer can't be added to the filtered
5022 * list as it will end up being recycled without having
5023 * been made available for the hardware.
5025 if ((ts.ts_status & HAL_TXERR_FILT) ||
5026 (ts.ts_status != 0 && atid->isfiltered)) {
5030 DPRINTF(sc, ATH_DEBUG_SW_TX,
5031 "%s: isfiltered=1, fail=%d\n",
5033 freeframe = ath_tx_tid_filt_comp_single(sc, atid, bf);
5035 /* Remove from BAW */
if (bf->bf_state.bfs_addedbaw)
	drops++;
5038 if (bf->bf_state.bfs_dobaw) {
5039 ath_tx_update_baw(sc, an, atid, bf);
5040 if (!bf->bf_state.bfs_addedbaw)
5041 DPRINTF(sc, ATH_DEBUG_SW_TX,
5042 "%s: wasn't added: seqno %d\n",
5043 __func__, SEQNO(bf->bf_state.bfs_seqno));
5045 bf->bf_state.bfs_dobaw = 0;
5049 * If the frame couldn't be filtered, treat it as a drop and
5050 * prepare to send a BAR.
5052 if (freeframe && drops)
5053 ath_tx_tid_bar_suspend(sc, atid);
5056 * Send BAR if required
5058 if (ath_tx_tid_bar_tx_ready(sc, atid))
5059 ath_tx_tid_bar_tx(sc, atid);
5063 * If freeframe is set, then the frame couldn't be
5064 * cloned and bf is still valid. Just complete/free it.
5067 ath_tx_default_comp(sc, bf, fail);
5073 * Don't bother with the retry check if all frames
* are being failed (eg during queue deletion).
if (fail == 0 && ts.ts_status != 0) {
5081 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: retry_unaggr\n",
5083 ath_tx_aggr_retry_unaggr(sc, bf);
5087 /* Success? Complete */
5088 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=%d, seqno %d\n",
5089 __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5090 if (bf->bf_state.bfs_dobaw) {
5091 ath_tx_update_baw(sc, an, atid, bf);
5092 bf->bf_state.bfs_dobaw = 0;
5093 if (!bf->bf_state.bfs_addedbaw)
5094 DPRINTF(sc, ATH_DEBUG_SW_TX,
5095 "%s: wasn't added: seqno %d\n",
5096 __func__, SEQNO(bf->bf_state.bfs_seqno));
5100 * If the queue is filtered, re-schedule as required.
5102 * This is required as there may be a subsequent TX descriptor
5103 * for this end-node that has CLRDMASK set, so it's quite possible
5104 * that a filtered frame will be followed by a non-filtered
5105 * (complete or otherwise) frame.
5107 * XXX should we do this before we complete the frame?
5109 if (atid->isfiltered)
5110 ath_tx_tid_filt_comp_complete(sc, atid);
5113 * Send BAR if required
5115 if (ath_tx_tid_bar_tx_ready(sc, atid))
5116 ath_tx_tid_bar_tx(sc, atid);
5120 ath_tx_default_comp(sc, bf, fail);
5121 /* bf is freed at this point */
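/*
 * Completion dispatch for frames in an aggregation session:
 * aggregate chains and single frames take separate paths.
 */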
5125 ath_tx_aggr_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
5127 if (bf->bf_state.bfs_aggr)
5128 ath_tx_aggr_comp_aggr(sc, bf, fail);
5130 ath_tx_aggr_comp_unaggr(sc, bf, fail);
5134 * Schedule some packets from the given node/TID to the hardware.
5136 * This is the aggregate version.
5139 ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
5140 struct ath_tid *tid)
5143 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5144 struct ieee80211_tx_ampdu *tap;
5145 ATH_AGGR_STATUS status;
5148 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5149 ATH_TX_LOCK_ASSERT(sc);
5152 * XXX TODO: If we're called for a queue that we're leaking frames to,
5153 * ensure we only leak one.
5156 tap = ath_tx_get_tx_tid(an, tid->tid);
5158 if (tid->tid == IEEE80211_NONQOS_TID)
5159 DPRINTF(sc, ATH_DEBUG_SW_TX,
5160 "%s: called for TID=NONQOS_TID?\n", __func__);
5163 status = ATH_AGGR_DONE;
5166 * If the upper layer has paused the TID, don't
5167 * queue any further packets.
5169 * This can also occur from the completion task because
* of packet loss; but as it's serialised with this code,
* it won't "appear" halfway through queuing packets.
if (! ath_tx_tid_can_tx_or_sched(sc, tid))
	break;
5176 bf = ATH_TID_FIRST(tid);
5182 * If the packet doesn't fall within the BAW (eg a NULL
5183 * data frame), schedule it directly; continue.
5185 if (! bf->bf_state.bfs_dobaw) {
5186 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5187 "%s: non-baw packet\n",
5189 ATH_TID_REMOVE(tid, bf, bf_list);
5191 if (bf->bf_state.bfs_nframes > 1)
5192 DPRINTF(sc, ATH_DEBUG_SW_TX,
5193 "%s: aggr=%d, nframes=%d\n",
5195 bf->bf_state.bfs_aggr,
5196 bf->bf_state.bfs_nframes);
5199 * This shouldn't happen - such frames shouldn't
5200 * ever have been queued as an aggregate in the
* first place. However, make sure the fields
* are correctly set up, just to be totally sure.
5204 bf->bf_state.bfs_aggr = 0;
5205 bf->bf_state.bfs_nframes = 1;
5207 /* Update CLRDMASK just before this frame is queued */
5208 ath_tx_update_clrdmask(sc, tid, bf);
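/*
 * Note the ordering: the rate lookup runs first, as the
 * duration, protection and RTS/CTS setup below all depend
 * on the rate series that it selects.
 */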
5210 ath_tx_do_ratelookup(sc, bf);
5211 ath_tx_calc_duration(sc, bf);
5212 ath_tx_calc_protection(sc, bf);
5213 ath_tx_set_rtscts(sc, bf);
5214 ath_tx_rate_fill_rcflags(sc, bf);
5215 ath_tx_setds(sc, bf);
5216 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
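/*
 * Clearing the 11n aggregate bits ensures this frame is
 * transmitted as a plain frame, even though the TID has an
 * aggregation session running.
 */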
5218 sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5220 /* Queue the packet; continue */
5227 * Do a rate control lookup on the first frame in the
5228 * list. The rate control code needs that to occur
5229 * before it can determine whether to TX.
5230 * It's inaccurate because the rate control code doesn't
5231 * really "do" aggregate lookups, so it only considers
5232 * the size of the first frame.
5234 ath_tx_do_ratelookup(sc, bf);
5235 bf->bf_state.bfs_rc[3].rix = 0;
5236 bf->bf_state.bfs_rc[3].tries = 0;
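/*
 * XXX assumption: zeroing the fourth MRR series here keeps
 * the aggregate from being retried at a fourth fallback
 * rate; only the first three series are used.
 */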
5238 ath_tx_calc_duration(sc, bf);
5239 ath_tx_calc_protection(sc, bf);
5241 ath_tx_set_rtscts(sc, bf);
5242 ath_tx_rate_fill_rcflags(sc, bf);
5244 status = ath_tx_form_aggr(sc, an, tid, &bf_q);
5246 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5247 "%s: ath_tx_form_aggr() status=%d\n", __func__, status);
5250 * No frames to be picked up - out of BAW
if (TAILQ_EMPTY(&bf_q))
	break;
* This assumes that the descriptors in the ath_bufhead
* are already linked together via bf_next pointers.
5259 bf = TAILQ_FIRST(&bf_q);
5261 if (status == ATH_AGGR_8K_LIMITED)
5262 sc->sc_aggr_stats.aggr_rts_aggr_limited++;
* If it's the only frame, send it as a non-aggregate;
5266 * assume that ath_tx_form_aggr() has checked
5267 * whether it's in the BAW and added it appropriately.
5269 if (bf->bf_state.bfs_nframes == 1) {
5270 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5271 "%s: single-frame aggregate\n", __func__);
5273 /* Update CLRDMASK just before this frame is queued */
5274 ath_tx_update_clrdmask(sc, tid, bf);
5276 bf->bf_state.bfs_aggr = 0;
5277 bf->bf_state.bfs_ndelim = 0;
5278 ath_tx_setds(sc, bf);
5279 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5280 if (status == ATH_AGGR_BAW_CLOSED)
5281 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
else
	sc->sc_aggr_stats.aggr_single_pkt++;
5285 DPRINTF(sc, ATH_DEBUG_SW_TX_AGGR,
5286 "%s: multi-frame aggregate: %d frames, "
5288 __func__, bf->bf_state.bfs_nframes,
5289 bf->bf_state.bfs_al);
5290 bf->bf_state.bfs_aggr = 1;
5291 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5292 sc->sc_aggr_stats.aggr_aggr_pkt++;
5294 /* Update CLRDMASK just before this frame is queued */
5295 ath_tx_update_clrdmask(sc, tid, bf);
5298 * Calculate the duration/protection as required.
5300 ath_tx_calc_duration(sc, bf);
5301 ath_tx_calc_protection(sc, bf);
5304 * Update the rate and rtscts information based on the
5305 * rate decision made by the rate control code;
5306 * the first frame in the aggregate needs it.
5308 ath_tx_set_rtscts(sc, bf);
* Set up the relevant descriptor fields
5312 * for aggregation. The first descriptor
5313 * already points to the rest in the chain.
5315 ath_tx_setds_11n(sc, bf);
5319 /* Set completion handler, multi-frame aggregate or not */
5320 bf->bf_comp = ath_tx_aggr_comp;
5322 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5323 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: TID=16?\n", __func__);
* Update leak count and frame config if we're leaking frames.
*
* XXX TODO: it should update all frames in an aggregate, not
* just the first frame.
5331 ath_tx_leak_count_update(sc, tid, bf);
5334 ath_tx_handoff(sc, txq, bf);
5336 /* Track outstanding buffer count to hardware */
5337 /* aggregates are "one" buffer */
5341 * Break out if ath_tx_form_aggr() indicated
* there can't be any further progress (eg the BAW is full).
5343 * Checking for an empty txq is done above.
5345 * XXX locking on txq here?
5347 /* XXX TXQ locking */
5348 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5349 (status == ATH_AGGR_BAW_CLOSED ||
5350 status == ATH_AGGR_LEAK_CLOSED))
5356 * Schedule some packets from the given node/TID to the hardware.
* XXX TODO: this routine doesn't enforce the maximum TXQ depth.
* It just dumps frames into the TXQ. We should limit how deep
* the transmit queue can grow for frames dispatched to the given
* TXQ.
*
* To avoid locking issues, either we need to own the TXQ lock
* at this point, or we need to pass in the maximum frame count
* from the caller.
5368 ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
5369 struct ath_tid *tid)
5372 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5374 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: node %p: TID %d: called\n",
5375 __func__, an, tid->tid);
5377 ATH_TX_LOCK_ASSERT(sc);
/* Check - is AMPDU pending or running? If so, print out something. */
5380 if (ath_tx_ampdu_pending(sc, an, tid->tid))
5381 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu pending?\n",
5382 __func__, tid->tid);
5383 if (ath_tx_ampdu_running(sc, an, tid->tid))
5384 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, ampdu running?\n",
5385 __func__, tid->tid);
5390 * If the upper layers have paused the TID, don't
5391 * queue any further packets.
5393 * XXX if we are leaking frames, make sure we decrement
5394 * that counter _and_ we continue here.
if (! ath_tx_tid_can_tx_or_sched(sc, tid))
	break;
5399 bf = ATH_TID_FIRST(tid);
5404 ATH_TID_REMOVE(tid, bf, bf_list);
5407 if (tid->tid != bf->bf_state.bfs_tid) {
5408 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: bfs_tid %d !="
5409 " tid %d\n", __func__, bf->bf_state.bfs_tid,
5412 /* Normal completion handler */
5413 bf->bf_comp = ath_tx_normal_comp;
5416 * Override this for now, until the non-aggregate
5417 * completion handler correctly handles software retransmits.
5419 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5421 /* Update CLRDMASK just before this frame is queued */
5422 ath_tx_update_clrdmask(sc, tid, bf);
5424 /* Program descriptors + rate control */
5425 ath_tx_do_ratelookup(sc, bf);
5426 ath_tx_calc_duration(sc, bf);
5427 ath_tx_calc_protection(sc, bf);
5428 ath_tx_set_rtscts(sc, bf);
5429 ath_tx_rate_fill_rcflags(sc, bf);
5430 ath_tx_setds(sc, bf);
5433 * Update the current leak count if
5434 * we're leaking frames; and set the
5435 * MORE flag as appropriate.
5437 ath_tx_leak_count_update(sc, tid, bf);
5439 /* Track outstanding buffer count to hardware */
5440 /* aggregates are "one" buffer */
5443 /* Punt to hardware or software txq */
5444 ath_tx_handoff(sc, txq, bf);
5449 * Schedule some packets to the given hardware queue.
* This function walks the list of TIDs (ie, ath_node TIDs
* with queued traffic) and attempts to schedule traffic
* from them.
*
* TID scheduling is implemented as a FIFO, with TIDs being
* added to the end of the queue after some frames have been
* scheduled.
5462 struct ath_tid *tid, *next, *last;
5464 ATH_TX_LOCK_ASSERT(sc);
5467 * Don't schedule if the hardware queue is busy.
5468 * This (hopefully) gives some more time to aggregate
5469 * some packets in the aggregation queue.
5471 * XXX It doesn't stop a parallel sender from sneaking
5472 * in transmitting a frame!
5474 /* XXX TXQ locking */
5475 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5476 sc->sc_aggr_stats.aggr_sched_nopkt++;
5479 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5480 sc->sc_aggr_stats.aggr_sched_nopkt++;
5484 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
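/*
 * 'last' marks the end of the original FIFO; once it has been
 * processed we've made one full pass over the TIDs that were
 * queued when this call began, and anything rescheduled onto
 * the end of the list can wait for the next call.
 */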
5486 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5488 * Suspend paused queues here; they'll be resumed
5489 * once the addba completes or times out.
5491 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
5492 __func__, tid->tid, tid->paused);
5493 ath_tx_tid_unsched(sc, tid);
5495 * This node may be in power-save and we're leaking
5496 * a frame; be careful.
5498 if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
5501 if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5502 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
else
	ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5506 /* Not empty? Re-schedule */
5507 if (tid->axq_depth != 0)
5508 ath_tx_tid_sched(sc, tid);
5511 * Give the software queue time to aggregate more
5512 * packets. If we aren't running aggregation then
5513 * we should still limit the hardware queue depth.
5515 /* XXX TXQ locking */
5516 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5519 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5524 * If this was the last entry on the original list, stop.
5525 * Otherwise nodes that have been rescheduled onto the end
5526 * of the TID FIFO list will just keep being rescheduled.
5528 * XXX What should we do about nodes that were paused
5529 * but are pending a leaking frame in response to a ps-poll?
5530 * They'll be put at the front of the list; so they'll
5531 * prematurely trigger this condition! Ew.
5543 * Return net80211 TID struct pointer, or NULL for none
5545 struct ieee80211_tx_ampdu *
5546 ath_tx_get_tx_tid(struct ath_node *an, int tid)
5548 struct ieee80211_node *ni = &an->an_node;
5549 struct ieee80211_tx_ampdu *tap;
if (tid == IEEE80211_NONQOS_TID)
	return NULL;

tap = &ni->ni_tx_ampdu[tid];
return tap;
5559 * Is AMPDU-TX running?
5562 ath_tx_ampdu_running(struct ath_softc *sc, struct ath_node *an, int tid)
5564 struct ieee80211_tx_ampdu *tap;
if (tid == IEEE80211_NONQOS_TID)
	return 0;
5569 tap = ath_tx_get_tx_tid(an, tid);
if (tap == NULL)
	return 0;	/* Not valid; default to not running */
5573 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5577 * Is AMPDU-TX negotiation pending?
5580 ath_tx_ampdu_pending(struct ath_softc *sc, struct ath_node *an, int tid)
5582 struct ieee80211_tx_ampdu *tap;
if (tid == IEEE80211_NONQOS_TID)
	return 0;
5587 tap = ath_tx_get_tx_tid(an, tid);
if (tap == NULL)
	return 0;	/* Not valid; default to not pending */
5591 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5600 * Method to handle sending an ADDBA request.
5602 * We tap this so the relevant flags can be set to pause the TID
5603 * whilst waiting for the response.
5605 * XXX there's no timeout handler we can override?
5608 ath_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5609 int dialogtoken, int baparamset, int batimeout)
5611 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5612 int tid = tap->txa_tid;
5613 struct ath_node *an = ATH_NODE(ni);
5614 struct ath_tid *atid = &an->an_tid[tid];
5617 * XXX danger Will Robinson!
5619 * Although the taskqueue may be running and scheduling some more
5620 * packets, these should all be _before_ the addba sequence number.
5621 * However, net80211 will keep self-assigning sequence numbers
5622 * until addba has been negotiated.
5624 * In the past, these packets would be "paused" (which still works
5625 * fine, as they're being scheduled to the driver in the same
5626 * serialised method which is calling the addba request routine)
5627 * and when the aggregation session begins, they'll be dequeued
5628 * as aggregate packets and added to the BAW. However, now there's
5629 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5630 * packets. Thus they never get included in the BAW tracking and
5631 * this can cause the initial burst of packets after the addba
5632 * negotiation to "hang", as they quickly fall outside the BAW.
5634 * The "eventual" solution should be to tag these packets with
5635 * dobaw. Although net80211 has given us a sequence number,
* it'll be "after" the left edge of the BAW and thus it'll fall within it.
5641 * This is a bit annoying. Until net80211 HT code inherits some
5642 * (any) locking, we may have this called in parallel BUT only
5643 * one response/timeout will be called. Grr.
5645 if (atid->addba_tx_pending == 0) {
5646 ath_tx_tid_pause(sc, atid);
5647 atid->addba_tx_pending = 1;
5651 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5652 "%s: %6D: called; dialogtoken=%d, baparamset=%d, batimeout=%d\n",
5656 dialogtoken, baparamset, batimeout);
5657 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5658 "%s: txa_start=%d, ni_txseqs=%d\n",
5659 __func__, tap->txa_start, ni->ni_txseqs[tid]);
5661 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5666 * Handle an ADDBA response.
5668 * We unpause the queue so TX'ing can resume.
5670 * Any packets TX'ed from this point should be "aggregate" (whether
5671 * aggregate or not) so the BAW is updated.
5673 * Note! net80211 keeps self-assigning sequence numbers until
5674 * ampdu is negotiated. This means the initially-negotiated BAW left
5675 * edge won't match the ni->ni_txseq.
* So, being very dirty, the BAW left edge is "slid" here to match
* ni->ni_txseq.
*
* What likely SHOULD happen is that all packets subsequent to the
* addba request should be tagged as aggregate and queued as non-aggregate
* frames; thus updating the BAW. For now though, I'll just slide the
* window.
5686 ath_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5687 int status, int code, int batimeout)
5689 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5690 int tid = tap->txa_tid;
5691 struct ath_node *an = ATH_NODE(ni);
5692 struct ath_tid *atid = &an->an_tid[tid];
5695 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5696 "%s: %6D: called; status=%d, code=%d, batimeout=%d\n", __func__,
5699 status, code, batimeout);
5701 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5702 "%s: txa_start=%d, ni_txseqs=%d\n",
5703 __func__, tap->txa_start, ni->ni_txseqs[tid]);
5706 * Call this first, so the interface flags get updated
5707 * before the TID is unpaused. Otherwise a race condition
* exists where the unpaused TID doesn't yet have
5709 * IEEE80211_AGGR_RUNNING set.
5711 r = sc->sc_addba_response(ni, tap, status, code, batimeout);
5714 atid->addba_tx_pending = 0;
5717 * Slide the BAW left edge to wherever net80211 left it for us.
5718 * Read above for more information.
5720 tap->txa_start = ni->ni_txseqs[tid];
5721 ath_tx_tid_resume(sc, atid);
5728 * Stop ADDBA on a queue.
5730 * This can be called whilst BAR TX is currently active on the queue,
5731 * so make sure this is unblocked before continuing.
5734 ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
5736 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5737 int tid = tap->txa_tid;
5738 struct ath_node *an = ATH_NODE(ni);
5739 struct ath_tid *atid = &an->an_tid[tid];
5743 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL, "%s: %6D: called\n",
* Pause TID traffic early, so there aren't any races.
* Unblock the pending BAR-held traffic, if it's currently paused.
5753 ath_tx_tid_pause(sc, atid);
5754 if (atid->bar_wait) {
5756 * bar_unsuspend() expects bar_tx == 1, as it should be
5757 * called from the TX completion path. This quietens
5758 * the warning. It's cleared for us anyway.
5761 ath_tx_tid_bar_unsuspend(sc, atid);
5765 /* There's no need to hold the TXQ lock here */
5766 sc->sc_addba_stop(ni, tap);
5769 * ath_tx_tid_cleanup will resume the TID if possible, otherwise
5770 * it'll set the cleanup flag, and it'll be unpaused once
5771 * things have been cleaned up.
5775 ath_tx_tid_cleanup(sc, an, tid, &bf_cq);
5777 * Unpause the TID if no cleanup is required.
5779 if (! atid->cleanup_inprogress)
5780 ath_tx_tid_resume(sc, atid);
5783 /* Handle completing frames and fail them */
5784 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5785 TAILQ_REMOVE(&bf_cq, bf, bf_list);
5786 ath_tx_default_comp(sc, bf, 1);
5792 * Handle a node reassociation.
5794 * We may have a bunch of frames queued to the hardware; those need
5795 * to be marked as cleanup.
5798 ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an)
5800 struct ath_tid *tid;
5807 ATH_TX_UNLOCK_ASSERT(sc);
5810 for (i = 0; i < IEEE80211_TID_SIZE; i++) {
5811 tid = &an->an_tid[i];
if (tid->hwq_depth == 0)
	continue;
5814 ath_tx_tid_pause(sc, tid);
5815 DPRINTF(sc, ATH_DEBUG_NODE,
5816 "%s: %6D: TID %d: cleaning up TID\n",
5818 an->an_node.ni_macaddr,
5821 ath_tx_tid_cleanup(sc, an, i, &bf_cq);
5823 * Unpause the TID if no cleanup is required.
5825 if (! tid->cleanup_inprogress)
5826 ath_tx_tid_resume(sc, tid);
5830 /* Handle completing frames and fail them */
5831 while ((bf = TAILQ_FIRST(&bf_cq)) != NULL) {
5832 TAILQ_REMOVE(&bf_cq, bf, bf_list);
5833 ath_tx_default_comp(sc, bf, 1);
5838 * Note: net80211 bar_timeout() doesn't call this function on BAR failure;
5839 * it simply tears down the aggregation session. Ew.
* It will, however, call ieee80211_ampdu_stop(), which will call
5842 * ic->ic_addba_stop().
5844 * XXX This uses a hard-coded max BAR count value; the whole
5845 * XXX BAR TX success or failure should be better handled!
5848 ath_bar_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
5851 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5852 int tid = tap->txa_tid;
5853 struct ath_node *an = ATH_NODE(ni);
5854 struct ath_tid *atid = &an->an_tid[tid];
5855 int attempts = tap->txa_attempts;
5857 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
5858 "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d\n",
5867 /* Note: This may update the BAW details */
5868 sc->sc_bar_response(ni, tap, status);
5870 /* Unpause the TID */
5872 * XXX if this is attempt=50, the TID will be downgraded
5873 * XXX to a non-aggregate session. So we must unpause the
5874 * XXX TID here or it'll never be done.
5876 * Also, don't call it if bar_tx/bar_wait are 0; something
5877 * has beaten us to the punch? (XXX figure out what?)
5879 if (status == 0 || attempts == 50) {
5881 if (atid->bar_tx == 0 || atid->bar_wait == 0)
5882 DPRINTF(sc, ATH_DEBUG_SW_TX_BAR,
5883 "%s: huh? bar_tx=%d, bar_wait=%d\n",
5885 atid->bar_tx, atid->bar_wait);
5887 ath_tx_tid_bar_unsuspend(sc, atid);
5893 * This is called whenever the pending ADDBA request times out.
5894 * Unpause and reschedule the TID.
5897 ath_addba_response_timeout(struct ieee80211_node *ni,
5898 struct ieee80211_tx_ampdu *tap)
5900 struct ath_softc *sc = ni->ni_ic->ic_ifp->if_softc;
5901 int tid = tap->txa_tid;
5902 struct ath_node *an = ATH_NODE(ni);
5903 struct ath_tid *atid = &an->an_tid[tid];
5905 DPRINTF(sc, ATH_DEBUG_SW_TX_CTRL,
5906 "%s: %6D: TID=%d, called; resuming\n",
5913 atid->addba_tx_pending = 0;
5916 /* Note: This updates the aggregate state to (again) pending */
5917 sc->sc_addba_response_timeout(ni, tap);
5919 /* Unpause the TID; which reschedules it */
5921 ath_tx_tid_resume(sc, atid);
5926 * Check if a node is asleep or not.
5929 ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an)
5932 ATH_TX_LOCK_ASSERT(sc);
5934 return (an->an_is_powersave);
5938 * Mark a node as currently "in powersaving."
5939 * This suspends all traffic on the node.
5941 * This must be called with the node/tx locks free.
5943 * XXX TODO: the locking silliness below is due to how the node
5944 * locking currently works. Right now, the node lock is grabbed
5945 * to do rate control lookups and these are done with the TX
5946 * queue lock held. This means the node lock can't be grabbed
5947 * first here or a LOR will occur.
5949 * Eventually (hopefully!) the TX path code will only grab
5950 * the TXQ lock when transmitting and the ath_node lock when
5951 * doing node/TID operations. There are other complications -
5952 * the sched/unsched operations involve walking the per-txq
5953 * 'active tid' list and this requires both locks to be held.
5956 ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
5958 struct ath_tid *atid;
5959 struct ath_txq *txq;
5962 ATH_TX_UNLOCK_ASSERT(sc);
5964 /* Suspend all traffic on the node */
5967 if (an->an_is_powersave) {
5968 DPRINTF(sc, ATH_DEBUG_XMIT,
5969 "%s: %6D: node was already asleep!\n",
5970 __func__, an->an_node.ni_macaddr, ":");
5975 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
5976 atid = &an->an_tid[tid];
5977 txq = sc->sc_ac2q[atid->ac];
5979 ath_tx_tid_pause(sc, atid);
5982 /* Mark node as in powersaving */
5983 an->an_is_powersave = 1;
5989 * Mark a node as currently "awake."
5990 * This resumes all traffic to the node.
5993 ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)
5995 struct ath_tid *atid;
5996 struct ath_txq *txq;
5999 ATH_TX_UNLOCK_ASSERT(sc);
6004 if (an->an_is_powersave == 0) {
6006 DPRINTF(sc, ATH_DEBUG_XMIT,
6007 "%s: an=%p: node was already awake\n",
6012 /* Mark node as awake */
6013 an->an_is_powersave = 0;
6015 * Clear any pending leaked frame requests
6017 an->an_leak_count = 0;
6019 for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
6020 atid = &an->an_tid[tid];
6021 txq = sc->sc_ac2q[atid->ac];
6023 ath_tx_tid_resume(sc, atid);
6029 ath_legacy_dma_txsetup(struct ath_softc *sc)
6032 /* nothing new needed */
6037 ath_legacy_dma_txteardown(struct ath_softc *sc)
6040 /* nothing new needed */
6045 ath_xmit_setup_legacy(struct ath_softc *sc)
6048 * For now, just set the descriptor length to sizeof(ath_desc);
6049 * worry about extracting the real length out of the HAL later.
6051 sc->sc_tx_desclen = sizeof(struct ath_desc);
6052 sc->sc_tx_statuslen = sizeof(struct ath_desc);
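/*
 * Note: legacy (non-EDMA) chips report TX status inside the
 * descriptor itself, hence the status length matching the
 * descriptor length.
 */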
6053 sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */
6055 sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6056 sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6057 sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6059 sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6060 sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6062 sc->sc_tx.xmit_drain = ath_legacy_tx_drain;