2 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * Driver for the Atheros Wireless LAN controller.
36 * This software is derived from work of Atsushi Onoe; his contribution
37 * is greatly appreciated.
43 * This is needed for register operations which are performed
44 * by the driver - eg, calls to ath_hal_gettsf32().
46 * It's also required for any AH_DEBUG checks in here, eg the
47 * module dependencies.
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/sysctl.h>
56 #include <sys/malloc.h>
58 #include <sys/mutex.h>
59 #include <sys/kernel.h>
60 #include <sys/socket.h>
61 #include <sys/sockio.h>
62 #include <sys/errno.h>
63 #include <sys/callout.h>
65 #include <sys/endian.h>
66 #include <sys/kthread.h>
67 #include <sys/taskqueue.h>
69 #include <sys/module.h>
73 #include <net/if_var.h>
74 #include <net/if_dl.h>
75 #include <net/if_media.h>
76 #include <net/if_types.h>
77 #include <net/if_arp.h>
78 #include <net/ethernet.h>
79 #include <net/if_llc.h>
80 #include <net/ifq_var.h>
82 #include <netproto/802_11/ieee80211_var.h>
83 #include <netproto/802_11/ieee80211_regdomain.h>
84 #ifdef IEEE80211_SUPPORT_SUPERG
85 #include <netproto/802_11/ieee80211_superg.h>
87 #ifdef IEEE80211_SUPPORT_TDMA
88 #include <netproto/802_11/ieee80211_tdma.h>
94 #include <netinet/in.h>
95 #include <netinet/if_ether.h>
98 #include <dev/netif/ath/ath/if_athvar.h>
99 #include <dev/netif/ath/ath_hal/ah_devid.h> /* XXX for softled */
100 #include <dev/netif/ath/ath_hal/ah_diagcodes.h>
102 #include <dev/netif/ath/ath/if_ath_debug.h>
103 #include <dev/netif/ath/ath/if_ath_misc.h>
104 #include <dev/netif/ath/ath/if_ath_tsf.h>
105 #include <dev/netif/ath/ath/if_ath_tx.h>
106 #include <dev/netif/ath/ath/if_ath_sysctl.h>
107 #include <dev/netif/ath/ath/if_ath_led.h>
108 #include <dev/netif/ath/ath/if_ath_keycache.h>
109 #include <dev/netif/ath/ath/if_ath_rx.h>
110 #include <dev/netif/ath/ath/if_ath_beacon.h>
111 #include <dev/netif/ath/ath/if_athdfs.h>
114 #include <dev/netif/ath/ath_tx99/ath_tx99.h>
117 #include <dev/netif/ath/ath/if_ath_tx_edma.h>
120 #include <dev/netif/ath/ath/if_ath_alq.h>
124 * some general macros
126 #define INCR(_l, _sz) (_l) ++; (_l) &= ((_sz) - 1)
127 #define DECR(_l, _sz) (_l) --; (_l) &= ((_sz) - 1)
130 * XXX doesn't belong here, and should be tunable
132 #define ATH_TXSTATUS_RING_SIZE 512
134 MALLOC_DECLARE(M_ATHDEV);
136 static void ath_edma_tx_processq(struct ath_softc *sc, int dosched);
139 * Push some frames into the TX FIFO if we have space.
/*
 * Push frames from the software queue (txq->axq_q) into the hardware
 * TX FIFO for this queue, stopping once the FIFO holds HAL_TXFIFO_DEPTH
 * entries.  Each pushed buffer is moved onto the FIFO staging list
 * (txq->fifo) and handed to the HAL, then TX is kicked.
 *
 * Caller must hold the per-TXQ lock (asserted below).
 *
 * NOTE(review): this extract is missing several original lines (return
 * type, some locals, loop close); comments describe only visible logic.
 */
142 ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
144 	struct ath_buf *bf, *bf_last;
147 	ATH_TXQ_LOCK_ASSERT(txq);
149 	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: called\n",
/* Walk the pending software queue until the hardware FIFO is full. */
153 	TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
154 		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH)
158 	 * We have space in the FIFO - so let's push a frame
163 	 * Remove it from the normal list
/* Move the buffer from the software queue to the FIFO staging list. */
165 	ATH_TXQ_REMOVE(txq, bf, bf_list);
168 	 * XXX for now, we only dequeue a frame at a time, so
169 	 * that's only one buffer.  Later on when we just
170 	 * push this staging _list_ into the queue, we'll
171 	 * set bf_last to the end pointer in the list.
174 	DPRINTF(sc, ATH_DEBUG_TX_PROC,
175 	    "%s: Q%d: depth=%d; pushing %p->%p\n",
183 	 * Append it to the FIFO staging list
185 	ATH_TXQ_INSERT_TAIL(&txq->fifo, bf, bf_list);
188 	 * Set fifo start / fifo end flags appropriately
 *
 * Single-frame push: the same buffer is both the FIFO head (FIFOPTR)
 * and the FIFO tail (FIFOEND) of this FIFO slot.
191 	bf->bf_flags |= ATH_BUF_FIFOPTR;
192 	bf_last->bf_flags |= ATH_BUF_FIFOEND;
195 	 * Push _into_ the FIFO.
/* Hand the descriptor chain head to the hardware FIFO slot. */
197 	ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
199 	if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
200 		ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
201 #endif/* ATH_DEBUG */
203 	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
204 		ath_tx_alq_post(sc, bf);
205 #endif /* ATH_DEBUG_ALQ */
/* One more FIFO slot is now occupied. */
206 	txq->axq_fifo_depth++;
/* Kick the hardware to start (or continue) transmitting this queue. */
210 	ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
214 * Re-initialise the DMA FIFO with the current contents of
217 * This should only be called as part of the chip reset path, as it
218 * assumes the FIFO is currently empty.
/*
 * Re-populate the hardware TX FIFO from the FIFO staging list after a
 * chip reset.  Assumes the hardware FIFO is empty; walks the staging
 * list, re-pushing each "head" entry and skipping interior entries
 * until a FIFOEND marker is seen.  Verifies the rebuilt FIFO depth
 * matches the depth tracked before the reset.
 *
 * NOTE(review): extract is missing some original lines (return type,
 * locals such as old_fifo_depth/fifostart/i, some braces).
 */
221 ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
228 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: called\n",
232 	ATH_TXQ_LOCK_ASSERT(txq);
235 	 * Let's log if the tracked FIFO depth doesn't match
236 	 * what we actually push in.
/* Remember the pre-reset depth so we can sanity-check it afterwards. */
238 	old_fifo_depth = txq->axq_fifo_depth;
239 	txq->axq_fifo_depth = 0;
242 	 * Walk the FIFO staging list, looking for "head" entries.
243 	 * Since we may have a partially completed list of frames,
244 	 * we push the first frame we see into the FIFO and re-mark
245 	 * it as the head entry.  We then skip entries until we see
246 	 * FIFO end, at which point we get ready to push another
247 	 * entry into the FIFO.
249 	TAILQ_FOREACH(bf, &txq->fifo.axq_q, bf_list) {
251 		 * If we're looking for FIFOEND and we haven't found
254 		 * If we're looking for FIFOEND and we've found it,
255 		 * reset for another descriptor.
258 		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
259 			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
260 #endif/* ATH_DEBUG */
262 		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
263 			ath_tx_alq_post(sc, bf);
264 #endif /* ATH_DEBUG_ALQ */
/*
 * fifostart == 0: we are mid-FIFO-set, skipping entries until the
 * FIFOEND marker of the current set is found.
 */
266 		if (fifostart == 0) {
267 			if (bf->bf_flags & ATH_BUF_FIFOEND)
272 		/* Make sure we're not overflowing the FIFO! */
273 		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) {
274 			device_printf(sc->sc_dev,
275 			    "%s: Q%d: more frames in the queue; FIFO depth=%d?!\n",
278 			    txq->axq_fifo_depth);
282 		DPRINTF(sc, ATH_DEBUG_RESET,
283 		    "%s: Q%d: depth=%d: pushing bf=%p; start=%d, end=%d\n",
288 		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
289 		    !! (bf->bf_flags & ATH_BUF_FIFOEND));
293 		 * Set this to be the first buffer in the FIFO
294 		 * list - even if it's also the last buffer in
/* Re-mark as a FIFO head; the original head may have completed. */
297 		bf->bf_flags |= ATH_BUF_FIFOPTR;
299 		/* Push it into the FIFO and bump the FIFO count */
300 		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
301 		txq->axq_fifo_depth++;
304 		 * If this isn't the last entry either, let's
305 		 * clear fifostart so we continue looking for
/* Not the end of this set: keep scanning for its FIFOEND entry. */
308 		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
313 	/* Only bother starting the queue if there's something in it */
315 		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
317 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: FIFO depth was %d, is %d\n",
321 	    txq->axq_fifo_depth);
323 	/* And now, let's check! */
/* Complain if the rebuilt depth differs from the tracked depth. */
324 	if (txq->axq_fifo_depth != old_fifo_depth) {
325 		device_printf(sc->sc_dev,
326 		    "%s: Q%d: FIFO depth should be %d, is %d\n",
330 		    txq->axq_fifo_depth);
335 * Hand off this frame to a hardware queue.
337 * Things are a bit hairy in the EDMA world. The TX FIFO is only
338 * 8 entries deep, so we need to keep track of exactly what we've
339 * pushed into the FIFO and what's just sitting in the TX queue,
342 * So this is split into two halves - frames get appended to the
343 * TXQ; then a scheduler is called to push some frames into the
/*
 * Hand a frame off to a hardware data queue: append it to the software
 * queue (txq->axq_q), update aggregate stats, then invoke the FIFO
 * scheduler to push frames into the hardware FIFO as space allows.
 *
 * NOTE(review): extract is missing some original lines (return type,
 * locking, closing brace).
 */
347 ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
/* Buffer must not already be owned by the hardware/holding path. */
353 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
354 	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
357 	 * XXX TODO: write a hard-coded check to ensure that
358 	 * the queue id in the TX descriptor matches txq->axq_qnum.
361 	/* Update aggr stats */
362 	if (bf->bf_state.bfs_aggr)
363 		txq->axq_aggr_depth++;
365 	/* Push and update frame stats */
366 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
368 	/* For now, set the link pointer in the last descriptor
371 	 * Later on, when it comes time to handling multiple descriptors
372 	 * in one FIFO push, we can link descriptors together this way.
376 	 * Finally, call the FIFO schedule routine to schedule some
377 	 * frames to the FIFO.
379 	ath_edma_tx_fifo_fill(sc, txq);
384 * Hand off this frame to a multicast software queue.
386 * The EDMA TX CABQ will get a list of chained frames, chained
387 * together using the next pointer. The single head of that
388 * particular queue is pushed to the hardware CABQ.
/*
 * Hand a frame off to the multicast (CABQ) software queue.  If the
 * queue is non-empty, mark the previous frame's 802.11 header with
 * MORE_DATA, re-sync its DMA map, and chain the descriptors together;
 * then append the new frame.  The CABQ head is pushed to hardware
 * elsewhere (beacon path).
 *
 * NOTE(review): extract is missing some original lines (return type,
 * settxdesclink arguments, closing brace).
 */
391 ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
395 	ATH_TX_LOCK_ASSERT(sc);
396 	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
397 	    ("%s: busy status 0x%x", __func__, bf->bf_flags));
401 	 * XXX this is mostly duplicated in ath_tx_handoff_mcast().
/* Non-empty queue: chain this frame behind the current tail. */
403 	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
404 		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
405 		struct ieee80211_frame *wh;
407 		/* mark previous frame */
408 		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
409 		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
411 		/* re-sync buffer to memory */
/* Header was modified above; flush it back before hardware reads it. */
412 		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
413 		    BUS_DMASYNC_PREWRITE);
415 		/* link descriptor */
416 		ath_hal_settxdesclink(sc->sc_ah,
421 	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
422 		ath_tx_alq_post(sc, bf);
423 #endif /* ATH_DEBUG_ALQ */
424 	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
429 * Handoff this frame to the hardware.
431 * For the multicast queue, this will treat it as a software queue
432 * and append it to the list, after updating the MORE_DATA flag
433 * in the previous frame. The cabq processing code will ensure
434 * that the queue contents gets transferred over.
436 * For the hardware queues, this will queue a frame to the queue
437 * like before, then populate the FIFO from that. Since the
438 * EDMA hardware has 8 FIFO slots per TXQ, this ensures that
439 * frames such as management frames don't get prematurely dropped.
441 * This does imply that a similar flush-hwq-to-fifoq method will
442 * need to be called from the processq function, before the
443 * per-node software scheduler is called.
/*
 * Top-level EDMA handoff: dispatch a frame to either the multicast
 * software queue (CABQ) or a hardware data queue, based on the queue
 * number.
 */
446 ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
450 	DPRINTF(sc, ATH_DEBUG_XMIT_DESC,
451 	    "%s: called; bf=%p, txq=%p, qnum=%d\n",
/* ATH_TXQ_SWQ is the software-managed multicast/CAB queue. */
457 	if (txq->axq_qnum == ATH_TXQ_SWQ)
458 		ath_edma_xmit_handoff_mcast(sc, txq, bf);
460 		ath_edma_xmit_handoff_hw(sc, txq, bf);
/*
 * Allocate and initialise the per-queue TX FIFO tracking state:
 * an array of HAL_TXFIFO_DEPTH ath_buf pointers, plus head/tail/depth
 * indices reset to the empty state.
 *
 * NOTE(review): return type/value lines are elided in this extract;
 * presumably returns an errno-style status — confirm against full file.
 */
464 ath_edma_setup_txfifo(struct ath_softc *sc, int qnum)
466 	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];
/* DragonFly kmalloc: M_INTWAIT may block briefly; M_ZERO zeroes it. */
468 	te->m_fifo = kmalloc(sizeof(struct ath_buf *) * HAL_TXFIFO_DEPTH,
469 	    M_ATHDEV, M_INTWAIT | M_ZERO);
470 	if (te->m_fifo == NULL) {
471 		device_printf(sc->sc_dev, "%s: malloc failed\n",
477 	 * Set initial "empty" state.
479 	te->m_fifo_head = te->m_fifo_tail = te->m_fifo_depth = 0;
/*
 * Free the per-queue TX FIFO tracking array allocated by
 * ath_edma_setup_txfifo().  Does not release any ath_buf entries
 * still referenced by the array (see XXX below).
 */
485 ath_edma_free_txfifo(struct ath_softc *sc, int qnum)
487 	struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];
489 	/* XXX TODO: actually deref the ath_buf entries? */
490 	kfree(te->m_fifo, M_ATHDEV);
/*
 * Set up EDMA TX DMA state: allocate the TX completion status ring,
 * register it with the HAL, and initialise the per-queue FIFO
 * tracking state for every hardware TX queue.
 *
 * NOTE(review): error-return lines after the alloc are elided in this
 * extract.
 */
495 ath_edma_dma_txsetup(struct ath_softc *sc)
/* Completion ring: ATH_TXSTATUS_RING_SIZE entries of sc_tx_statuslen. */
500 	error = ath_descdma_alloc_desc(sc, &sc->sc_txsdma,
501 	    NULL, "txcomp", sc->sc_tx_statuslen, ATH_TXSTATUS_RING_SIZE);
/* Tell the hardware where the status ring lives. */
505 	ath_hal_setuptxstatusring(sc->sc_ah,
506 	    (void *) sc->sc_txsdma.dd_desc,
507 	    sc->sc_txsdma.dd_desc_paddr,
508 	    ATH_TXSTATUS_RING_SIZE);
510 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
511 		ath_edma_setup_txfifo(sc, i);
/*
 * Tear down EDMA TX DMA state: free every per-queue FIFO tracking
 * array and release the TX completion status ring.
 */
518 ath_edma_dma_txteardown(struct ath_softc *sc)
522 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
523 		ath_edma_free_txfifo(sc, i);
526 	ath_descdma_cleanup(sc, &sc->sc_txsdma, NULL);
531 * Drain all TXQs, potentially after completing the existing completed
/*
 * Drain all TX queues, e.g. as part of a reset.
 *
 * For a no-loss reset, completed frames in the TX status FIFO are
 * processed first and per-queue holding buffers / link pointers are
 * cleared; otherwise every queue's contents are simply discarded.
 * Finally the OACTIVE flag is cleared so the stack can resume
 * queueing.
 *
 * NOTE(review): extract is missing some original lines (locals,
 * several braces, the non-DragonFly #else body).
 */
535 ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
537 	struct ifnet *ifp = sc->sc_ifp;
540 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
/* Stop TX DMA before touching queue state. */
542 	(void) ath_stoptxdma(sc);
545 	 * If reset type is noloss, the TX FIFO needs to be serviced
546 	 * and those frames need to be handled.
548 	 * Otherwise, just toss everything in each TX queue.
550 	if (reset_type == ATH_RESET_NOLOSS) {
/* Service already-completed frames without rescheduling new ones. */
551 		ath_edma_tx_processq(sc, 0);
552 		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
553 			if (ATH_TXQ_SETUP(sc, i)) {
554 				ATH_TXQ_LOCK(&sc->sc_txq[i]);
556 				 * Free the holding buffer; DMA is now
559 				ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
561 				 * Reset the link pointer to NULL; there's
562 				 * no frames to chain DMA to.
564 				sc->sc_txq[i].axq_link = NULL;
565 				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
/* Lossy reset: discard everything still queued on each TXQ. */
569 		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
570 			if (ATH_TXQ_SETUP(sc, i))
571 				ath_tx_draintxq(sc, &sc->sc_txq[i]);
575 	/* XXX dump out the TX completion FIFO contents */
577 	/* XXX dump out the frames */
/* Allow the network stack to hand us frames again. */
579 	IF_LOCK(&ifp->if_snd);
580 #if defined(__DragonFly__)
581 	ifq_clr_oactive(&ifp->if_snd);
583 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
585 	IF_UNLOCK(&ifp->if_snd);
590 * TX completion tasklet.
/*
 * TX completion taskqueue handler: thin wrapper that processes the
 * TX status queue with rescheduling enabled (dosched=1).
 */
594 ath_edma_tx_proc(void *arg, int npending)
596 	struct ath_softc *sc = (struct ath_softc *) arg;
599 	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n",
602 	ath_edma_tx_processq(sc, 1);
606 * Process the TX status queue.
/*
 * Process the EDMA TX completion status ring.
 *
 * Repeatedly pop status descriptors from the completion FIFO until the
 * hardware reports HAL_EINPROGRESS.  For each completed descriptor:
 * map it to its TXQ, pop the head buffer off that queue's FIFO staging
 * list, fix up rate/retry info the AR9380-family status FIFO doesn't
 * supply, update stats, and complete the frame.  If dosched is set,
 * refill the hardware FIFO, clear OACTIVE and kick the software
 * scheduler afterwards.
 *
 * NOTE(review): extract is missing many original lines (locals, most
 * braces, #ifdef openers, several statements); comments describe only
 * the visible logic.
 */
609 ath_edma_tx_processq(struct ath_softc *sc, int dosched)
611 	struct ath_hal *ah = sc->sc_ah;
613 	struct ath_tx_status ts;
616 	struct ieee80211_node *ni;
622 	uint32_t txstatus[32];
/* Pop completion entries until the hardware says "in progress". */
625 	for (idx = 0; ; idx++) {
626 		bzero(&ts, sizeof(ts));
628 		ATH_TXSTATUS_LOCK(sc);
/* Raw copy of the status words, for debugging/ALQ logging below. */
630 		ath_hal_gettxrawtxdesc(ah, txstatus);
632 		status = ath_hal_txprocdesc(ah, NULL, (void *) &ts);
633 		ATH_TXSTATUS_UNLOCK(sc);
/* No more completed entries in the status ring; stop. */
635 		if (status == HAL_EINPROGRESS)
639 		if (sc->sc_debug & ATH_DEBUG_TX_PROC)
640 			if (ts.ts_queue_id != sc->sc_bhalq)
641 			ath_printtxstatbuf(sc, NULL, txstatus, ts.ts_queue_id,
642 			    idx, (status == HAL_OK));
646 		 * If there is an error with this descriptor, continue
649 		 * XXX TBD: log some statistics?
651 		if (status == HAL_EIO) {
652 			device_printf(sc->sc_dev, "%s: invalid TX status?\n",
657 #if defined(ATH_DEBUG_ALQ) && defined(ATH_DEBUG)
658 		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS))
659 			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
662 #endif /* ATH_DEBUG_ALQ */
665 		 * At this point we have a valid status descriptor.
666 		 * The QID and descriptor ID (which currently isn't set)
667 		 * is part of the status.
669 		 * We then assume that the descriptor in question is the
670 		 * -head- of the given QID.  Eventually we should verify
671 		 * this by using the descriptor ID.
675 		 * The beacon queue is not currently a "real" queue.
676 		 * Frames aren't pushed onto it and the lock isn't setup.
677 		 * So skip it for now; the beacon handling code will
678 		 * free and alloc more beacon buffers as appropriate.
680 		if (ts.ts_queue_id == sc->sc_bhalq)
683 		txq = &sc->sc_txq[ts.ts_queue_id];
/* Completion should correspond to the head of this queue's FIFO list. */
686 		bf = ATH_TXQ_FIRST(&txq->fifo);
689 		 * Work around the situation where I'm seeing notifications
690 		 * for Q1 when no frames are available.  That needs to be
691 		 * debugged but not by crashing _here_.
694 			device_printf(sc->sc_dev, "%s: Q%d: empty?\n",
701 		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d, bf=%p, start=%d, end=%d\n",
704 		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
705 		    !! (bf->bf_flags & ATH_BUF_FIFOEND));
707 		/* XXX TODO: actually output debugging info about this */
710 		/* XXX assert the buffer/descriptor matches the status descid */
711 		if (ts.ts_desc_id != bf->bf_descid) {
712 			device_printf(sc->sc_dev,
713 			    "%s: mismatched descid (qid=%d, tsdescid=%d, "
722 		/* This removes the buffer and decrements the queue depth */
723 		ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
724 		if (bf->bf_state.bfs_aggr)
725 			txq->axq_aggr_depth--;
728 		 * If this was the end of a FIFO set, decrement FIFO depth
730 		if (bf->bf_flags & ATH_BUF_FIFOEND)
731 			txq->axq_fifo_depth--;
734 		 * If this isn't the final buffer in a FIFO set, mark
735 		 * the buffer as busy so it goes onto the holding queue.
737 		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
738 			bf->bf_flags |= ATH_BUF_BUSY;
740 		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: FIFO depth is now %d (%d)\n",
744 		    txq->fifo.axq_depth);
746 		/* XXX assert FIFO depth >= 0 */
750 		 * Outside of the TX lock - if the buffer is end
751 		 * end buffer in this FIFO, we don't need a holding
754 		if (bf->bf_flags & ATH_BUF_FIFOEND) {
756 			ath_txq_freeholdingbuf(sc, txq);
761 		 * First we need to make sure ts_rate is valid.
763 		 * Pre-EDMA chips pass the whole TX descriptor to
764 		 * the proctxdesc function which will then fill out
765 		 * ts_rate based on the ts_finaltsi (final TX index)
766 		 * in the TX descriptor.  However the TX completion
767 		 * FIFO doesn't have this information.  So here we
768 		 * do a separate HAL call to populate that information.
770 		 * The same problem exists with ts_longretry.
771 		 * The FreeBSD HAL corrects ts_longretry in the HAL layer;
772 		 * the AR9380 HAL currently doesn't.  So until the HAL
773 		 * is imported and this can be added, we correct for it
777 		/* XXX faked for now.  Ew. */
/* Reconstruct ts_rate/ts_longretry from the rate-control series. */
778 		if (ts.ts_finaltsi < 4) {
780 			    bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode;
/* Intentional fallthrough: accumulate tries of all earlier series. */
781 			switch (ts.ts_finaltsi) {
782 			case 3: ts.ts_longretry +=
783 			    bf->bf_state.bfs_rc[2].tries;
784 			case 2: ts.ts_longretry +=
785 			    bf->bf_state.bfs_rc[1].tries;
786 			case 1: ts.ts_longretry +=
787 			    bf->bf_state.bfs_rc[0].tries;
/* finaltsi out of range: complain and fall back to the first series. */
790 			device_printf(sc->sc_dev, "%s: finaltsi=%d\n",
793 			ts.ts_rate = bf->bf_state.bfs_rc[0].ratecode;
797 		 * XXX This is terrible.
799 		 * Right now, some code uses the TX status that is
800 		 * passed in here, but the completion handlers in the
801 		 * software TX path also use bf_status.ds_txstat.
802 		 * Ew.  That should all go away.
804 		 * XXX It's also possible the rate control completion
805 		 * routine is called twice.
807 		memcpy(&bf->bf_status, &ts, sizeof(ts));
812 		/* XXX duplicate from ath_tx_processq */
/* Track TX RSSI for successful, ACKed frames only. */
813 		if (ni != NULL && ts.ts_status == 0 &&
814 		    ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
816 			sc->sc_stats.ast_tx_rssi = ts.ts_rssi;
817 			ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
821 		/* Handle frame completion and rate control update */
822 		ath_tx_process_buf_completion(sc, txq, &ts, bf);
824 		/* bf is invalid at this point */
827 		 * Now that there's space in the FIFO, let's push some
828 		 * more frames into it.
832 			ath_edma_tx_fifo_fill(sc, txq);
/* Completion freed queue space; let the stack queue frames again. */
839 	IF_LOCK(&sc->sc_ifp->if_snd);
840 #if defined(__DragonFly__)
841 	ifq_clr_oactive(&sc->sc_ifp->if_snd);
843 	sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
845 	IF_UNLOCK(&sc->sc_ifp->if_snd);
848 	/* Kick software scheduler */
850 	 * XXX It's inefficient to do this if the FIFO queue is full,
851 	 * but there's no easy way right now to only populate
852 	 * the txq task for _one_ TXQ.  This should be fixed.
/*
 * Register the EDMA TX completion handler: initialise the TX task
 * to run ath_edma_tx_proc() on the driver's taskqueue.
 */
859 ath_edma_attach_comp_func(struct ath_softc *sc)
862 	TASK_INIT(&sc->sc_txtask, 0, ath_edma_tx_proc, sc);
/*
 * Attach-time setup of the EDMA TX path: query the HAL for descriptor
 * and status-entry sizes, log them, and wire the sc_tx method table to
 * the EDMA implementations in this file.
 */
866 ath_xmit_setup_edma(struct ath_softc *sc)
869 	/* Fetch EDMA field and buffer sizes */
870 	(void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen);
871 	(void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen);
872 	(void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps);
875 	device_printf(sc->sc_dev, "TX descriptor length: %d\n",
877 	device_printf(sc->sc_dev, "TX status length: %d\n",
878 	    sc->sc_tx_statuslen);
879 	device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n",
/* Install the EDMA TX method table used by the generic ath code. */
883 	sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
884 	sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown;
885 	sc->sc_tx.xmit_attach_comp_func = ath_edma_attach_comp_func;
887 	sc->sc_tx.xmit_dma_restart = ath_edma_dma_restart;
888 	sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff;
889 	sc->sc_tx.xmit_drain = ath_edma_tx_drain;