/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

/*
 * This is needed for register operations which are performed
 * by the driver - eg, calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, eg the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/module.h>

#include <machine/pmap.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>
#include <net/ifq_var.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>	/* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_rx_edma.h>

#ifdef ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif

/*
 * some general macros
 */
#define INCR(_l, _sz)	do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define DECR(_l, _sz)	do { (_l)--; (_l) &= ((_sz) - 1); } while (0)
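
/*
 * Editor's note: the masking in INCR/DECR only implements ring
 * wrap-around correctly when _sz is a power of two (e.g. INCR(i, 8)
 * takes i from 7 back to 0, but INCR(i, 6) would take 5 to 4).  The
 * EDMA RX FIFO depths reported by the HAL appear to satisfy this,
 * but it is an implicit assumption of these macros.
 */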

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
	    int nbufs);
static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static int ath_edma_recv_proc_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);
static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
	    HAL_RX_QUEUE qtype, int dosched);

static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	ath_hal_stoppcurecv(ah);
	ath_hal_setrxfilter(ah, 0);
	ath_hal_stopdmarecv(ah);
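
	/*
	 * Editor's note: the ordering above appears deliberate - the
	 * PCU is stopped and the RX filter cleared before RX DMA is
	 * halted, so the MAC stops accepting new frames before the DMA
	 * engine stops writing completed descriptors into host memory.
	 */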

	/* Flush RX pending for each queue */
	/* XXX should generic-ify this */
	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
	}
	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
	}

	ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing the FIFO contents
 * back into the FIFO.
 */
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i, j;

	ATH_RX_LOCK_ASSERT(sc);

	i = re->m_fifo_head;
	for (j = 0; j < re->m_fifo_depth; j++) {
		bf = re->m_fifo[i];
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: pos=%i, addr=0x%jx\n",
		    __func__, qtype, i,
		    (uintmax_t)bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
		INCR(i, re->m_fifolen);
	}

	/* Ensure this worked out right */
	if (i != re->m_fifo_tail) {
		device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
		    __func__, i, re->m_fifo_tail);
	}
}
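
/*
 * Editor's note: a chip reset clears the hardware's internal RX FIFO
 * state but leaves the driver's ath_buf entries (and their DMA
 * mappings) intact, so recovery here is simply re-pushing every
 * still-tracked buffer, in head -> tail order, via ath_hal_putrxbuf().
 */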

static void
ath_edma_handle_rxfifo_reset(struct ath_softc *sc)
{
	if (sc->sc_rxfifo_state == ATH_RXFIFO_RESET) {
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing HP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Re-initing LP FIFO\n", __func__);
		ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
		sc->sc_rxfifo_state = ATH_RXFIFO_OK;
	}
}

/*
 * XXX TODO: this needs to reallocate the FIFO entries when a reset
 * occurs, in case the FIFO is filled up and no new descriptors get
 * thrown into the FIFO.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	ATH_RX_LOCK(sc);

	ath_edma_handle_rxfifo_reset(sc);

	/* Add up to m_fifolen entries in each queue */
	/*
	 * These must occur after the above write so the FIFO buffers
	 * are pushed/tracked in the same order as the hardware will
	 * process them.
	 */
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
	    sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);
	ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
	    sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

	ath_hal_startpcurecv(ah);

	ATH_RX_UNLOCK(sc);

	return (0);
}
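
/*
 * Editor's note: the HP/LP FIFOs are refilled only after any pending
 * FIFO reset has been handled, so the order in which buffers are
 * pushed to the hardware matches the order tracked in m_fifo[];
 * m_fifo_head then always names the entry the hardware will complete
 * next.
 */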

static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	ath_edma_handle_rxfifo_reset(sc);
	ath_edma_recv_proc_queue(sc, qtype, dosched);
	ath_power_restore_power_state(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{
	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	ath_edma_handle_rxfifo_reset(sc);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);
	ath_power_restore_power_state(sc);

	taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}
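
/*
 * Editor's note: both scheduling entry points follow the same shape -
 * wake the chip, drain completed FIFO entries into the per-queue
 * deferred list, restore the power state, then punt the stack-facing
 * work to the RX taskqueue rather than doing it in this context.
 */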

static void
ath_edma_recv_flush(struct ath_softc *sc)
{

	DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	/*
	 * Flush any active frames from FIFO -> deferred list
	 */
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

	/*
	 * Process what's in the deferred queue
	 */
	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

	ath_power_restore_power_state(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 */
static int
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_rx_status *rs;
	struct ath_desc *ds;
	struct ath_buf *bf;
	struct ath_hal *ah = sc->sc_ah;
	uint64_t tsf;
	int16_t nf;
	int npkts = 0;
	int n;

	tsf = ath_hal_gettsf64(ah);
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;

	ATH_RX_LOCK(sc);

	do {
		bf = re->m_fifo[re->m_fifo_head];
		/* This shouldn't occur! */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
			    __func__, qtype);
			break;
		}
		ds = bf->bf_desc;

		/*
		 * Sync descriptor memory - this also syncs the buffer for us.
		 * EDMA descriptors are in cached memory.
		 */
		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rs = &bf->bf_status.ds_rxstat;
		bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
		    NULL, rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
			    sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
		if (bf->bf_rxstatus == HAL_EINPROGRESS) {
			DPRINTF(sc, ATH_DEBUG_EDMA_RX,
			    "%s: Q%d: still in-prog!\n", __func__, qtype);
			break;
		}

		/*
		 * Completed descriptor.
		 */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: completed!\n", __func__, qtype);
		npkts++;

		/*
		 * We've been synced already, so unmap.
		 */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

		/*
		 * Remove the FIFO entry and place it on the completion
		 * queue.
		 */
		re->m_fifo[re->m_fifo_head] = NULL;
		TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

		/* Bump the descriptor FIFO stats */
		INCR(re->m_fifo_head, re->m_fifolen);
		re->m_fifo_depth--;
		/* XXX check it doesn't fall below 0 */
	} while (re->m_fifo_depth > 0);

	/* Append some more fresh frames to the FIFO */
	/* (kick already handled when dosched != 0) */
	if (dosched)
		n = ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

	ATH_RX_UNLOCK(sc);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx proc: npkts=%d\n",
	    npkts);

	return (npkts);
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
	struct ath_buf *bf;

	ATH_RX_LOCK_ASSERT(sc);

	/* Free in one set, inside the lock */
	while ((bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) != NULL) {
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
		ath_edma_rxbuf_free(sc, bf);
	}
	while ((bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) != NULL) {
		TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
		ath_edma_rxbuf_free(sc, bf);
	}
}

static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
	int ngood = 0;
	uint64_t tsf;
	struct ath_buf *bf;
	struct ath_buf *next;
	struct ath_rx_status *rs;
	int16_t nf;
	ath_bufhead rxlist;
	struct mbuf *m;

	TAILQ_INIT(&rxlist);

	nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
	/*
	 * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
	 * otherwise we may end up adding in the wrong values if this
	 * is delayed too far..
	 */
	tsf = ath_hal_gettsf64(sc->sc_ah);

	/* Copy the list over */
	ATH_RX_LOCK(sc);
	TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
	ATH_RX_UNLOCK(sc);

	/* Handle the completed descriptors */
	TAILQ_FOREACH_MUTABLE(bf, &rxlist, bf_list, next) {
		/*
		 * Skip the RX descriptor status - start at the data offset
		 */
		m_adj(bf->bf_m, sc->sc_rx_statuslen);

		/* Handle the frame */

		rs = &bf->bf_status.ds_rxstat;
		m = bf->bf_m;
		bf->bf_m = NULL;
		if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
			ngood++;
	}

	if (ngood) {
		sc->sc_lastrx = tsf;
	}

	ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
	    "ath edma rx deferred proc: ngood=%d\n",
	    ngood);

	/* Free in one set, inside the lock */
	ATH_RX_LOCK(sc);
	while ((bf = TAILQ_FIRST(&rxlist)) != NULL) {
		/* Free the buffer/mbuf */
		TAILQ_REMOVE(&rxlist, bf, bf_list);
		ath_edma_rxbuf_free(sc, bf);
	}
	ATH_RX_UNLOCK(sc);

	return (ngood);
}
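
/*
 * Editor's note: RX is deliberately two-phase.  The FIFO walk in
 * ath_edma_recv_proc_queue() runs with ATH_RX_LOCK held and does no
 * net80211 work; this routine then calls ath_rx_pkt() on the private
 * rxlist without the lock, so frame processing cannot stall FIFO
 * refill.  ath_rx_pkt() is expected to consume the mbuf, which is why
 * bf->bf_m is cleared before the call.
 */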

static void
ath_edma_recv_tasklet(void *arg, int npending)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
#ifdef IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = ifp->if_l2com;
#endif
	int n1, n2;

	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
	    __func__, npending);

	wlan_serialize_enter();
	ATH_PCU_LOCK(sc);
	if (sc->sc_inreset_cnt > 0) {
		device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
		    __func__);
		ATH_PCU_UNLOCK(sc);
		wlan_serialize_exit();
		return;
	}
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	ath_power_set_power_state(sc, HAL_PM_AWAKE);

	ath_edma_handle_rxfifo_reset(sc);
	n1 = ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
	n2 = ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
	ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

	/* Handle resched and kickpcu appropriately */
	ATH_PCU_LOCK(sc);
	if (sc->sc_kickpcu) {
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			kprintf("k(%d,%d)", n1, n2);
#endif
		sc->sc_kickpcu = 0;
		/* reload imask XXX */
		ath_hal_intrset(sc->sc_ah, sc->sc_imask);
	}
	ATH_PCU_UNLOCK(sc);

	/*
	 * XXX: If we read the tsf/channoise here and then pass it in,
	 * we could restore the power state before processing
	 * the deferred queue.
	 */
	ath_power_restore_power_state(sc);

	/* XXX inside IF_LOCK ? */
	if (!ifq_is_oactive(&ifp->if_snd)) {
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (!ifq_is_empty(&ifp->if_snd))
			ath_tx_kick(sc);
	}
	if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);

	wlan_serialize_exit();
}
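
/*
 * Editor's note: sc_kickpcu is set by the interrupt path when the PCU
 * needs a restart (e.g. after an RX overrun); by the time this tasklet
 * has drained both FIFOs it is safe to clear the flag and re-enable
 * interrupts via ath_hal_intrset().  This reading is inferred from the
 * legacy RX path, not spelled out here.
 */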

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Setup the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct mbuf *m;
	int error;
	int len;

	ATH_RX_LOCK_ASSERT(sc);

	m = m_getjcl(MB_DONTWAIT, MT_DATA, M_PKTHDR, sc->sc_edma_bufsize);
	/* m = m_getcl(MB_WAIT, MT_DATA, M_PKTHDR); */
	/* m = m_getm(NULL, sc->sc_edma_bufsize, MB_WAIT, MT_DATA); */
	if (! m)
		return (ENOBUFS);		/* XXX ? */

	/* XXX warn/enforce alignment */

	len = m->m_ext.ext_size;
#if 0
	device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
	    __func__, m, len, mtod(m, char *));
#endif

	m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	/*
	 * Populate ath_buf fields.
	 */
	KKASSERT(m->m_len >= sc->sc_rx_statuslen);
	bf->bf_desc = mtod(m, struct ath_desc *);
	bf->bf_lastds = bf->bf_desc;	/* XXX only really for TX? */
	bf->bf_m = m;

	/*
	 * Zero the descriptor and ensure it makes it out to the
	 * bounce buffer if one is required.
	 *
	 * XXX PREWRITE will copy the whole buffer; we only needed it
	 * to sync the first 32 DWORDS.  Oh well.
	 */
	memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

	/*
	 * Create DMA mapping.
	 */
	error = bus_dmamap_load_mbuf_segment(sc->sc_dmat,
	    bf->bf_dmamap, m, bf->bf_segs, 1, &bf->bf_nseg, BUS_DMA_NOWAIT);
	if (error != 0) {
		device_printf(sc->sc_dev, "%s: failed; error=%d\n",
		    __func__, error);
		m_freem(m);
		return (error);
	}

	/*
	 * Set daddr to the physical mapping page.
	 */
	bf->bf_daddr = bf->bf_segs[0].ds_addr;

	/*
	 * Prepare for the upcoming read.
	 *
	 * We need to both sync some data into the buffer (the zero'ed
	 * descriptor payload) and also prepare for the read that's going
	 * to come in.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Finish! */
	return (0);
}
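
/*
 * Editor's note: both sync directions are needed above - PREWRITE
 * pushes the just-zeroed status words out of the CPU cache (or bounce
 * buffer) before the hardware DMA-writes over them, and PREREAD
 * prepares the map for the device-written status/frame data the host
 * will read on completion.
 */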

/*
 * Allocate a RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
	struct ath_buf *bf;
	int error;

	ATH_RX_LOCK_ASSERT(sc);

	/* Allocate buffer */
	bf = TAILQ_FIRST(&sc->sc_rxbuf);
	/* XXX shouldn't happen upon startup? */
	if (bf == NULL) {
		device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
		    __func__);
		return (NULL);
	}

	/* Remove it from the free list */
	TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

	/* Assign RX mbuf to it */
	error = ath_edma_rxbuf_init(sc, bf);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
		    __func__, bf, error);
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
		return (NULL);
	}

	return (bf);
}

static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Only unload the frame if we haven't consumed
	 * the mbuf via ath_rx_pkt().
	 */
	if (bf->bf_m) {
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		m_freem(bf->bf_m);
		bf->bf_m = NULL;
	}

	TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'n' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * hardware FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	struct ath_buf *bf;
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	/*
	 * Allocate buffers until the FIFO is full or nbufs is reached.
	 */
	for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
		/* Ensure the FIFO is already blank, complain loudly! */
		if (re->m_fifo[re->m_fifo_tail] != NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: fifo[%d] != NULL (%p)\n",
			    __func__, qtype, re->m_fifo_tail,
			    re->m_fifo[re->m_fifo_tail]);

			/* Free the stale entry before reusing the slot */
			ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
			re->m_fifo_depth--;
			/* XXX check it's not < 0 */
			re->m_fifo[re->m_fifo_tail] = NULL;
		}

		bf = ath_edma_rxbuf_alloc(sc);
		/* XXX should ensure the FIFO is not NULL? */
		if (bf == NULL) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
			    __func__, qtype, i, nbufs);
			break;
		}

		re->m_fifo[re->m_fifo_tail] = bf;

		/* Write to the RX FIFO */
		DPRINTF(sc, ATH_DEBUG_EDMA_RX,
		    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
		    __func__, qtype, bf->bf_desc,
		    (uintmax_t) bf->bf_daddr);
		ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

		re->m_fifo_depth++;
		INCR(re->m_fifo_tail, re->m_fifolen);
	}

	/*
	 * Return how many were allocated.
	 */
	DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
	    __func__, qtype, nbufs, i);
	return (i);
}
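
/*
 * Editor's note: buffers are always appended at m_fifo_tail and
 * completed at m_fifo_head, with m_fifo_depth counting the entries
 * the hardware currently owns; the loop bound above keeps the depth
 * at or below m_fifolen, so the software array mirrors the hardware
 * FIFO exactly.
 */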

static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
	int i;

	ATH_RX_LOCK_ASSERT(sc);

	for (i = 0; i < re->m_fifolen; i++) {
		if (re->m_fifo[i] != NULL) {
#ifdef ATH_DEBUG
			struct ath_buf *bf = re->m_fifo[i];

			if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
				ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
			ath_edma_rxbuf_free(sc, re->m_fifo[i]);
			re->m_fifo[i] = NULL;
			re->m_fifo_depth--;
		}
	}

	if (re->m_rxpending != NULL) {
		m_freem(re->m_rxpending);
		re->m_rxpending = NULL;
	}
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}
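
/*
 * Editor's note: unlike the reinit path, this flush walks the whole
 * m_fifo[] array rather than head -> tail, since it runs when the
 * hardware is presumed to no longer own any entries (teardown or
 * post-stop); everything is reclaimed and the indices are reset to
 * the empty state.
 */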

/*
 * Setup the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	ATH_RX_LOCK_ASSERT(sc);

	if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
		device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
		    __func__, qtype);
		return (-EINVAL);
	}

	device_printf(sc->sc_dev, "%s: type=%d, FIFO depth = %d entries\n",
	    __func__, qtype, re->m_fifolen);

	/* Allocate ath_buf FIFO array, pre-zero'ed */
	re->m_fifo = kmalloc(sizeof(struct ath_buf *) * re->m_fifolen,
	    M_ATHDEV, M_INTWAIT | M_ZERO);
	if (re->m_fifo == NULL) {
		device_printf(sc->sc_dev, "%s: malloc failed\n",
		    __func__);
		return (-ENOMEM);
	}

	/*
	 * Set initial "empty" state.
	 */
	re->m_rxpending = NULL;
	re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

	return (0);
}

static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
	    __func__, qtype);

	kfree(re->m_fifo, M_ATHDEV);

	return (0);
}

static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
	int error;

	/*
	 * Create RX DMA tag and buffers.
	 */
	error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
	    "rx", ath_rxbuf, sc->sc_rx_statuslen);
	if (error != 0)
		return (error);

	ATH_RX_LOCK(sc);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
	(void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	return (0);
}

static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

	ATH_RX_LOCK(sc);
	ath_edma_flush_deferred_queue(sc);
	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

	ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
	ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
	ATH_RX_UNLOCK(sc);

	/* Free RX ath_buf */
	/* Free RX DMA tag */
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

	return (0);
}

void
ath_recv_setup_edma(struct ath_softc *sc)
{

	/* Set buffer size to 4KB */
	sc->sc_edma_bufsize = 4096;

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

	/* Configure the hardware with the RX buffer size */
	(void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
	    sc->sc_rx_statuslen);
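
	/*
	 * Editor's note: the hardware DMA-writes an RX status block of
	 * sc_rx_statuslen bytes at the head of each receive buffer, so
	 * only (sc_edma_bufsize - sc_rx_statuslen) bytes of the 4096-byte
	 * buffer are available for frame data.  The deferred path's
	 * m_adj(bf->bf_m, sc->sc_rx_statuslen) strips that header before
	 * the frame is handed up the stack.
	 */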

	device_printf(sc->sc_dev, "RX status length: %d\n",
	    sc->sc_rx_statuslen);
	device_printf(sc->sc_dev, "RX buffer size: %d\n",
	    sc->sc_edma_bufsize);

	sc->sc_rx.recv_stop = ath_edma_stoprecv;
	sc->sc_rx.recv_start = ath_edma_startrecv;
	sc->sc_rx.recv_flush = ath_edma_recv_flush;
	sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
	sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

	sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
	sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

	sc->sc_rx.recv_sched = ath_edma_recv_sched;
	sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}