/*-
 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from the work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"
/*
 * This is needed for register operations which are performed
 * by the driver - e.g., calls to ath_hal_gettsf32().
 *
 * It's also required for any AH_DEBUG checks in here, e.g. the
 * module dependencies.
 */
#include "opt_ah.h"
#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/callout.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/priv.h>
#include <sys/module.h>
#include <sys/ktr.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_llc.h>

#include <netproto/802_11/ieee80211_var.h>
#include <netproto/802_11/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <netproto/802_11/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <netproto/802_11/ieee80211_tdma.h>
#endif

#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_ether.h>
#endif

#include <dev/netif/ath/ath/if_athvar.h>
#include <dev/netif/ath/ath_hal/ah_devid.h>             /* XXX for softled */
#include <dev/netif/ath/ath_hal/ah_diagcodes.h>

#include <dev/netif/ath/ath/if_ath_debug.h>
#include <dev/netif/ath/ath/if_ath_misc.h>
#include <dev/netif/ath/ath/if_ath_tsf.h>
#include <dev/netif/ath/ath/if_ath_tx.h>
#include <dev/netif/ath/ath/if_ath_sysctl.h>
#include <dev/netif/ath/ath/if_ath_led.h>
#include <dev/netif/ath/ath/if_ath_keycache.h>
#include <dev/netif/ath/ath/if_ath_rx.h>
#include <dev/netif/ath/ath/if_ath_beacon.h>
#include <dev/netif/ath/ath/if_athdfs.h>

#ifdef ATH_TX99_DIAG
#include <dev/netif/ath/ath_tx99/ath_tx99.h>
#endif

#include <dev/netif/ath/ath/if_ath_rx_edma.h>

#ifdef  ATH_DEBUG_ALQ
#include <dev/netif/ath/ath/if_ath_alq.h>
#endif
/*
 * some general macros
 */
#define INCR(_l, _sz)           do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define DECR(_l, _sz)           do { (_l)--; (_l) &= ((_sz) - 1); } while (0)
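
/*
 * Illustrative sketch (not compiled): these assume the ring size is a
 * power of two, so the mask can replace a modulo.  With _sz = 8:
 *
 *      int idx = 7;
 *      INCR(idx, 8);   -> idx wraps to 0
 *      DECR(idx, 8);   -> idx wraps back to 7
 */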

MALLOC_DECLARE(M_ATHDEV);

/*
 * XXX TODO:
 *
 * + Make sure the FIFO is correctly flushed and reinitialised
 *   through a reset;
 * + Verify multi-descriptor frames work!
 * + There's a "memory use after free" which needs to be tracked down
 *   and fixed ASAP.  I've seen this in the legacy path too, so it
 *   may be a generic RX path issue.
 */
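
/*
 * EDMA RX overview (a summary of the code below):
 *
 * Received frames land in the per-queue (HP/LP) hardware FIFOs.  The
 * interrupt path calls ath_edma_recv_proc_queue() to pop completed
 * buffers off a FIFO onto the matching deferred list (sc_rx_rxlist[]),
 * and the RX taskqueue then calls ath_edma_recv_proc_deferred_queue()
 * to hand those frames to net80211 via ath_rx_pkt().
 */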

/*
 * XXX shuffle the function orders so these pre-declarations aren't
 * required!
 */
static  int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype,
            int nbufs);
static  int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype);
static  void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf);
static  void ath_edma_recv_proc_queue(struct ath_softc *sc,
            HAL_RX_QUEUE qtype, int dosched);
static  int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc,
            HAL_RX_QUEUE qtype, int dosched);

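/*
 * Stop RX.  The ordering matters: quiesce the PCU first, clear the RX
 * filter, then halt RX DMA, and give the hardware a moment (3ms) to
 * finish any in-flight DMA before the pending reassembly mbufs are
 * freed.
 */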
static void
ath_edma_stoprecv(struct ath_softc *sc, int dodelay)
{
        struct ath_hal *ah = sc->sc_ah;

        ATH_RX_LOCK(sc);
        ath_hal_stoppcurecv(ah);
        ath_hal_setrxfilter(ah, 0);
        ath_hal_stopdmarecv(ah);

        DELAY(3000);

        /* Flush RX pending for each queue */
        /* XXX should generic-ify this */
        if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) {
                m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
                sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
        }

        if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) {
                m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
                sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
        }
        ATH_RX_UNLOCK(sc);
}

/*
 * Re-initialise the FIFO given the current buffer contents.
 * Specifically, walk from head -> tail, pushing each buffer's
 * descriptor address back into the hardware FIFO in order.
 */
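/*
 * Worked example: with m_fifolen = 8, m_fifo_head = 6 and
 * m_fifo_depth = 3, the loop below re-pushes slots 6, 7 and 0 and
 * leaves i == 1, which must equal m_fifo_tail if the head/tail/depth
 * bookkeeping is consistent.
 */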
static void
ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
        struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
        struct ath_buf *bf;
        int i, j;

        ATH_RX_LOCK_ASSERT(sc);

        i = re->m_fifo_head;
        for (j = 0; j < re->m_fifo_depth; j++) {
                bf = re->m_fifo[i];
                DPRINTF(sc, ATH_DEBUG_EDMA_RX,
                    "%s: Q%d: pos=%i, addr=0x%jx\n",
                    __func__,
                    qtype,
                    i,
                    (uintmax_t)bf->bf_daddr);
                ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);
                INCR(i, re->m_fifolen);
        }

        /* Ensure this worked out right */
        if (i != re->m_fifo_tail) {
                device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n",
                    __func__,
                    i,
                    re->m_fifo_tail);
        }
}

/*
 * Start receive.
 *
 * XXX TODO: this needs to reallocate the FIFO entries when a reset
 * occurs, in case the FIFO is filled up and no new descriptors get
 * thrown into the FIFO.
 */
static int
ath_edma_startrecv(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;

        ATH_RX_LOCK(sc);

        /* Enable RX FIFO */
        ath_hal_rxena(ah);

        /*
         * Entries should only be written out if the
         * FIFO is empty.
         *
         * XXX This isn't correct. I should be looking
         * at the value of AR_RXDP_SIZE (0x0070) to determine
         * how many entries are in here.
         *
         * A warm reset will clear the registers but not the FIFO.
         *
         * And I believe this is actually the address of the last
         * handled buffer rather than the current FIFO pointer.
         * So if no frames have been (yet) seen, we'll reinit the
         * FIFO.
         *
         * I'll chase that up at some point.
         */
        if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_HP) == 0) {
                DPRINTF(sc, ATH_DEBUG_EDMA_RX,
                    "%s: Re-initing HP FIFO\n", __func__);
                ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP);
        }
        if (ath_hal_getrxbuf(sc->sc_ah, HAL_RX_QUEUE_LP) == 0) {
                DPRINTF(sc, ATH_DEBUG_EDMA_RX,
                    "%s: Re-initing LP FIFO\n", __func__);
                ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP);
        }

        /*
         * Add up to m_fifolen entries to each queue.
         *
         * These must occur after the above writes so the FIFO buffers
         * are pushed/tracked in the same order as the hardware will
         * process them.
         */
        ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP,
            sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen);

        ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP,
            sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen);

        ath_mode_init(sc);
        ath_hal_startpcurecv(ah);

        ATH_RX_UNLOCK(sc);

        return (0);
}

static void
ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{

        ath_edma_recv_proc_queue(sc, qtype, dosched);
        taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_sched(struct ath_softc *sc, int dosched)
{

        ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched);
        ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched);
        taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
}

static void
ath_edma_recv_flush(struct ath_softc *sc)
{

        DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);

        ATH_PCU_LOCK(sc);
        sc->sc_rxproc_cnt++;
        ATH_PCU_UNLOCK(sc);

        /*
         * Flush any active frames from FIFO -> deferred list
         */
        ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0);
        ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0);

        /*
         * Process what's in the deferred queue
         */
        ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0);
        ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0);

        ATH_PCU_LOCK(sc);
        sc->sc_rxproc_cnt--;
        ATH_PCU_UNLOCK(sc);
}

/*
 * Process frames from the current queue into the deferred queue.
 */
static void
ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
        struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
        struct ath_rx_status *rs;
        struct ath_desc *ds;
        struct ath_buf *bf;
        struct mbuf *m;
        struct ath_hal *ah = sc->sc_ah;
        uint64_t tsf;
        int16_t nf;
        int npkts = 0;

        tsf = ath_hal_gettsf64(ah);
        nf = ath_hal_getchannoise(ah, sc->sc_curchan);
        sc->sc_stats.ast_rx_noise = nf;

        ATH_RX_LOCK(sc);

        do {
                bf = re->m_fifo[re->m_fifo_head];
                /* This shouldn't occur! */
                if (bf == NULL) {
                        device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n",
                            __func__,
                            qtype);
                        break;
                }
                m = bf->bf_m;
                ds = bf->bf_desc;

                /*
                 * Sync descriptor memory - this also syncs the buffer for us.
                 * EDMA descriptors are in cached memory.
                 */
                bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
                rs = &bf->bf_status.ds_rxstat;
                bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
                    NULL, rs);
#ifdef  ATH_DEBUG
                if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
                        ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
#endif /* ATH_DEBUG */
#ifdef  ATH_DEBUG_ALQ
                if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
                        if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
                            sc->sc_rx_statuslen, (char *) ds);
#endif /* ATH_DEBUG_ALQ */
                if (bf->bf_rxstatus == HAL_EINPROGRESS)
                        break;

                /*
                 * Completed descriptor.
                 */
                DPRINTF(sc, ATH_DEBUG_EDMA_RX,
                    "%s: Q%d: completed!\n", __func__, qtype);
                npkts++;

                /*
                 * We've been synced already, so unmap.
                 */
                bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);

                /*
                 * Remove the FIFO entry and place it on the completion
                 * queue.
                 */
                re->m_fifo[re->m_fifo_head] = NULL;
                TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list);

                /* Bump the descriptor FIFO stats */
                INCR(re->m_fifo_head, re->m_fifolen);
                re->m_fifo_depth--;
                /* XXX check it doesn't fall below 0 */
        } while (re->m_fifo_depth > 0);

        /* Append some more fresh frames to the FIFO */
        if (dosched)
                ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen);

        ATH_RX_UNLOCK(sc);

        /* RX signal state monitoring */
        ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);

        ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
            "ath edma rx proc: npkts=%d\n",
            npkts);

        /* Handle resched and kickpcu appropriately */
        ATH_PCU_LOCK(sc);
        if (dosched && sc->sc_kickpcu) {
                ATH_KTR(sc, ATH_KTR_ERROR, 0,
                    "ath_edma_recv_proc_queue(): kickpcu");
                if (npkts > 0)
                        device_printf(sc->sc_dev,
                            "%s: handled npkts %d\n",
                            __func__, npkts);

                /*
                 * XXX TODO: what should occur here? Just re-poke and
                 * re-enable the RX FIFO?
                 */
                sc->sc_kickpcu = 0;
        }
        ATH_PCU_UNLOCK(sc);
}

/*
 * Flush the deferred queue.
 *
 * This destructively flushes the deferred queue - it doesn't
 * call the wireless stack on each mbuf.
 */
static void
ath_edma_flush_deferred_queue(struct ath_softc *sc)
{
        struct ath_buf *bf, *next;

        ATH_RX_LOCK_ASSERT(sc);

        /* Free in one set, inside the lock */
        TAILQ_FOREACH_SAFE(bf,
            &sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf_list, next) {
                /* Unlink first; ath_edma_rxbuf_free() re-links bf_list */
                TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list);
                /* Free the buffer/mbuf */
                ath_edma_rxbuf_free(sc, bf);
        }
        TAILQ_FOREACH_SAFE(bf,
            &sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf_list, next) {
                /* Unlink first; ath_edma_rxbuf_free() re-links bf_list */
                TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list);
                /* Free the buffer/mbuf */
                ath_edma_rxbuf_free(sc, bf);
        }
}

static int
ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
    int dosched)
{
        int ngood = 0;
        uint64_t tsf;
        struct ath_buf *bf, *next;
        struct ath_rx_status *rs;
        int16_t nf;
        ath_bufhead rxlist;
        struct mbuf *m;

        TAILQ_INIT(&rxlist);

        nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan);
        /*
         * XXX TODO: the NF/TSF should be stamped on the bufs themselves,
         * otherwise we may end up adding in the wrong values if this
         * is delayed too far...
         */
        tsf = ath_hal_gettsf64(sc->sc_ah);

        /* Copy the list over */
        ATH_RX_LOCK(sc);
        TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list);
        ATH_RX_UNLOCK(sc);

        /* Handle the completed descriptors */
        TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
                /*
                 * Skip the RX descriptor status - start at the data offset
                 */
                m_adj(bf->bf_m, sc->sc_rx_statuslen);

                /* Handle the frame */

                rs = &bf->bf_status.ds_rxstat;
                m = bf->bf_m;
                bf->bf_m = NULL;
                if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m))
                        ngood++;
        }

        if (ngood) {
                sc->sc_lastrx = tsf;
        }

        ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1,
            "ath edma rx deferred proc: ngood=%d\n",
            ngood);

        /* Free in one set, inside the lock */
        ATH_RX_LOCK(sc);
        TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
                /* Free the buffer/mbuf */
                ath_edma_rxbuf_free(sc, bf);
        }
        ATH_RX_UNLOCK(sc);

        return (ngood);
}

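/*
 * The RX taskqueue handler: drain both FIFOs into the deferred lists,
 * pass the deferred frames up to the stack, then kick TX and the DFS
 * tasklet as needed.  sc_rxproc_cnt is held across the run so the
 * reset path can see that RX processing is in flight.
 */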
static void
ath_edma_recv_tasklet(void *arg, int npending)
{
        struct ath_softc *sc = (struct ath_softc *) arg;
        struct ifnet *ifp = sc->sc_ifp;
#ifdef  IEEE80211_SUPPORT_SUPERG
        struct ieee80211com *ic = ifp->if_l2com;
#endif

        DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n",
            __func__,
            npending);

        ATH_PCU_LOCK(sc);
        if (sc->sc_inreset_cnt > 0) {
                device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n",
                    __func__);
                ATH_PCU_UNLOCK(sc);
                return;
        }
        sc->sc_rxproc_cnt++;
        ATH_PCU_UNLOCK(sc);

        ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1);
        ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1);

        ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1);
        ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1);

        /* XXX inside IF_LOCK ? */
        if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef  IEEE80211_SUPPORT_SUPERG
                ieee80211_ff_age_all(ic, 100);
#endif
                if (! IFQ_IS_EMPTY(&ifp->if_snd))
                        ath_tx_kick(sc);
        }
        if (ath_dfs_tasklet_needed(sc, sc->sc_curchan))
                taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

        ATH_PCU_LOCK(sc);
        sc->sc_rxproc_cnt--;
        ATH_PCU_UNLOCK(sc);
}

/*
 * Allocate an RX mbuf for the given ath_buf and initialise
 * it for EDMA.
 *
 * + Allocate a 4KB mbuf;
 * + Set up the DMA map for the given buffer;
 * + Return that.
 */
static int
ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
        struct mbuf *m;
        int error;
        int len;

        ATH_RX_LOCK_ASSERT(sc);

        m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
        if (! m)
                return (ENOBUFS);               /* XXX? */

        /* XXX warn/enforce alignment */

        len = m->m_ext.ext_size;
#if 0
        device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n",
            __func__,
            m,
            len,
            mtod(m, char *));
#endif

        m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

        /*
         * Populate ath_buf fields.
         */
        bf->bf_desc = mtod(m, struct ath_desc *);
        bf->bf_lastds = bf->bf_desc;    /* XXX only really for TX? */
        bf->bf_m = m;

        /*
         * Zero the descriptor and ensure it makes it out to the
         * bounce buffer if one is required.
         *
         * XXX PREWRITE will copy the whole buffer; we only needed it
         * to sync the first 32 DWORDS.  Oh well.
         */
        memset(bf->bf_desc, '\0', sc->sc_rx_statuslen);

        /*
         * Create DMA mapping.
         */
        error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
            bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT);

        if (error != 0) {
                device_printf(sc->sc_dev, "%s: failed; error=%d\n",
                    __func__,
                    error);
                m_freem(m);
                return (error);
        }

        /*
         * Set daddr to the physical mapping page.
         */
        bf->bf_daddr = bf->bf_segs[0].ds_addr;

        /*
         * Prepare for the upcoming read.
         *
         * We need to both sync some data into the buffer (the zeroed
         * descriptor payload) and also prepare for the read that's going
         * to occur.
         */
        bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Finish! */
        return (0);
}
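
/*
 * Resulting buffer layout (a summary; see the m_adj() in
 * ath_edma_recv_proc_deferred_queue()):
 *
 *      [ RX status: sc_rx_statuslen bytes ][ 802.11 frame ... ]
 *
 * The hardware DMAs the completion status in front of the frame
 * payload, so the status area is zeroed here and stripped again
 * before the frame is handed up the stack.
 */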

/*
 * Allocate an RX buffer.
 */
static struct ath_buf *
ath_edma_rxbuf_alloc(struct ath_softc *sc)
{
        struct ath_buf *bf;
        int error;

        ATH_RX_LOCK_ASSERT(sc);

        /* Allocate buffer */
        bf = TAILQ_FIRST(&sc->sc_rxbuf);
        /* XXX shouldn't happen upon startup? */
        if (bf == NULL) {
                device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n",
                    __func__);
                return (NULL);
        }

        /* Remove it from the free list */
        TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);

        /* Assign RX mbuf to it */
        error = ath_edma_rxbuf_init(sc, bf);
        if (error != 0) {
                device_printf(sc->sc_dev,
                    "%s: bf=%p, rxbuf alloc failed! error=%d\n",
                    __func__,
                    bf,
                    error);
                TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
                return (NULL);
        }

        return (bf);
}

static void
ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf)
{

        ATH_RX_LOCK_ASSERT(sc);

        /*
         * Only unload the frame if we haven't consumed
         * the mbuf via ath_rx_pkt().
         */
        if (bf->bf_m) {
                bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
                m_freem(bf->bf_m);
                bf->bf_m = NULL;
        }

        /* XXX lock? */
        TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
}

/*
 * Allocate up to 'nbufs' entries and push them onto the hardware FIFO.
 *
 * Return how many entries were successfully pushed onto the
 * FIFO.
 */
static int
ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs)
{
        struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
        struct ath_buf *bf;
        int i;

        ATH_RX_LOCK_ASSERT(sc);

        /*
         * Allocate buffers until the FIFO is full or nbufs is reached.
         */
        for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) {
                /* Ensure the FIFO slot is already blank; complain loudly! */
                if (re->m_fifo[re->m_fifo_tail] != NULL) {
                        device_printf(sc->sc_dev,
                            "%s: Q%d: fifo[%d] != NULL (%p)\n",
                            __func__,
                            qtype,
                            re->m_fifo_tail,
                            re->m_fifo[re->m_fifo_tail]);

                        /* Free the slot */
                        ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]);
                        re->m_fifo_depth--;
                        /* XXX check it's not < 0 */
                        re->m_fifo[re->m_fifo_tail] = NULL;
                }

                bf = ath_edma_rxbuf_alloc(sc);
                /* XXX should ensure the FIFO entry is not NULL? */
                if (bf == NULL) {
                        device_printf(sc->sc_dev,
                            "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n",
                            __func__,
                            qtype,
                            i,
                            nbufs);
                        break;
                }

                re->m_fifo[re->m_fifo_tail] = bf;

                /* Write to the RX FIFO */
                DPRINTF(sc, ATH_DEBUG_EDMA_RX,
                    "%s: Q%d: putrxbuf=%p (0x%jx)\n",
                    __func__,
                    qtype,
                    bf->bf_desc,
                    (uintmax_t) bf->bf_daddr);
                ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype);

                re->m_fifo_depth++;
                INCR(re->m_fifo_tail, re->m_fifolen);
        }

        /*
         * Return how many were allocated.
         */
        DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n",
            __func__,
            qtype,
            nbufs,
            i);
        return (i);
}

static int
ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
        struct ath_rx_edma *re = &sc->sc_rxedma[qtype];
        int i;

        ATH_RX_LOCK_ASSERT(sc);

        for (i = 0; i < re->m_fifolen; i++) {
                if (re->m_fifo[i] != NULL) {
#ifdef  ATH_DEBUG
                        struct ath_buf *bf = re->m_fifo[i];

                        if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
                                ath_printrxbuf(sc, bf, 0, HAL_OK);
#endif
                        ath_edma_rxbuf_free(sc, re->m_fifo[i]);
                        re->m_fifo[i] = NULL;
                        re->m_fifo_depth--;
                }
        }

        if (re->m_rxpending != NULL) {
                m_freem(re->m_rxpending);
                re->m_rxpending = NULL;
        }
        re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

        return (0);
}

/*
 * Set up the initial RX FIFO structure.
 */
static int
ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
        struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

        ATH_RX_LOCK_ASSERT(sc);

        if (! ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) {
                device_printf(sc->sc_dev, "%s: qtype=%d, failed\n",
                    __func__,
                    qtype);
                return (-EINVAL);
        }
        device_printf(sc->sc_dev, "%s: qtype=%d, FIFO depth = %d entries\n",
            __func__,
            qtype,
            re->m_fifolen);

        /* Allocate ath_buf FIFO array, pre-zeroed */
        re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen,
            M_ATHDEV,
            M_NOWAIT | M_ZERO);
        if (re->m_fifo == NULL) {
                device_printf(sc->sc_dev, "%s: malloc failed\n",
                    __func__);
                return (-ENOMEM);
        }

        /*
         * Set the initial "empty" state.
         */
        re->m_rxpending = NULL;
        re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0;

        return (0);
}

static int
ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype)
{
        struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

        device_printf(sc->sc_dev, "%s: called; qtype=%d\n",
            __func__,
            qtype);

        free(re->m_fifo, M_ATHDEV);

        return (0);
}

static int
ath_edma_dma_rxsetup(struct ath_softc *sc)
{
        int error;

        /*
         * Create RX DMA tag and buffers.
         */
        error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
            "rx", ath_rxbuf, sc->sc_rx_statuslen);
        if (error != 0)
                return (error);

        ATH_RX_LOCK(sc);
        (void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP);
        (void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP);
        ATH_RX_UNLOCK(sc);

        return (0);
}

static int
ath_edma_dma_rxteardown(struct ath_softc *sc)
{

        ATH_RX_LOCK(sc);
        ath_edma_flush_deferred_queue(sc);
        ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP);
        ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP);

        ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP);
        ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP);
        ATH_RX_UNLOCK(sc);

        /* Free RX ath_buf */
        /* Free RX DMA tag */
        if (sc->sc_rxdma.dd_desc_len != 0)
                ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);

        return (0);
}

void
ath_recv_setup_edma(struct ath_softc *sc)
{

        /* Set buffer size to 4k */
        sc->sc_edma_bufsize = 4096;

        /* Fetch EDMA field and buffer sizes */
        (void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen);

        /* Configure the hardware with the RX buffer size */
        (void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize -
            sc->sc_rx_statuslen);

        device_printf(sc->sc_dev, "RX status length: %d\n",
            sc->sc_rx_statuslen);
        device_printf(sc->sc_dev, "RX buffer size: %d\n",
            sc->sc_edma_bufsize);

        sc->sc_rx.recv_stop = ath_edma_stoprecv;
        sc->sc_rx.recv_start = ath_edma_startrecv;
        sc->sc_rx.recv_flush = ath_edma_recv_flush;
        sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet;
        sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init;

        sc->sc_rx.recv_setup = ath_edma_dma_rxsetup;
        sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown;

        sc->sc_rx.recv_sched = ath_edma_recv_sched;
        sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue;
}
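
#if 0
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the attach path would pick these EDMA RX methods on EDMA-capable
 * (AR93xx and later) parts and then drive RX through the method
 * table.  The sc_isedma flag and ath_recv_setup_legacy() are
 * assumptions drawn from the wider driver, not defined here.
 */
        if (sc->sc_isedma)
                ath_recv_setup_edma(sc);
        else
                ath_recv_setup_legacy(sc);

        sc->sc_rx.recv_setup(sc);
        sc->sc_rx.recv_start(sc);
#endif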