ath - Basic re-port, base code compile
[dragonfly.git] / sys / dev / netif / ath / ath / if_ath_tx_edma.c
CommitLineData
572ff6f6
MD
1/*-
2 * Copyright (c) 2012 Adrian Chadd <adrian@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13 * redistribution must be conditioned upon including a substantially
14 * similar Disclaimer requirement for further binary redistribution.
15 *
16 * NO WARRANTY
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27 * THE POSSIBILITY OF SUCH DAMAGES.
28 */
29
30#include <sys/cdefs.h>
572ff6f6
MD
31
32/*
33 * Driver for the Atheros Wireless LAN controller.
34 *
35 * This software is derived from work of Atsushi Onoe; his contribution
36 * is greatly appreciated.
37 */
38
39#include "opt_inet.h"
40#include "opt_ath.h"
41/*
42 * This is needed for register operations which are performed
43 * by the driver - eg, calls to ath_hal_gettsf32().
44 *
45 * It's also required for any AH_DEBUG checks in here, eg the
46 * module dependencies.
47 */
48#include "opt_ah.h"
49#include "opt_wlan.h"
50
51#include <sys/param.h>
52#include <sys/systm.h>
53#include <sys/sysctl.h>
54#include <sys/mbuf.h>
55#include <sys/malloc.h>
56#include <sys/lock.h>
57#include <sys/mutex.h>
58#include <sys/kernel.h>
59#include <sys/socket.h>
60#include <sys/sockio.h>
61#include <sys/errno.h>
62#include <sys/callout.h>
63#include <sys/bus.h>
64#include <sys/endian.h>
65#include <sys/kthread.h>
66#include <sys/taskqueue.h>
67#include <sys/priv.h>
68#include <sys/module.h>
69#include <sys/ktr.h>
572ff6f6
MD
70
71#include <net/if.h>
72#include <net/if_var.h>
73#include <net/if_dl.h>
74#include <net/if_media.h>
75#include <net/if_types.h>
76#include <net/if_arp.h>
77#include <net/ethernet.h>
78#include <net/if_llc.h>
3133c5e3 79#include <net/ifq_var.h>
572ff6f6 80
5cd80a8c
MD
81#include <netproto/802_11/ieee80211_var.h>
82#include <netproto/802_11/ieee80211_regdomain.h>
572ff6f6 83#ifdef IEEE80211_SUPPORT_SUPERG
5cd80a8c 84#include <netproto/802_11/ieee80211_superg.h>
572ff6f6
MD
85#endif
86#ifdef IEEE80211_SUPPORT_TDMA
5cd80a8c 87#include <netproto/802_11/ieee80211_tdma.h>
572ff6f6
MD
88#endif
89
90#include <net/bpf.h>
91
92#ifdef INET
93#include <netinet/in.h>
94#include <netinet/if_ether.h>
95#endif
96
5cd80a8c
MD
97#include <dev/netif/ath/ath/if_athvar.h>
98#include <dev/netif/ath/ath_hal/ah_devid.h> /* XXX for softled */
99#include <dev/netif/ath/ath_hal/ah_diagcodes.h>
100
101#include <dev/netif/ath/ath/if_ath_debug.h>
102#include <dev/netif/ath/ath/if_ath_misc.h>
103#include <dev/netif/ath/ath/if_ath_tsf.h>
104#include <dev/netif/ath/ath/if_ath_tx.h>
105#include <dev/netif/ath/ath/if_ath_sysctl.h>
106#include <dev/netif/ath/ath/if_ath_led.h>
107#include <dev/netif/ath/ath/if_ath_keycache.h>
108#include <dev/netif/ath/ath/if_ath_rx.h>
109#include <dev/netif/ath/ath/if_ath_beacon.h>
110#include <dev/netif/ath/ath/if_athdfs.h>
572ff6f6
MD
111
112#ifdef ATH_TX99_DIAG
5cd80a8c 113#include <dev/netif/ath/ath_tx99/ath_tx99.h>
572ff6f6
MD
114#endif
115
5cd80a8c 116#include <dev/netif/ath/ath/if_ath_tx_edma.h>
572ff6f6
MD
117
118#ifdef ATH_DEBUG_ALQ
5cd80a8c 119#include <dev/netif/ath/ath/if_ath_alq.h>
572ff6f6
MD
120#endif
121
/*
 * some general macros
 */

/*
 * Circular index increment/decrement with power-of-two wrap.
 *
 * Wrapped in do { } while (0) so each invocation expands to a single
 * statement and stays correct inside an unbraced if/else body; the
 * original two-statement form would silently execute the mask
 * unconditionally (or fail to compile before an else).
 * _sz must be a power of two.
 */
#define INCR(_l, _sz)	do { (_l)++; (_l) &= ((_sz) - 1); } while (0)
#define DECR(_l, _sz)	do { (_l)--; (_l) &= ((_sz) - 1); } while (0)

/*
 * XXX doesn't belong here, and should be tunable
 */
#define ATH_TXSTATUS_RING_SIZE	512
132
133MALLOC_DECLARE(M_ATHDEV);
134
135static void ath_edma_tx_processq(struct ath_softc *sc, int dosched);
136
/*
 * Push some frames into the TX FIFO if we have space.
 *
 * Migrates buffers from the software queue (txq->axq_q) onto the FIFO
 * staging list (txq->fifo) and hands each buffer's descriptor address
 * to the hardware via ath_hal_puttxbuf(), then kicks the queue with
 * ath_hal_txstart() if anything was pushed.  The hardware FIFO only
 * has HAL_TXFIFO_DEPTH slots, tracked in txq->axq_fifo_depth.
 *
 * Caller must hold the TXQ lock.
 *
 * NOTE(review): ATH_TXQ_REMOVE + ATH_TXQ_INSERT_TAIL inside
 * TAILQ_FOREACH reuses bf's list linkage; after the buffer is moved to
 * the fifo list its next pointer is NULL (new tail), so the loop
 * terminates after one iteration - i.e. one frame is pushed per call.
 * That matches the "one buffer at a time" comment below, but a _SAFE
 * iterator would be required to ever push more than one - TODO confirm
 * this is intentional.
 */
static void
ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf, *bf_last;
	int i = 0;		/* frames pushed; gates the txstart below */

	ATH_TXQ_LOCK_ASSERT(txq);

	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: called\n",
	    __func__,
	    txq->axq_qnum);

	TAILQ_FOREACH(bf, &txq->axq_q, bf_list) {
		/* Stop once the hardware FIFO is full */
		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH)
			break;

		/*
		 * We have space in the FIFO - so let's push a frame
		 * into it.
		 */

		/*
		 * Remove it from the normal list
		 */
		ATH_TXQ_REMOVE(txq, bf, bf_list);

		/*
		 * XXX for now, we only dequeue a frame at a time, so
		 * that's only one buffer.  Later on when we just
		 * push this staging _list_ into the queue, we'll
		 * set bf_last to the end pointer in the list.
		 */
		bf_last = bf;
		DPRINTF(sc, ATH_DEBUG_TX_PROC,
		    "%s: Q%d: depth=%d; pushing %p->%p\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    bf,
		    bf_last);

		/*
		 * Append it to the FIFO staging list
		 */
		ATH_TXQ_INSERT_TAIL(&txq->fifo, bf, bf_list);

		/*
		 * Set fifo start / fifo end flags appropriately.
		 * Here bf == bf_last, so the single buffer is both the
		 * start and the end of its FIFO set.
		 */
		bf->bf_flags |= ATH_BUF_FIFOPTR;
		bf_last->bf_flags |= ATH_BUF_FIFOEND;

		/*
		 * Push _into_ the FIFO.
		 */
		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif/* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
			ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */
		txq->axq_fifo_depth++;
		i++;
	}
	/* Only kick the hardware if something was actually queued */
	if (i > 0)
		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);
}
211
/*
 * Re-initialise the DMA FIFO with the current contents of
 * said TXQ.
 *
 * This should only be called as part of the chip reset path, as it
 * assumes the FIFO is currently empty.
 *
 * Walks the FIFO staging list and re-pushes the head buffer of each
 * (possibly partially-completed) FIFO set back into the hardware,
 * rebuilding txq->axq_fifo_depth as it goes, then sanity-checks the
 * rebuilt depth against the previously tracked value.
 *
 * Caller must hold the TXQ lock.
 */
static void
ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf;
	int i = 0;		/* number of FIFO sets pushed */
	int fifostart = 1;	/* true when hunting for a set's head */
	int old_fifo_depth;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: called\n",
	    __func__,
	    txq->axq_qnum);

	ATH_TXQ_LOCK_ASSERT(txq);

	/*
	 * Let's log if the tracked FIFO depth doesn't match
	 * what we actually push in.
	 */
	old_fifo_depth = txq->axq_fifo_depth;
	txq->axq_fifo_depth = 0;

	/*
	 * Walk the FIFO staging list, looking for "head" entries.
	 * Since we may have a partially completed list of frames,
	 * we push the first frame we see into the FIFO and re-mark
	 * it as the head entry.  We then skip entries until we see
	 * FIFO end, at which point we get ready to push another
	 * entry into the FIFO.
	 */
	TAILQ_FOREACH(bf, &txq->fifo.axq_q, bf_list) {
		/*
		 * If we're looking for FIFOEND and we haven't found
		 * it, skip.
		 *
		 * If we're looking for FIFOEND and we've found it,
		 * reset for another descriptor.
		 */
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
			ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
#endif/* ATH_DEBUG */
#ifdef ATH_DEBUG_ALQ
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
			ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */

		/* Mid-set buffer: skip forward until the set's FIFOEND */
		if (fifostart == 0) {
			if (bf->bf_flags & ATH_BUF_FIFOEND)
				fifostart = 1;
			continue;
		}

		/* Make sure we're not overflowing the FIFO! */
		if (txq->axq_fifo_depth >= HAL_TXFIFO_DEPTH) {
			device_printf(sc->sc_dev,
			    "%s: Q%d: more frames in the queue; FIFO depth=%d?!\n",
			    __func__,
			    txq->axq_qnum,
			    txq->axq_fifo_depth);
		}

#if 0
		DPRINTF(sc, ATH_DEBUG_RESET,
		    "%s: Q%d: depth=%d: pushing bf=%p; start=%d, end=%d\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    bf,
		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
		    !! (bf->bf_flags & ATH_BUF_FIFOEND));
#endif

		/*
		 * Set this to be the first buffer in the FIFO
		 * list - even if it's also the last buffer in
		 * a FIFO list!
		 */
		bf->bf_flags |= ATH_BUF_FIFOPTR;

		/* Push it into the FIFO and bump the FIFO count */
		ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
		txq->axq_fifo_depth++;

		/*
		 * If this isn't the last entry either, let's
		 * clear fifostart so we continue looking for
		 * said last entry.
		 */
		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
			fifostart = 0;
		i++;
	}

	/* Only bother starting the queue if there's something in it */
	if (i > 0)
		ath_hal_txstart(sc->sc_ah, txq->axq_qnum);

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: Q%d: FIFO depth was %d, is %d\n",
	    __func__,
	    txq->axq_qnum,
	    old_fifo_depth,
	    txq->axq_fifo_depth);

	/* And now, let's check! */
	if (txq->axq_fifo_depth != old_fifo_depth) {
		device_printf(sc->sc_dev,
		    "%s: Q%d: FIFO depth should be %d, is %d\n",
		    __func__,
		    txq->axq_qnum,
		    old_fifo_depth,
		    txq->axq_fifo_depth);
	}
}
332
/*
 * Hand off this frame to a hardware queue.
 *
 * Things are a bit hairy in the EDMA world.  The TX FIFO is only
 * 8 entries deep, so we need to keep track of exactly what we've
 * pushed into the FIFO and what's just sitting in the TX queue,
 * waiting to go out.
 *
 * So this is split into two halves - frames get appended to the
 * TXQ; then a scheduler is called to push some frames into the
 * actual TX FIFO.
 *
 * The buffer must not be busy (ATH_BUF_BUSY); this function takes and
 * releases the TXQ lock itself.
 */
static void
ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	ATH_TXQ_LOCK(txq);

	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	/*
	 * XXX TODO: write a hard-coded check to ensure that
	 * the queue id in the TX descriptor matches txq->axq_qnum.
	 */

	/* Update aggr stats (tracked so completion can decrement) */
	if (bf->bf_state.bfs_aggr)
		txq->axq_aggr_depth++;

	/* Push and update frame stats */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);

	/* For now, set the link pointer in the last descriptor
	 * to be NULL.
	 *
	 * Later on, when it comes time to handling multiple descriptors
	 * in one FIFO push, we can link descriptors together this way.
	 */

	/*
	 * Finally, call the FIFO schedule routine to schedule some
	 * frames to the FIFO (which may push this very frame out).
	 */
	ath_edma_tx_fifo_fill(sc, txq);
	ATH_TXQ_UNLOCK(txq);
}
381
/*
 * Hand off this frame to a multicast software queue.
 *
 * The EDMA TX CABQ will get a list of chained frames, chained
 * together using the next pointer.  The single head of that
 * particular queue is pushed to the hardware CABQ.
 *
 * Caller must hold the TX lock; the TXQ lock is taken here.  The
 * buffer must not be busy (ATH_BUF_BUSY).
 */
static void
ath_edma_xmit_handoff_mcast(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_buf *bf)
{

	ATH_TX_LOCK_ASSERT(sc);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	    ("%s: busy status 0x%x", __func__, bf->bf_flags));

	ATH_TXQ_LOCK(txq);
	/*
	 * XXX this is mostly duplicated in ath_tx_handoff_mcast().
	 */
	if (ATH_TXQ_LAST(txq, axq_q_s) != NULL) {
		struct ath_buf *bf_last = ATH_TXQ_LAST(txq, axq_q_s);
		struct ieee80211_frame *wh;

		/*
		 * mark previous frame with MORE_DATA so receivers know
		 * another buffered multicast frame follows it
		 */
		wh = mtod(bf_last->bf_m, struct ieee80211_frame *);
		wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;

		/* re-sync buffer to memory before the hardware reads it */
		bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap,
		    BUS_DMASYNC_PREWRITE);

		/* link descriptor: chain the new frame off the old tail */
		ath_hal_settxdesclink(sc->sc_ah,
		    bf_last->bf_lastds,
		    bf->bf_daddr);
	}
#ifdef ATH_DEBUG_ALQ
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
		ath_tx_alq_post(sc, bf);
#endif /* ATH_DEBUG_ALQ */
	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
	ATH_TXQ_UNLOCK(txq);
}
426
427/*
428 * Handoff this frame to the hardware.
429 *
430 * For the multicast queue, this will treat it as a software queue
431 * and append it to the list, after updating the MORE_DATA flag
432 * in the previous frame. The cabq processing code will ensure
433 * that the queue contents gets transferred over.
434 *
435 * For the hardware queues, this will queue a frame to the queue
436 * like before, then populate the FIFO from that. Since the
437 * EDMA hardware has 8 FIFO slots per TXQ, this ensures that
438 * frames such as management frames don't get prematurely dropped.
439 *
440 * This does imply that a similar flush-hwq-to-fifoq method will
441 * need to be called from the processq function, before the
442 * per-node software scheduler is called.
443 */
444static void
445ath_edma_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
446 struct ath_buf *bf)
447{
448
449 DPRINTF(sc, ATH_DEBUG_XMIT_DESC,
450 "%s: called; bf=%p, txq=%p, qnum=%d\n",
451 __func__,
452 bf,
453 txq,
454 txq->axq_qnum);
455
456 if (txq->axq_qnum == ATH_TXQ_SWQ)
457 ath_edma_xmit_handoff_mcast(sc, txq, bf);
458 else
459 ath_edma_xmit_handoff_hw(sc, txq, bf);
460}
461
462static int
463ath_edma_setup_txfifo(struct ath_softc *sc, int qnum)
464{
465 struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];
466
3133c5e3 467 te->m_fifo = kmalloc(sizeof(struct ath_buf *) * HAL_TXFIFO_DEPTH,
572ff6f6 468 M_ATHDEV,
3133c5e3 469 M_INTWAIT | M_ZERO);
572ff6f6
MD
470 if (te->m_fifo == NULL) {
471 device_printf(sc->sc_dev, "%s: malloc failed\n",
472 __func__);
473 return (-ENOMEM);
474 }
475
476 /*
477 * Set initial "empty" state.
478 */
479 te->m_fifo_head = te->m_fifo_tail = te->m_fifo_depth = 0;
480
481 return (0);
482}
483
484static int
485ath_edma_free_txfifo(struct ath_softc *sc, int qnum)
486{
487 struct ath_tx_edma_fifo *te = &sc->sc_txedma[qnum];
488
489 /* XXX TODO: actually deref the ath_buf entries? */
3133c5e3 490 kfree(te->m_fifo, M_ATHDEV);
572ff6f6
MD
491 return (0);
492}
493
494static int
495ath_edma_dma_txsetup(struct ath_softc *sc)
496{
497 int error;
498 int i;
499
500 error = ath_descdma_alloc_desc(sc, &sc->sc_txsdma,
501 NULL, "txcomp", sc->sc_tx_statuslen, ATH_TXSTATUS_RING_SIZE);
502 if (error != 0)
503 return (error);
504
505 ath_hal_setuptxstatusring(sc->sc_ah,
506 (void *) sc->sc_txsdma.dd_desc,
507 sc->sc_txsdma.dd_desc_paddr,
508 ATH_TXSTATUS_RING_SIZE);
509
510 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
511 ath_edma_setup_txfifo(sc, i);
512 }
513
514 return (0);
515}
516
517static int
518ath_edma_dma_txteardown(struct ath_softc *sc)
519{
520 int i;
521
522 for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
523 ath_edma_free_txfifo(sc, i);
524 }
525
526 ath_descdma_cleanup(sc, &sc->sc_txsdma, NULL);
527 return (0);
528}
529
/*
 * Drain all TXQs, potentially after completing the existing completed
 * frames.
 *
 * For a no-loss reset, frames already completed in the TX FIFO are
 * reaped first (without rescheduling new ones) and each queue's
 * holding buffer and link pointer are cleared; for any other reset
 * type every queue is simply flushed.  Clears OACTIVE and the
 * watchdog timer when done.
 */
static void
ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
{
	struct ifnet *ifp = sc->sc_ifp;
	int i;

	DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);

	(void) ath_stoptxdma(sc);

	/*
	 * If reset type is noloss, the TX FIFO needs to be serviced
	 * and those frames need to be handled.
	 *
	 * Otherwise, just toss everything in each TX queue.
	 */
	if (reset_type == ATH_RESET_NOLOSS) {
		/* dosched=0: reap completions without refilling the FIFO */
		ath_edma_tx_processq(sc, 0);
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				ATH_TXQ_LOCK(&sc->sc_txq[i]);
				/*
				 * Free the holding buffer; DMA is now
				 * stopped.
				 */
				ath_txq_freeholdingbuf(sc, &sc->sc_txq[i]);
				/*
				 * Reset the link pointer to NULL; there's
				 * no frames to chain DMA to.
				 */
				sc->sc_txq[i].axq_link = NULL;
				ATH_TXQ_UNLOCK(&sc->sc_txq[i]);
			}
		}
	} else {
		for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i))
				ath_tx_draintxq(sc, &sc->sc_txq[i]);
		}
	}

	/* XXX dump out the TX completion FIFO contents */

	/* XXX dump out the frames */

	/* Allow the upper layers to queue frames again */
	IF_LOCK(&ifp->if_snd);
	ifq_clr_oactive(&ifp->if_snd);
	IF_UNLOCK(&ifp->if_snd);
	sc->sc_wd_timer = 0;
}
584
/*
 * TX completion tasklet.
 *
 * Taskqueue entry point: drains the TX status ring with scheduling
 * enabled, so freed FIFO slots are refilled as completions are reaped.
 */
static void
ath_edma_tx_proc(void *arg, int npending)
{
	struct ath_softc *sc = arg;

#if 0
	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n",
	    __func__, npending);
#endif
	/* dosched=1: refill FIFO slots and kick the software scheduler */
	ath_edma_tx_processq(sc, 1);
}
600
/*
 * Process the TX status queue.
 *
 * Pops completed entries off the EDMA TX status ring one at a time,
 * matches each status to the head buffer of the corresponding TXQ's
 * FIFO staging list, reconstructs the rate/retry fields the status
 * FIFO doesn't carry, and completes the buffer.  If 'dosched' is set,
 * freed FIFO slots are refilled and the per-node software scheduler is
 * kicked afterwards.
 */
static void
ath_edma_tx_processq(struct ath_softc *sc, int dosched)
{
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;
	struct ath_tx_status ts;
	struct ath_txq *txq;
	struct ath_buf *bf;
	struct ieee80211_node *ni;
	int nacked = 0;		/* completions carrying usable RSSI */
	int idx;

#ifdef ATH_DEBUG
	/* XXX */
	uint32_t txstatus[32];
#endif

	for (idx = 0; ; idx++) {
		bzero(&ts, sizeof(ts));

		ATH_TXSTATUS_LOCK(sc);
#ifdef ATH_DEBUG
		ath_hal_gettxrawtxdesc(ah, txstatus);
#endif
		status = ath_hal_txprocdesc(ah, NULL, (void *) &ts);
		ATH_TXSTATUS_UNLOCK(sc);

		/* Status ring empty - nothing more has completed */
		if (status == HAL_EINPROGRESS)
			break;

#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_TX_PROC)
			if (ts.ts_queue_id != sc->sc_bhalq)
			ath_printtxstatbuf(sc, NULL, txstatus, ts.ts_queue_id,
			    idx, (status == HAL_OK));
#endif

		/*
		 * If there is an error with this descriptor, continue
		 * processing.
		 *
		 * XXX TBD: log some statistics?
		 */
		if (status == HAL_EIO) {
			device_printf(sc->sc_dev, "%s: invalid TX status?\n",
			    __func__);
			break;
		}

#if defined(ATH_DEBUG_ALQ) && defined(ATH_DEBUG)
		if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS))
			if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
			    sc->sc_tx_statuslen,
			    (char *) txstatus);
#endif /* ATH_DEBUG_ALQ */

		/*
		 * At this point we have a valid status descriptor.
		 * The QID and descriptor ID (which currently isn't set)
		 * is part of the status.
		 *
		 * We then assume that the descriptor in question is the
		 * -head- of the given QID.  Eventually we should verify
		 * this by using the descriptor ID.
		 */

		/*
		 * The beacon queue is not currently a "real" queue.
		 * Frames aren't pushed onto it and the lock isn't setup.
		 * So skip it for now; the beacon handling code will
		 * free and alloc more beacon buffers as appropriate.
		 */
		if (ts.ts_queue_id == sc->sc_bhalq)
			continue;

		txq = &sc->sc_txq[ts.ts_queue_id];

		ATH_TXQ_LOCK(txq);
		bf = ATH_TXQ_FIRST(&txq->fifo);

		/*
		 * Work around the situation where I'm seeing notifications
		 * for Q1 when no frames are available.  That needs to be
		 * debugged but not by crashing _here_.
		 */
		if (bf == NULL) {
			device_printf(sc->sc_dev, "%s: Q%d: empty?\n",
			    __func__,
			    ts.ts_queue_id);
			ATH_TXQ_UNLOCK(txq);
			continue;
		}

		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d, bf=%p, start=%d, end=%d\n",
		    __func__,
		    ts.ts_queue_id, bf,
		    !! (bf->bf_flags & ATH_BUF_FIFOPTR),
		    !! (bf->bf_flags & ATH_BUF_FIFOEND));

		/* XXX TODO: actually output debugging info about this */

#if 0
		/* XXX assert the buffer/descriptor matches the status descid */
		if (ts.ts_desc_id != bf->bf_descid) {
			device_printf(sc->sc_dev,
			    "%s: mismatched descid (qid=%d, tsdescid=%d, "
			    "bfdescid=%d\n",
			    __func__,
			    ts.ts_queue_id,
			    ts.ts_desc_id,
			    bf->bf_descid);
		}
#endif

		/* This removes the buffer and decrements the queue depth */
		ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
		if (bf->bf_state.bfs_aggr)
			txq->axq_aggr_depth--;

		/*
		 * If this was the end of a FIFO set, decrement FIFO depth
		 */
		if (bf->bf_flags & ATH_BUF_FIFOEND)
			txq->axq_fifo_depth--;

		/*
		 * If this isn't the final buffer in a FIFO set, mark
		 * the buffer as busy so it goes onto the holding queue.
		 */
		if (! (bf->bf_flags & ATH_BUF_FIFOEND))
			bf->bf_flags |= ATH_BUF_BUSY;

		DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: Q%d: FIFO depth is now %d (%d)\n",
		    __func__,
		    txq->axq_qnum,
		    txq->axq_fifo_depth,
		    txq->fifo.axq_depth);

		/* XXX assert FIFO depth >= 0 */
		ATH_TXQ_UNLOCK(txq);

		/*
		 * Outside of the TX lock - if the buffer is end
		 * end buffer in this FIFO, we don't need a holding
		 * buffer any longer.
		 */
		if (bf->bf_flags & ATH_BUF_FIFOEND) {
			ATH_TXQ_LOCK(txq);
			ath_txq_freeholdingbuf(sc, txq);
			ATH_TXQ_UNLOCK(txq);
		}

		/*
		 * First we need to make sure ts_rate is valid.
		 *
		 * Pre-EDMA chips pass the whole TX descriptor to
		 * the proctxdesc function which will then fill out
		 * ts_rate based on the ts_finaltsi (final TX index)
		 * in the TX descriptor.  However the TX completion
		 * FIFO doesn't have this information.  So here we
		 * do a separate HAL call to populate that information.
		 *
		 * The same problem exists with ts_longretry.
		 * The FreeBSD HAL corrects ts_longretry in the HAL layer;
		 * the AR9380 HAL currently doesn't.  So until the HAL
		 * is imported and this can be added, we correct for it
		 * here.
		 */
		/* XXX TODO */
		/* XXX faked for now.  Ew. */
		if (ts.ts_finaltsi < 4) {
			ts.ts_rate =
			    bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode;
			/*
			 * Accumulate tries from every earlier rate
			 * attempt; fallthrough is intentional.
			 */
			switch (ts.ts_finaltsi) {
			case 3: ts.ts_longretry +=
			    bf->bf_state.bfs_rc[2].tries;
				/* FALLTHROUGH */
			case 2: ts.ts_longretry +=
			    bf->bf_state.bfs_rc[1].tries;
				/* FALLTHROUGH */
			case 1: ts.ts_longretry +=
			    bf->bf_state.bfs_rc[0].tries;
			}
		} else {
			device_printf(sc->sc_dev, "%s: finaltsi=%d\n",
			    __func__,
			    ts.ts_finaltsi);
			ts.ts_rate = bf->bf_state.bfs_rc[0].ratecode;
		}

		/*
		 * XXX This is terrible.
		 *
		 * Right now, some code uses the TX status that is
		 * passed in here, but the completion handlers in the
		 * software TX path also use bf_status.ds_txstat.
		 * Ew.  That should all go away.
		 *
		 * XXX It's also possible the rate control completion
		 * routine is called twice.
		 */
		memcpy(&bf->bf_status, &ts, sizeof(ts));

		ni = bf->bf_node;

		/* Update RSSI */
		/* XXX duplicate from ath_tx_processq */
		if (ni != NULL && ts.ts_status == 0 &&
		    ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) {
			nacked++;
			sc->sc_stats.ast_tx_rssi = ts.ts_rssi;
			ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
			    ts.ts_rssi);
		}

		/* Handle frame completion and rate control update */
		ath_tx_process_buf_completion(sc, txq, &ts, bf);

		/* bf is invalid at this point */

		/*
		 * Now that there's space in the FIFO, let's push some
		 * more frames into it.
		 */
		ATH_TXQ_LOCK(txq);
		if (dosched)
			ath_edma_tx_fifo_fill(sc, txq);
		ATH_TXQ_UNLOCK(txq);
	}

	sc->sc_wd_timer = 0;

	/* If anything completed, let the upper layer queue more frames */
	if (idx > 0) {
		IF_LOCK(&sc->sc_ifp->if_snd);
		ifq_clr_oactive(&sc->sc_ifp->if_snd);
		IF_UNLOCK(&sc->sc_ifp->if_snd);
	}

	/* Kick software scheduler */
	/*
	 * XXX It's inefficient to do this if the FIFO queue is full,
	 * but there's no easy way right now to only populate
	 * the txq task for _one_ TXQ.  This should be fixed.
	 */
	if (dosched)
		ath_tx_swq_kick(sc);
}
849
/*
 * Install the EDMA TX completion handler as this device's TX task.
 */
static void
ath_edma_attach_comp_func(struct ath_softc *sc)
{

	TASK_INIT(&sc->sc_txtask, 0, ath_edma_tx_proc, sc);
}
856
/*
 * Set up the EDMA TX path for this device: query the HAL for the
 * descriptor/status geometry, report it, and install the EDMA-specific
 * TX method pointers used by the rest of the driver.
 */
void
ath_xmit_setup_edma(struct ath_softc *sc)
{

	/* Fetch EDMA field and buffer sizes */
	(void) ath_hal_gettxdesclen(sc->sc_ah, &sc->sc_tx_desclen);
	(void) ath_hal_gettxstatuslen(sc->sc_ah, &sc->sc_tx_statuslen);
	(void) ath_hal_getntxmaps(sc->sc_ah, &sc->sc_tx_nmaps);

	device_printf(sc->sc_dev, "TX descriptor length: %d\n",
	    sc->sc_tx_desclen);
	device_printf(sc->sc_dev, "TX status length: %d\n",
	    sc->sc_tx_statuslen);
	device_printf(sc->sc_dev, "TX buffers per descriptor: %d\n",
	    sc->sc_tx_nmaps);

	/* Wire up the EDMA implementations of the TX methods */
	sc->sc_tx.xmit_setup = ath_edma_dma_txsetup;
	sc->sc_tx.xmit_teardown = ath_edma_dma_txteardown;
	sc->sc_tx.xmit_attach_comp_func = ath_edma_attach_comp_func;

	sc->sc_tx.xmit_dma_restart = ath_edma_dma_restart;
	sc->sc_tx.xmit_handoff = ath_edma_xmit_handoff;
	sc->sc_tx.xmit_drain = ath_edma_tx_drain;
}