3 * ===================================
4 * HARP | Host ATM Research Platform
5 * ===================================
8 * This Host ATM Research Platform ("HARP") file (the "Software") is
9 * made available by Network Computing Services, Inc. ("NetworkCS")
10 * "AS IS". NetworkCS does not provide maintenance, improvements or
11 * support of any kind.
13 * NETWORKCS MAKES NO WARRANTIES OR REPRESENTATIONS, EXPRESS OR IMPLIED,
14 * INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY
15 * AND FITNESS FOR A PARTICULAR PURPOSE, AS TO ANY ELEMENT OF THE
16 * SOFTWARE OR ANY SUPPORT PROVIDED IN CONNECTION WITH THIS SOFTWARE.
17 * In no event shall NetworkCS be responsible for any damages, including
18 * but not limited to consequential damages, arising from or relating to
19 * any use of the Software or related support.
21 * Copyright 1994-1998 Network Computing Services, Inc.
23 * Copies of this Software may be made, however, the above copyright
24 * notice must be reproduced on all copies.
26 * @(#) $FreeBSD: src/sys/dev/hfa/fore_buffer.c,v 1.5 2000/01/15 21:01:04 mks Exp $
27 * @(#) $DragonFly: src/sys/dev/atm/hfa/fore_buffer.c,v 1.4 2003/08/27 10:35:16 rob Exp $
31 * FORE Systems 200-Series Adapter Support
32 * ---------------------------------------
34 * Buffer Supply queue management
38 #include "fore_include.h"
43 static void fore_buf_drain (Fore_unit *);
44 static void fore_buf_supply_1s (Fore_unit *);
45 static void fore_buf_supply_1l (Fore_unit *);
49 * Allocate Buffer Supply Queues Data Structures
51 * Here we are allocating memory for both Strategy 1 Small and Large
52 * structures contiguously.
55 * fup pointer to device unit structure
58 * 0 allocations successful
59 * else allocation failed
/*
 * NOTE(review): this chunk appears to be an extraction with lines missing
 * (the "memp" declaration, the NULL checks after each allocation and the
 * return statements are not visible).  Comments below describe only what
 * the visible lines show.
 */
62 fore_buf_allocate(fup)
/*
 * One non-cacheable region holds the Q_status words for BOTH queues:
 * the first BUF1_SM_QUELEN entries belong to the small queue, the
 * remainder to the large queue.
 */
68 * Allocate non-cacheable memory for buffer supply status words
71 sizeof(Q_status) * (BUF1_SM_QUELEN + BUF1_LG_QUELEN),
72 QSTAT_ALIGN, ATM_DEV_NONCACHE);
76 fup->fu_buf1s_stat = (Q_status *) memp;
77 fup->fu_buf1l_stat = ((Q_status *) memp) + BUF1_SM_QUELEN;
/*
 * DMA-map the same status region; the DMA pointers are split at the
 * same BUF1_SM_QUELEN offset as the host pointers above.
 */
79 memp = DMA_GET_ADDR(fup->fu_buf1s_stat,
80 sizeof(Q_status) * (BUF1_SM_QUELEN + BUF1_LG_QUELEN),
81 QSTAT_ALIGN, ATM_DEV_NONCACHE);
85 fup->fu_buf1s_statd = (Q_status *) memp;
86 fup->fu_buf1l_statd = ((Q_status *) memp) + BUF1_SM_QUELEN;
/*
 * Likewise one contiguous region of Buf_descr descriptors for both
 * queues; each queue entry owns ENTSIZE consecutive descriptors, so
 * the large-queue part starts BUF1_SM_QUELEN * BUF1_SM_ENTSIZE in.
 */
89 * Allocate memory for buffer supply descriptors
91 memp = atm_dev_alloc(sizeof(Buf_descr) *
92 ((BUF1_SM_QUELEN * BUF1_SM_ENTSIZE) +
93 (BUF1_LG_QUELEN * BUF1_LG_ENTSIZE)),
98 fup->fu_buf1s_desc = (Buf_descr *) memp;
99 fup->fu_buf1l_desc = ((Buf_descr *) memp) +
100 (BUF1_SM_QUELEN * BUF1_SM_ENTSIZE);
/* DMA-map the descriptor region and record the matching DMA pointers */
102 memp = DMA_GET_ADDR(fup->fu_buf1s_desc, sizeof(Buf_descr) *
103 ((BUF1_SM_QUELEN * BUF1_SM_ENTSIZE) +
104 (BUF1_LG_QUELEN * BUF1_LG_ENTSIZE)),
109 fup->fu_buf1s_descd = (Buf_descr *) memp;
110 fup->fu_buf1l_descd = ((Buf_descr *) memp) +
111 (BUF1_SM_QUELEN * BUF1_SM_ENTSIZE);
118 * Buffer Supply Queues Initialization
120 * Allocate and initialize the host-resident buffer supply queue structures
121 * and then initialize the CP-resident queue structures.
123 * Called at interrupt level.
126 * fup pointer to device unit structure
/*
 * NOTE(review): lines appear to be missing from this extraction (local
 * declarations for cqp/hbp/qsp/qsp_dma/bdp/bdp_dma/i, the status-word
 * initialization inside each loop, and some pointer bumps); comments
 * describe only the visible lines.
 */
132 fore_buf_initialize(fup)
135 Aali *aap = fup->fu_aali;
145 * Initialize Strategy 1 Small Queues
/*
 * The CP-resident queue lives in adapter RAM; aali_buf1s_q is an
 * offset from the start of that RAM (fup->fu_ram).
 */
149 * Point to CP-resident buffer supply queue
151 cqp = (Buf_queue *)(fup->fu_ram + CP_READ(aap->aali_buf1s_q));
154 * Point to host-resident buffer supply queue structures
156 hbp = fup->fu_buf1s_q;
157 qsp = fup->fu_buf1s_stat;
158 qsp_dma = fup->fu_buf1s_statd;
159 bdp = fup->fu_buf1s_desc;
160 bdp_dma = fup->fu_buf1s_descd;
163 * Loop thru all queue entries and do whatever needs doing
165 for (i = 0; i < BUF1_SM_QUELEN; i++) {
168 * Set queue status word to free
173 * Set up host queue entry and link into ring
/*
 * Each host entry records its CP twin, its status word, and both the
 * host and DMA addresses of its descriptor group; the last entry's
 * "next" wraps back to the queue base, closing the ring.
 */
175 hbp->hbq_cpelem = cqp;
176 hbp->hbq_status = qsp;
177 hbp->hbq_descr = bdp;
178 hbp->hbq_descr_dma = bdp_dma;
179 if (i == (BUF1_SM_QUELEN - 1))
180 hbp->hbq_next = fup->fu_buf1s_q;
182 hbp->hbq_next = hbp + 1;
185 * Now let the CP into the game
/* Publish the status word's DMA address in the CP-resident entry */
187 cqp->cq_status = (CP_dma) CP_WRITE(qsp_dma);
190 * Bump all queue pointers
/* Descriptors advance by a whole entry's worth (ENTSIZE per entry) */
195 bdp += BUF1_SM_ENTSIZE;
196 bdp_dma += BUF1_SM_ENTSIZE;
201 * Initialize queue pointers
/* Empty ring: head and tail both start at the queue base */
203 fup->fu_buf1s_head = fup->fu_buf1s_tail = fup->fu_buf1s_q;
207 * Initialize Strategy 1 Large Queues
/* Mirror of the small-queue setup above, using the 1l structures */
211 * Point to CP-resident buffer supply queue
213 cqp = (Buf_queue *)(fup->fu_ram + CP_READ(aap->aali_buf1l_q));
216 * Point to host-resident buffer supply queue structures
218 hbp = fup->fu_buf1l_q;
219 qsp = fup->fu_buf1l_stat;
220 qsp_dma = fup->fu_buf1l_statd;
221 bdp = fup->fu_buf1l_desc;
222 bdp_dma = fup->fu_buf1l_descd;
225 * Loop thru all queue entries and do whatever needs doing
227 for (i = 0; i < BUF1_LG_QUELEN; i++) {
230 * Set queue status word to free
235 * Set up host queue entry and link into ring
237 hbp->hbq_cpelem = cqp;
238 hbp->hbq_status = qsp;
239 hbp->hbq_descr = bdp;
240 hbp->hbq_descr_dma = bdp_dma;
241 if (i == (BUF1_LG_QUELEN - 1))
242 hbp->hbq_next = fup->fu_buf1l_q;
244 hbp->hbq_next = hbp + 1;
247 * Now let the CP into the game
249 cqp->cq_status = (CP_dma) CP_WRITE(qsp_dma);
252 * Bump all queue pointers
257 bdp += BUF1_LG_ENTSIZE;
258 bdp_dma += BUF1_LG_ENTSIZE;
263 * Initialize queue pointers
265 fup->fu_buf1l_head = fup->fu_buf1l_tail = fup->fu_buf1l_q;
272 * Supply Buffers to CP
274 * This function will resupply the CP with buffers to be used to
275 * store incoming data.
277 * May be called in interrupt state.
278 * Must be called with interrupts locked out.
281 * fup pointer to device unit structure
/*
 * NOTE(review): the function signature line is not visible in this
 * extraction; only the body fragments below are.  Order matters:
 * completed entries are reclaimed first so the supply pass below can
 * reuse freed queue slots.
 */
292 * First, clean out the supply queues
297 * Then, supply the buffers for each queue
299 fore_buf_supply_1s(fup);
300 fore_buf_supply_1l(fup);
307 * Supply Strategy 1 Small Buffers to CP
309 * May be called in interrupt state.
310 * Must be called with interrupts locked out.
313 * fup pointer to device unit structure
/*
 * NOTE(review): lines appear to be missing from this extraction — the
 * local declarations, the outer supply loop header, allocation-failure
 * branches, and notably the line computing nbuf from nvcc (its twin is
 * visible in fore_buf_supply_1l: nbuf = nvcc * 4 * RECV_MAX_SEGS).
 */
319 fore_buf_supply_1s(fup)
330 * Figure out how many buffers we should be giving to the CP.
331 * We're basing this calculation on the current number of open
332 * VCCs thru this device, with certain minimum and maximum values
333 * enforced. This will then allow us to figure out how many more
334 * buffers we need to supply to the CP. This will be rounded up
335 * to fill a supply queue entry.
/*
 * Target = f(open VCCs), clamped to the CP pool size, minus what the
 * CP already holds (fu_buf1s_cnt), rounded up to whole entries.
 */
337 nvcc = MAX(fup->fu_open_vcc, BUF_MIN_VCC);
339 nbuf = MIN(nbuf, BUF1_SM_CPPOOL);
340 nbuf -= fup->fu_buf1s_cnt;
341 nbuf = roundup(nbuf, BUF1_SM_ENTSIZE);
344 * OK, now supply the buffers to the CP
349 * Acquire a supply queue entry
/* Stop when the tail entry is not free — the ring is full */
351 hbp = fup->fu_buf1s_tail;
352 if (!((*hbp->hbq_status) & QSTAT_FREE))
354 bdp = hbp->hbq_descr;
357 * Get a buffer for each descriptor in the queue entry
359 for (i = 0; i < BUF1_SM_ENTSIZE; i++, bdp++) {
/* Small buffers come from packet (non-cluster) kernel buffers */
365 KB_ALLOCPKT(m, BUF1_SM_SIZE, KB_F_NOWAIT, KB_T_DATA);
369 KB_HEADSET(m, BUF1_SM_DOFF);
372 * Point to buffer handle structure
/*
 * The Buf_handle is stored inside the buffer itself at offset
 * BUF1_SM_HOFF; the same offset is used to get back from handle
 * to buffer in the unwind path below and in fore_buf_free().
 */
374 bhp = (Buf_handle *)((caddr_t)m + BUF1_SM_HOFF);
375 bhp->bh_type = BHT_S1_SMALL;
378 * Setup buffer descriptor
380 bdp->bsd_handle = bhp;
381 KB_DATASTART(m, cp, caddr_t);
/* The CP sees the buffer via its DMA address, kept in both places */
382 bhp->bh_dma = bdp->bsd_buffer = (H_dma) DMA_GET_ADDR(
383 cp, BUF1_SM_SIZE, BUF_DATA_ALIGN, 0);
384 if (bdp->bsd_buffer == NULL) {
386 * Unable to assign dma address - free up
387 * this descriptor's buffer
389 fup->fu_stats->st_drv.drv_bf_segdma++;
395 * All set, so queue buffer (handle)
/* Track every supplied buffer so fore_buf_free() can reclaim it */
397 ENQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1s_bq);
401 * If we we're not able to fill all the descriptors for
402 * an entry, free up what's been partially built
404 if (i != BUF1_SM_ENTSIZE) {
408 * Clean up each used descriptor
/* Undo in order: dequeue handle, unmap DMA, free the buffer */
410 for (bdp = hbp->hbq_descr; i; i--, bdp++) {
412 bhp = bdp->bsd_handle;
414 DEQUEUE(bhp, Buf_handle, bh_qelem,
418 ((caddr_t)bhp - BUF1_SM_HOFF);
419 KB_DATASTART(m, cp, caddr_t);
420 DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_SM_SIZE, 0);
427 * Finally, we've got an entry ready for the CP.
428 * So claim the host queue entry and setup the CP-resident
429 * queue entry. The CP will (potentially) grab the supplied
430 * buffers when the descriptor pointer is set.
/* Writing cq_descr is the handoff point — do it after everything else */
432 fup->fu_buf1s_tail = hbp->hbq_next;
433 (*hbp->hbq_status) = QSTAT_PENDING;
434 cqp = hbp->hbq_cpelem;
435 cqp->cq_descr = (CP_dma) CP_WRITE((u_long)hbp->hbq_descr_dma);
438 * Update counters, etc for supplied buffers
440 fup->fu_buf1s_cnt += BUF1_SM_ENTSIZE;
441 nbuf -= BUF1_SM_ENTSIZE;
449 * Supply Strategy 1 Large Buffers to CP
451 * May be called in interrupt state.
452 * Must be called with interrupts locked out.
455 * fup pointer to device unit structure
/*
 * NOTE(review): structural twin of fore_buf_supply_1s, using cluster
 * buffers (KB_ALLOCEXT) and the BUF1_LG_* constants.  Lines appear to
 * be missing from this extraction (local declarations, the outer loop
 * header, allocation-failure branches).
 */
461 fore_buf_supply_1l(fup)
472 * Figure out how many buffers we should be giving to the CP.
473 * We're basing this calculation on the current number of open
474 * VCCs thru this device, with certain minimum and maximum values
475 * enforced. This will then allow us to figure out how many more
476 * buffers we need to supply to the CP. This will be rounded up
477 * to fill a supply queue entry.
/*
 * Target = VCC count (floored at BUF_MIN_VCC) * 4 * RECV_MAX_SEGS,
 * clamped to the CP pool, minus what the CP already holds, rounded
 * up to whole supply-queue entries.
 */
479 nvcc = MAX(fup->fu_open_vcc, BUF_MIN_VCC);
480 nbuf = nvcc * 4 * RECV_MAX_SEGS;
481 nbuf = MIN(nbuf, BUF1_LG_CPPOOL);
482 nbuf -= fup->fu_buf1l_cnt;
483 nbuf = roundup(nbuf, BUF1_LG_ENTSIZE);
486 * OK, now supply the buffers to the CP
491 * Acquire a supply queue entry
/* Stop when the tail entry is not free — the ring is full */
493 hbp = fup->fu_buf1l_tail;
494 if (!((*hbp->hbq_status) & QSTAT_FREE))
496 bdp = hbp->hbq_descr;
499 * Get a buffer for each descriptor in the queue entry
501 for (i = 0; i < BUF1_LG_ENTSIZE; i++, bdp++) {
505 * Get a cluster buffer
507 KB_ALLOCEXT(m, BUF1_LG_SIZE, KB_F_NOWAIT, KB_T_DATA);
511 KB_HEADSET(m, BUF1_LG_DOFF);
514 * Point to buffer handle structure
/* Handle lives inside the buffer at BUF1_LG_HOFF (see unwind below) */
516 bhp = (Buf_handle *)((caddr_t)m + BUF1_LG_HOFF);
517 bhp->bh_type = BHT_S1_LARGE;
520 * Setup buffer descriptor
522 bdp->bsd_handle = bhp;
523 KB_DATASTART(m, cp, caddr_t);
/* The CP sees the buffer via its DMA address, kept in both places */
524 bhp->bh_dma = bdp->bsd_buffer = (H_dma) DMA_GET_ADDR(
525 cp, BUF1_LG_SIZE, BUF_DATA_ALIGN, 0);
526 if (bdp->bsd_buffer == NULL) {
528 * Unable to assign dma address - free up
529 * this descriptor's buffer
531 fup->fu_stats->st_drv.drv_bf_segdma++;
537 * All set, so queue buffer (handle)
/* Track every supplied buffer so fore_buf_free() can reclaim it */
539 ENQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1l_bq);
543 * If we we're not able to fill all the descriptors for
544 * an entry, free up what's been partially built
546 if (i != BUF1_LG_ENTSIZE) {
550 * Clean up each used descriptor
/* Undo in order: dequeue handle, unmap DMA, free the buffer */
552 for (bdp = hbp->hbq_descr; i; i--, bdp++) {
553 bhp = bdp->bsd_handle;
555 DEQUEUE(bhp, Buf_handle, bh_qelem,
559 ((caddr_t)bhp - BUF1_LG_HOFF);
560 KB_DATASTART(m, cp, caddr_t);
561 DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_LG_SIZE, 0);
568 * Finally, we've got an entry ready for the CP.
569 * So claim the host queue entry and setup the CP-resident
570 * queue entry. The CP will (potentially) grab the supplied
571 * buffers when the descriptor pointer is set.
/* Writing cq_descr is the handoff point — do it after everything else */
573 fup->fu_buf1l_tail = hbp->hbq_next;
574 (*hbp->hbq_status) = QSTAT_PENDING;
575 cqp = hbp->hbq_cpelem;
576 cqp->cq_descr = (CP_dma) CP_WRITE((u_long)hbp->hbq_descr_dma);
579 * Update counters, etc for supplied buffers
581 fup->fu_buf1l_cnt += BUF1_LG_ENTSIZE;
582 nbuf -= BUF1_LG_ENTSIZE;
590 * Drain Buffer Supply Queues
592 * This function will free all completed entries at the head of each
593 * buffer supply queue. Since we consider the CP to "own" the buffers
594 * once we put them on a supply queue and since a completed supply queue
595 * entry is only telling us that the CP has accepted the buffers that we
596 * gave to it, there's not much to do here.
598 * May be called in interrupt state.
599 * Must be called with interrupts locked out.
602 * fup pointer to device unit structure
/*
 * NOTE(review): the function signature line (fore_buf_drain) is not
 * visible in this extraction; only the body fragments below are.
 */
614 * Drain Strategy 1 Small Queue
618 * Process each completed entry
/* Walk from the head while entries are marked completed by the CP */
620 while (*fup->fu_buf1s_head->hbq_status & QSTAT_COMPLETED) {
622 hbp = fup->fu_buf1s_head;
/* Error status is only logged; the entry is still recycled below */
624 if (*hbp->hbq_status & QSTAT_ERROR) {
626 * XXX - what does this mean???
628 log(LOG_ERR, "fore_buf_drain: buf1s queue error\n");
632 * Mark this entry free for use and bump head pointer
633 * to the next entry in the queue
635 *hbp->hbq_status = QSTAT_FREE;
636 fup->fu_buf1s_head = hbp->hbq_next;
641 * Drain Strategy 1 Large Queue
645 * Process each completed entry
/* Same pattern for the large queue */
647 while (*fup->fu_buf1l_head->hbq_status & QSTAT_COMPLETED) {
649 hbp = fup->fu_buf1l_head;
651 if (*hbp->hbq_status & QSTAT_ERROR) {
653 * XXX - what does this mean???
655 log(LOG_ERR, "fore_buf_drain: buf1l queue error\n");
659 * Mark this entry free for use and bump head pointer
660 * to the next entry in the queue
662 *hbp->hbq_status = QSTAT_FREE;
663 fup->fu_buf1l_head = hbp->hbq_next;
671 * Free Buffer Supply Queue Data Structures
674 * fup pointer to device unit structure
/*
 * NOTE(review): the function signature line and local declarations are
 * not visible in this extraction; only the body fragments below are.
 */
687 * Free any previously supplied and not returned buffers
/* Only if the unit was initialized — otherwise the queues don't exist */
689 if (fup->fu_flags & CUF_INITED) {
692 * Run through Strategy 1 Small queue
694 while ((bhp = Q_HEAD(fup->fu_buf1s_bq, Buf_handle)) != NULL) {
/* Recover the buffer pointer from the embedded handle (BUF1_SM_HOFF) */
700 m = (KBuffer *)((caddr_t)bhp - BUF1_SM_HOFF);
703 * Dequeue handle and free buffer
705 DEQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1s_bq);
707 KB_DATASTART(m, cp, caddr_t);
708 DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_SM_SIZE, 0);
714 * Run through Strategy 1 Large queue
716 while ((bhp = Q_HEAD(fup->fu_buf1l_bq, Buf_handle)) != NULL) {
722 m = (KBuffer *)((caddr_t)bhp - BUF1_LG_HOFF);
725 * Dequeue handle and free buffer
727 DEQUEUE(bhp, Buf_handle, bh_qelem, fup->fu_buf1l_bq);
729 KB_DATASTART(m, cp, caddr_t);
730 DMA_FREE_ADDR(cp, bhp->bh_dma, BUF1_LG_SIZE, 0);
737 * Free the status words
/*
 * Both queues' status words live in the one region anchored at
 * fu_buf1s_stat (see fore_buf_allocate), so one unmap/free releases
 * both and all four pointers are cleared together.
 */
739 if (fup->fu_buf1s_stat) {
740 if (fup->fu_buf1s_statd) {
741 DMA_FREE_ADDR(fup->fu_buf1s_stat, fup->fu_buf1s_statd,
743 (BUF1_SM_QUELEN + BUF1_LG_QUELEN),
746 atm_dev_free((volatile void *)fup->fu_buf1s_stat);
747 fup->fu_buf1s_stat = NULL;
748 fup->fu_buf1s_statd = NULL;
749 fup->fu_buf1l_stat = NULL;
750 fup->fu_buf1l_statd = NULL;
/*
 * Free the buffer supply descriptors.  (Original comment said
 * "transmit descriptors" — a copy/paste slip; these are the Buf_descr
 * arrays allocated in fore_buf_allocate.)  As with the status words,
 * one region backs both queues, anchored at fu_buf1s_desc.
 */
756 if (fup->fu_buf1s_desc) {
757 if (fup->fu_buf1s_descd) {
758 DMA_FREE_ADDR(fup->fu_buf1s_desc, fup->fu_buf1s_descd,
760 ((BUF1_SM_QUELEN * BUF1_SM_ENTSIZE) +
761 (BUF1_LG_QUELEN * BUF1_LG_ENTSIZE)),
764 atm_dev_free(fup->fu_buf1s_desc);
765 fup->fu_buf1s_desc = NULL;
766 fup->fu_buf1s_descd = NULL;
767 fup->fu_buf1l_desc = NULL;
768 fup->fu_buf1l_descd = NULL;