1 /* $NetBSD: midway.c,v 1.30 1997/09/29 17:40:38 chuck Exp $ */
2 /* (sync'd to midway.c 1.68) */
6 * Copyright (c) 1996 Charles D. Cranor and Washington University.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Charles D. Cranor and
20 * Washington University.
21 * 4. The name of the author may not be used to endorse or promote products
22 * derived from this software without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
26 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
27 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
29 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
33 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * $FreeBSD: src/sys/dev/en/midway.c,v 1.19.2.1 2003/01/23 21:06:42 sam Exp $
40 * m i d w a y . c e n i 1 5 5 d r i v e r
42 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
43 * started: spring, 1996 (written from scratch).
45 * notes from the author:
46 * Extra special thanks go to Werner Almesberger, EPFL LRC. Werner's
47 * ENI driver was especially useful in figuring out how this card works.
48 * I would also like to thank Werner for promptly answering email and being
53 #undef EN_DEBUG_RANGE /* check ranges on en_read/en_write's? */
54 #define EN_MBUF_OPT /* try and put more stuff in mbuf? */
58 #define EN_DMA 1 /* use dma? */
60 #define EN_NOTXDMA 0 /* hook to disable tx dma only */
61 #define EN_NORXDMA 0 /* hook to disable rx dma only */
62 #define EN_DDBHOOK 1 /* compile in ddb functions */
63 #if defined(MIDWAY_ADPONLY)
64 #define EN_ENIDMAFIX 0 /* no ENI cards to worry about */
66 #define EN_ENIDMAFIX 1 /* avoid byte DMA on the ENI card (see below) */
70 * note on EN_ENIDMAFIX: the byte aligner on the ENI version of the card
71 * appears to be broken. it works just fine if there is no load... however
72 * when the card is loaded the data get corrupted. to see this, one only
73 * has to use "telnet" over ATM. do the following command in "telnet":
74 * cat /usr/share/misc/termcap
75 * "telnet" seems to generate lots of 1023 byte mbufs (which make great
76 * use of the byte aligner). watch "netstat -s" for checksum errors.
78 * I further tested this by adding a function that compared the transmit
79 * data on the card's SRAM with the data in the mbuf chain _after_ the
80 * "transmit DMA complete" interrupt. using the "telnet" test I got data
81 * mismatches where the byte-aligned data should have been. using ddb
82 * and en_dumpmem() I verified that the DTQs fed into the card were
83 * absolutely correct. thus, we are forced to conclude that the ENI
84 * hardware is buggy. note that the Adaptec version of the card works
85 * just fine with byte DMA.
87 * bottom line: we set EN_ENIDMAFIX to 1 to avoid byte DMAs on the ENI
91 #if defined(DIAGNOSTIC) && !defined(EN_DIAG)
92 #define EN_DIAG /* link in with master DIAG option */
95 #define EN_COUNT(X) (X)++
97 #define EN_COUNT(X) /* nothing */
103 #define STATIC /* nothing */
104 #define INLINE /* nothing */
106 #define STATIC static
107 #define INLINE __inline
108 #endif /* EN_DEBUG */
110 #include "use_en.h" /* XXX for midwayvar.h's NEN */
111 #include "opt_inet.h"
112 #include "opt_natm.h"
114 /* enable DDBHOOK when DDB is available */
120 #include <sys/param.h>
122 #include <sys/systm.h>
123 #include <sys/queue.h>
124 #include <sys/sockio.h>
125 #include <sys/mbuf.h>
126 #include <sys/socket.h>
127 #include <sys/proc.h>
128 #include <sys/thread2.h>
131 #include <net/if_atm.h>
132 #include <net/ifq_var.h>
136 #if defined(INET) || defined(INET6)
137 #include <netinet/in.h>
138 #include <netinet/if_atm.h>
142 #include <netproto/natm/natm.h>
145 #include "midwayreg.h"
146 #include "midwayvar.h"
147 #include <vm/pmap.h> /* for vtophys proto */
149 #ifndef IFF_NOTRAILERS
150 #define IFF_NOTRAILERS 0
154 #define BPFATTACH(ifp, dlt, hlen) bpfattach((ifp), (dlt), (hlen))
161 #define EN_TXHIWAT (64*1024) /* max 64 KB waiting to be DMAd out */
165 #define EN_MINDMA 32 /* don't DMA anything less than this (bytes) */
168 #define RX_NONE 0xffff /* recv VC not in use */
170 #define EN_OBHDR ATM_PH_DRIVER7 /* TBD in first mbuf ! */
171 #define EN_OBTRL ATM_PH_DRIVER8 /* PDU trailer in last mbuf ! */
173 #define ENOTHER_FREE 0x01 /* free rxslot */
174 #define ENOTHER_DRAIN 0x02 /* almost free (drain DRQ dma) */
175 #define ENOTHER_RAW 0x04 /* 'raw' access (aka boodi mode) */
176 #define ENOTHER_SWSL 0x08 /* in software service list */
178 static int en_dma = EN_DMA; /* use DMA (switch off for dbg) */
181 * autoconfig attachments
184 struct cfdriver en_cd = {
193 * params to en_txlaunch() function
197 u_int32_t tbd1; /* TBD 1 */
198 u_int32_t tbd2; /* TBD 2 */
199 u_int32_t pdu1; /* PDU 1 (aal5) */
200 int nodma; /* don't use DMA */
201 int need; /* total space we need (pad out if less data) */
202 int mlen; /* length of mbuf (for dtq) */
203 struct mbuf *t; /* data */
204 u_int32_t aal; /* aal code */
205 u_int32_t atm_vci; /* vci */
206 u_int8_t atm_flags; /* flags */
211 * dma table (index by # of words)
213 * plan A: use WMAYBE (obsolete)
214 * plan B: avoid WMAYBE
218 u_int8_t bcode; /* code */
219 u_int8_t divshift; /* byte divisor */
222 static struct en_dmatab en_dma_planB[] = {
223 { 0, 0 }, /* 0 */ { MIDDMA_WORD, 2}, /* 1 */
224 { MIDDMA_2WORD, 3}, /* 2 */ { MIDDMA_WORD, 2}, /* 3 */
225 { MIDDMA_4WORD, 4}, /* 4 */ { MIDDMA_WORD, 2}, /* 5 */
226 { MIDDMA_2WORD, 3}, /* 6 */ { MIDDMA_WORD, 2}, /* 7 */
227 { MIDDMA_8WORD, 5}, /* 8 */ { MIDDMA_WORD, 2}, /* 9 */
228 { MIDDMA_2WORD, 3}, /* 10 */ { MIDDMA_WORD, 2}, /* 11 */
229 { MIDDMA_4WORD, 4}, /* 12 */ { MIDDMA_WORD, 2}, /* 13 */
230 { MIDDMA_2WORD, 3}, /* 14 */ { MIDDMA_WORD, 2}, /* 15 */
231 { MIDDMA_16WORD, 6}, /* 16 */
234 static struct en_dmatab *en_dmaplan = en_dma_planB;
240 STATIC INLINE int en_b2sz (int) __attribute__ ((unused));
242 int en_dump (int,int);
243 int en_dumpmem (int,int,int);
245 STATIC void en_dmaprobe (struct en_softc *);
246 STATIC int en_dmaprobe_doit (struct en_softc *, u_int8_t *,
248 STATIC INLINE int en_dqneed (struct en_softc *, caddr_t, u_int,
250 STATIC void en_init (struct en_softc *);
251 STATIC int en_ioctl (struct ifnet *, EN_IOCTL_CMDT, caddr_t,
253 STATIC INLINE int en_k2sz (int);
254 STATIC void en_loadvc (struct en_softc *, int);
255 STATIC int en_mfix (struct en_softc *, struct mbuf **,
257 STATIC INLINE struct mbuf *en_mget (struct en_softc *, u_int,
259 STATIC INLINE u_int32_t en_read (struct en_softc *,
261 STATIC int en_rxctl (struct en_softc *, struct atm_pseudoioctl *,
263 STATIC void en_txdma (struct en_softc *, int);
264 STATIC void en_txlaunch (struct en_softc *, int,
266 STATIC void en_service (struct en_softc *);
267 STATIC void en_start (struct ifnet *, struct ifaltq_subque *);
268 STATIC INLINE int en_sz2b (int);
269 STATIC INLINE void en_write (struct en_softc *, u_int32_t,
277 * raw read/write macros
280 #define EN_READDAT(SC,R) en_read(SC,R)
281 #define EN_WRITEDAT(SC,R,V) en_write(SC,R,V)
284 * cooked read/write macros
287 #define EN_READ(SC,R) ntohl(en_read(SC,R))
288 #define EN_WRITE(SC,R,V) en_write(SC,R, htonl(V))
/*
 * EN_WRAPADD: advance the circular-buffer cursor CUR by VAL bytes within
 * the range [START, STOP), wrapping back toward START when STOP is
 * reached or passed.  assumes VAL <= (STOP - START) so a single wrap is
 * sufficient.  wrapped in do { } while (0) so the macro expands to one
 * statement and is safe in an unbraced if/else (callers already follow
 * the invocation with a semicolon).
 */
#define EN_WRAPADD(START,STOP,CUR,VAL) do { \
	(CUR) = (CUR) + (VAL); \
	if ((CUR) >= (STOP)) \
		(CUR) = (START) + ((CUR) - (STOP)); \
} while (0)
/* convert a card byte offset X (>= START) into a u_int32_t word index */
#define WORD_IDX(START, X)	(((X) - (START)) / sizeof(u_int32_t))

/*
 * sc->dtq and sc->drq entries pack a slot number (high 12 bits) and a
 * byte length (low 18 bits) into a single 32-bit word.  bit 19
 * (0x80000) is always set so that a valid entry can never compare
 * equal to zero.
 */
#define EN_DQ_MK(SLOT,LEN)	(0x80000 | ((SLOT) << 20) | (LEN))
#define EN_DQ_SLOT(X)		((X) >> 20)
#define EN_DQ_LEN(X)		((X) & 0x3ffff)
304 /* format of DTQ/DRQ word 1 differs between ENI and ADP */
305 #if defined(MIDWAY_ENIONLY)
307 #define MID_MK_TXQ(SC,CNT,CHAN,END,BCODE) \
308 EN_WRITE((SC), (SC)->dtq_us, \
309 MID_MK_TXQ_ENI((CNT), (CHAN), (END), (BCODE)));
311 #define MID_MK_RXQ(SC,CNT,VCI,END,BCODE) \
312 EN_WRITE((SC), (SC)->drq_us, \
313 MID_MK_RXQ_ENI((CNT), (VCI), (END), (BCODE)));
315 #elif defined(MIDWAY_ADPONLY)
317 #define MID_MK_TXQ(SC,CNT,CHAN,END,JK) \
318 EN_WRITE((SC), (SC)->dtq_us, \
319 MID_MK_TXQ_ADP((CNT), (CHAN), (END), (JK)));
321 #define MID_MK_RXQ(SC,CNT,VCI,END,JK) \
322 EN_WRITE((SC), (SC)->drq_us, \
323 MID_MK_RXQ_ADP((CNT), (VCI), (END), (JK)));
327 #define MID_MK_TXQ(SC,CNT,CHAN,END,JK_OR_BCODE) { \
328 if ((SC)->is_adaptec) \
329 EN_WRITE((SC), (SC)->dtq_us, \
330 MID_MK_TXQ_ADP((CNT), (CHAN), (END), (JK_OR_BCODE))); \
332 EN_WRITE((SC), (SC)->dtq_us, \
333 MID_MK_TXQ_ENI((CNT), (CHAN), (END), (JK_OR_BCODE))); \
336 #define MID_MK_RXQ(SC,CNT,VCI,END,JK_OR_BCODE) { \
337 if ((SC)->is_adaptec) \
338 EN_WRITE((SC), (SC)->drq_us, \
339 MID_MK_RXQ_ADP((CNT), (VCI), (END), (JK_OR_BCODE))); \
341 EN_WRITE((SC), (SC)->drq_us, \
342 MID_MK_RXQ_ENI((CNT), (VCI), (END), (JK_OR_BCODE))); \
347 /* add an item to the DTQ */
348 #define EN_DTQADD(SC,CNT,CHAN,JK_OR_BCODE,ADDR,LEN,END) { \
350 (SC)->dtq[MID_DTQ_A2REG((SC)->dtq_us)] = EN_DQ_MK(CHAN,LEN); \
351 MID_MK_TXQ(SC,CNT,CHAN,END,JK_OR_BCODE); \
353 EN_WRITE((SC), (SC)->dtq_us, (ADDR)); \
354 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, (SC)->dtq_us, 4); \
357 EN_WRITE((SC), MID_DMA_WRTX, MID_DTQ_A2REG((SC)->dtq_us)); \
361 #define EN_DRQADD(SC,CNT,VCI,JK_OR_BCODE,ADDR,LEN,SLOT,END) { \
363 (SC)->drq[MID_DRQ_A2REG((SC)->drq_us)] = EN_DQ_MK(SLOT,LEN); \
364 MID_MK_RXQ(SC,CNT,VCI,END,JK_OR_BCODE); \
366 EN_WRITE((SC), (SC)->drq_us, (ADDR)); \
367 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, (SC)->drq_us, 4); \
370 EN_WRITE((SC), MID_DMA_WRRX, MID_DRQ_A2REG((SC)->drq_us)); \
376 * the code is arranged in a specific way:
377 * [1] short/inline functions
378 * [2] autoconfig stuff
380 * [4] reset -> init -> transmit -> intr -> receive functions
384 /***********************************************************************/
387 * en_read: read a word from the card. this is the only function
388 * that reads from the card.
391 STATIC INLINE u_int32_t
392 en_read(struct en_softc *sc, u_int32_t r)
394 #ifdef EN_DEBUG_RANGE
395 if (r > MID_MAXOFF || (r % 4))
396 panic("en_read out of range, r=0x%x", r);
399 return(bus_space_read_4(sc->en_memt, sc->en_base, r));
403 * en_write: write a word to the card. this is the only function that
404 * writes to the card.
408 en_write(struct en_softc *sc, u_int32_t r, u_int32_t v)
410 #ifdef EN_DEBUG_RANGE
411 if (r > MID_MAXOFF || (r % 4))
412 panic("en_write out of range, r=0x%x", r);
415 bus_space_write_4(sc->en_memt, sc->en_base, r, v);
419 * en_k2sz: convert KBytes to a size parameter (a log2)
434 default: panic("en_k2sz");
438 #define en_log2(X) en_k2sz(X)
442 * en_b2sz: convert a DMA burst code to its byte size
449 case MIDDMA_WORD: return(1*4);
451 case MIDDMA_2WORD: return(2*4);
453 case MIDDMA_4WORD: return(4*4);
455 case MIDDMA_8WORD: return(8*4);
456 case MIDDMA_16WMAYBE:
457 case MIDDMA_16WORD: return(16*4);
458 default: panic("en_b2sz");
465 * en_sz2b: convert a burst size (bytes) to DMA burst code
472 case 1*4: return(MIDDMA_WORD);
473 case 2*4: return(MIDDMA_2WORD);
474 case 4*4: return(MIDDMA_4WORD);
475 case 8*4: return(MIDDMA_8WORD);
476 case 16*4: return(MIDDMA_16WORD);
477 default: panic("en_sz2b");
484 * en_dqneed: calculate number of DTQ/DRQ's needed for a buffer
488 en_dqneed(struct en_softc *sc, caddr_t data, u_int len, u_int tx)
490 int result, needalign, sz;
492 #if !defined(MIDWAY_ENIONLY)
493 #if !defined(MIDWAY_ADPONLY)
495 #endif /* !MIDWAY_ADPONLY */
496 return(1); /* adaptec can DMA anything in one go */
499 #if !defined(MIDWAY_ADPONLY)
501 if (len < EN_MINDMA) {
502 if (!tx) /* XXX: conservative */
503 return(1); /* will copy/DMA_JK */
506 if (tx) { /* byte burst? */
507 needalign = (((uintptr_t) (void *) data) % sizeof(u_int32_t));
510 sz = min(len, sizeof(u_int32_t) - needalign);
516 if (sc->alburst && len) {
517 needalign = (((uintptr_t) (void *) data) & sc->bestburstmask);
519 result++; /* alburst */
520 sz = min(len, sc->bestburstlen - needalign);
525 if (len >= sc->bestburstlen) {
526 sz = len / sc->bestburstlen;
527 sz = sz * sc->bestburstlen;
529 result++; /* best shot */
533 result++; /* clean up */
534 if (tx && (len % sizeof(u_int32_t)) != 0)
535 result++; /* byte cleanup */
539 #endif /* !MIDWAY_ADPONLY */
544 * en_mget: get an mbuf chain that can hold totlen bytes and return it
545 * (for recv) [based on am7990_get from if_le and ieget from if_ie]
546 * after this call the sum of all the m_len's in the chain will be totlen.
549 STATIC INLINE struct mbuf *
550 en_mget(struct en_softc *sc, u_int totlen, u_int *drqneed)
553 struct mbuf *top, **mp;
556 MGETHDR(m, MB_DONTWAIT, MT_DATA);
559 m->m_pkthdr.rcvif = &sc->enif;
560 m->m_pkthdr.len = totlen;
565 /* if (top != NULL) then we've already got 1 mbuf on the chain */
568 MGET(m, MB_DONTWAIT, MT_DATA);
571 return(NULL); /* out of mbufs */
575 if (totlen >= MINCLSIZE) {
576 MCLGET(m, MB_DONTWAIT);
577 if ((m->m_flags & M_EXT) == 0) {
580 return(NULL); /* out of mbuf clusters */
584 m->m_len = min(totlen, m->m_len);
589 *drqneed += en_dqneed(sc, m->m_data, m->m_len, 0);
595 /***********************************************************************/
602 en_attach(struct en_softc *sc)
604 struct ifnet *ifp = &sc->enif;
605 char ethstr[ETHER_ADDRSTRLEN + 1];
607 u_int32_t reg, lcv, check, ptr, sav, midvloc;
610 * probe card to determine memory size. the stupid ENI card always
611 * reports to PCI that it needs 4MB of space (2MB regs and 2MB RAM).
612 * if it has less than 2MB RAM the addresses wrap in the RAM address space.
613 * (i.e. on a 512KB card addresses 0x3ffffc, 0x37fffc, and 0x2ffffc
614 * are aliases for 0x27fffc [note that RAM starts at offset 0x200000]).
619 EN_WRITE(sc, MID_RESID, 0x0); /* reset card before touching RAM */
620 for (lcv = MID_PROBEOFF; lcv <= MID_MAXOFF ; lcv += MID_PROBSIZE) {
621 EN_WRITE(sc, lcv, lcv); /* data[address] = address */
622 for (check = MID_PROBEOFF ; check < lcv ; check += MID_PROBSIZE) {
623 reg = EN_READ(sc, check);
624 if (reg != check) { /* found an alias! */
625 goto done_probe; /* and quit */
630 lcv -= MID_PROBSIZE; /* take one step back */
631 sc->en_obmemsz = (lcv + 4) - MID_RAMOFF;
634 * determine the largest DMA burst supported
645 EN_WRITE(sc, MID_RESID, 0x0); /* reset */
646 for (lcv = MID_RAMOFF ; lcv < MID_RAMOFF + sc->en_obmemsz ; lcv += 4)
647 EN_WRITE(sc, lcv, 0); /* zero memory */
649 reg = EN_READ(sc, MID_RESID);
651 kprintf("%s: ATM midway v%d, board IDs %d.%d, %s%s%s, %ldKB on-board RAM\n",
652 sc->sc_dev.dv_xname, MID_VER(reg), MID_MID(reg), MID_DID(reg),
653 (MID_IS_SABRE(reg)) ? "sabre controller, " : "",
654 (MID_IS_SUNI(reg)) ? "SUNI" : "Utopia",
655 (!MID_IS_SUNI(reg) && MID_IS_UPIPE(reg)) ? " (pipelined)" : "",
656 (long)(sc->en_obmemsz / 1024));
658 if (sc->is_adaptec) {
659 if (sc->bestburstlen == 64 && sc->alburst == 0)
660 kprintf("%s: passed 64 byte DMA test\n", sc->sc_dev.dv_xname);
662 kprintf("%s: FAILED DMA TEST: burst=%d, alburst=%d\n",
663 sc->sc_dev.dv_xname, sc->bestburstlen, sc->alburst);
665 kprintf("%s: maximum DMA burst length = %d bytes%s\n", sc->sc_dev.dv_xname,
666 sc->bestburstlen, (sc->alburst) ? " (must align)" : "");
670 * link into network subsystem and prepare card
673 sc->enif.if_softc = sc;
674 ifp->if_flags = IFF_SIMPLEX|IFF_NOTRAILERS;
675 ifp->if_ioctl = en_ioctl;
676 ifp->if_output = atm_output;
677 ifp->if_start = en_start;
683 for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
684 sc->rxvc2slot[lcv] = RX_NONE;
685 sc->txspeed[lcv] = 0; /* full */
686 sc->txvc2slot[lcv] = 0; /* full speed == slot 0 */
689 sz = sc->en_obmemsz - (MID_BUFOFF - MID_RAMOFF);
690 ptr = sav = MID_BUFOFF;
691 ptr = roundup(ptr, EN_TXSZ * 1024); /* align */
692 sz = sz - (ptr - sav);
693 if (EN_TXSZ*1024 * EN_NTX > sz) {
694 kprintf("%s: EN_NTX/EN_TXSZ too big\n", sc->sc_dev.dv_xname);
697 for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
698 sc->txslot[lcv].mbsize = 0;
699 sc->txslot[lcv].start = ptr;
700 ptr += (EN_TXSZ * 1024);
701 sz -= (EN_TXSZ * 1024);
702 sc->txslot[lcv].stop = ptr;
703 sc->txslot[lcv].nref = 0;
704 bzero(&sc->txslot[lcv].indma, sizeof(sc->txslot[lcv].indma));
705 bzero(&sc->txslot[lcv].q, sizeof(sc->txslot[lcv].q));
707 kprintf("%s: tx%d: start 0x%x, stop 0x%x\n", sc->sc_dev.dv_xname, lcv,
708 sc->txslot[lcv].start, sc->txslot[lcv].stop);
713 ptr = roundup(ptr, EN_RXSZ * 1024); /* align */
714 sz = sz - (ptr - sav);
715 sc->en_nrx = sz / (EN_RXSZ * 1024);
716 if (sc->en_nrx <= 0) {
717 kprintf("%s: EN_NTX/EN_TXSZ/EN_RXSZ too big\n", sc->sc_dev.dv_xname);
722 * ensure that there is always one VC slot on the service list free
723 * so that we can tell the difference between a full and empty list.
725 if (sc->en_nrx >= MID_N_VC)
726 sc->en_nrx = MID_N_VC - 1;
728 for (lcv = 0 ; lcv < sc->en_nrx ; lcv++) {
729 sc->rxslot[lcv].rxhand = NULL;
730 sc->rxslot[lcv].oth_flags = ENOTHER_FREE;
731 bzero(&sc->rxslot[lcv].indma, sizeof(sc->rxslot[lcv].indma));
732 bzero(&sc->rxslot[lcv].q, sizeof(sc->rxslot[lcv].q));
733 midvloc = sc->rxslot[lcv].start = ptr;
734 ptr += (EN_RXSZ * 1024);
735 sz -= (EN_RXSZ * 1024);
736 sc->rxslot[lcv].stop = ptr;
737 midvloc = midvloc - MID_RAMOFF;
738 midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2; /* mask, cvt to words */
739 midvloc = midvloc >> MIDV_LOCTOPSHFT; /* we only want the top 11 bits */
740 midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
741 sc->rxslot[lcv].mode = midvloc |
742 (en_k2sz(EN_RXSZ) << MIDV_SZSHIFT) | MIDV_TRASH;
745 kprintf("%s: rx%d: start 0x%x, stop 0x%x, mode 0x%x\n", sc->sc_dev.dv_xname,
746 lcv, sc->rxslot[lcv].start, sc->rxslot[lcv].stop, sc->rxslot[lcv].mode);
751 sc->vtrash = sc->otrash = sc->mfix = sc->txmbovr = sc->dmaovr = 0;
752 sc->txoutspace = sc->txdtqout = sc->launch = sc->lheader = sc->ltail = 0;
753 sc->hwpull = sc->swadd = sc->rxqnotus = sc->rxqus = sc->rxoutboth = 0;
754 sc->rxdrqout = sc->ttrash = sc->rxmbufout = sc->mfixfail = 0;
755 sc->headbyte = sc->tailbyte = sc->tailflush = 0;
757 sc->need_drqs = sc->need_dtqs = 0;
759 kprintf("%s: %d %dKB receive buffers, %d %dKB transmit buffers allocated\n",
760 sc->sc_dev.dv_xname, sc->en_nrx, EN_RXSZ, EN_NTX, EN_TXSZ);
762 kprintf("%s: End Station Identifier (mac address) %s\n",
763 sc->sc_dev.dv_xname, kether_ntoa(sc->macaddr, ethstr));
768 atm_ifattach(ifp, NULL);
773 * en_dmaprobe: helper function for en_attach.
775 * see how the card handles DMA by running a few DMA tests. we need
776 * to figure out the largest number of bytes we can DMA in one burst
777 * ("bestburstlen"), and if the starting address for a burst needs to
778 * be aligned on any sort of boundary or not ("alburst").
781 * sparc1: bestburstlen=4, alburst=0 (ick, broken DMA!)
782 * sparc2: bestburstlen=64, alburst=1
783 * p166: bestburstlen=64, alburst=0
786 #define NBURSTS 3 /* number of bursts to use for dmaprobe */
787 #define BOUNDARY 1024 /* test misaligned dma crossing the boundary.
788 should be n * 64. at least 64*(NBURSTS+1).
789 dell P6 with EDO DRAM has 1K boundary problem */
792 en_dmaprobe(struct en_softc *sc)
795 /* be careful. kernel stack is only 8K */
796 u_int8_t buffer[BOUNDARY * 2 + 64 * (NBURSTS + 1)];
798 u_int32_t srcbuf[64], dstbuf[64];
801 int bestalgn, bestnotalgn, lcv, try;
806 /* setup src and dst buf at the end of the boundary */
807 sp = (u_int8_t *)roundup((uintptr_t)(void *)buffer, 64);
808 while (((uintptr_t)(void *)sp & (BOUNDARY - 1)) != (BOUNDARY - 64))
813 * we can't dma across page boundary so that, if buf is at a page
814 * boundary, move it to the next page. but still either src or dst
815 * will be at the boundary, which should be ok.
817 if ((((uintptr_t)(void *)sp + 64) & PAGE_MASK) == 0)
819 if ((((uintptr_t)(void *)dp + 64) & PAGE_MASK) == 0)
822 sp = (u_int8_t *) srcbuf;
823 while ((((unsigned long) sp) % MIDDMA_MAXBURST) != 0)
825 dp = (u_int8_t *) dstbuf;
826 while ((((unsigned long) dp) % MIDDMA_MAXBURST) != 0)
828 #endif /* !NBURSTS */
830 bestalgn = bestnotalgn = en_dmaprobe_doit(sc, sp, dp, 0);
832 for (lcv = 4 ; lcv < MIDDMA_MAXBURST ; lcv += 4) {
833 try = en_dmaprobe_doit(sc, sp+lcv, dp+lcv, 0);
835 if (try < bestnotalgn) {
840 if (try < bestnotalgn)
845 if (bestalgn != bestnotalgn) /* need bursts aligned */
848 sc->bestburstlen = bestalgn;
849 sc->bestburstshift = en_log2(bestalgn);
850 sc->bestburstmask = sc->bestburstlen - 1; /* must be power of 2 */
851 sc->bestburstcode = en_sz2b(bestalgn);
854 * correct pci chipsets should be able to handle misaligned-64-byte DMA.
855 * but there are too many broken chipsets around. we try to work around
856 * by finding the best workable dma size, but still some broken machines
857 * exhibit the problem later. so warn it here.
859 if (bestalgn != 64 || sc->alburst != 0) {
860 kprintf("%s: WARNING: DMA test detects a broken PCI chipset!\n",
861 sc->sc_dev.dv_xname);
862 kprintf(" trying to work around the problem... but if this doesn't\n");
863 kprintf(" work for you, you'd better switch to a newer motherboard.\n");
870 * en_dmaprobe_doit: do actual testing
874 en_dmaprobe_doit(struct en_softc *sc, u_int8_t *sp, u_int8_t *dp, int wmtry)
876 int lcv, retval = 4, cnt, count;
877 u_int32_t reg, bcode, midvloc;
880 * set up a 1k buffer at MID_BUFOFF
885 EN_WRITE(sc, MID_RESID, 0x0); /* reset card before touching RAM */
887 midvloc = ((MID_BUFOFF - MID_RAMOFF) / sizeof(u_int32_t)) >> MIDV_LOCTOPSHFT;
888 EN_WRITE(sc, MIDX_PLACE(0), MIDX_MKPLACE(en_k2sz(1), midvloc));
889 EN_WRITE(sc, MID_VC(0), (midvloc << MIDV_LOCSHIFT)
890 | (en_k2sz(1) << MIDV_SZSHIFT) | MIDV_TRASH);
891 EN_WRITE(sc, MID_DST_RP(0), 0);
892 EN_WRITE(sc, MID_WP_ST_CNT(0), 0);
895 for (lcv = 0 ; lcv < 64*NBURSTS; lcv++) /* set up sample data */
897 for (lcv = 0 ; lcv < 68 ; lcv++) /* set up sample data */
900 EN_WRITE(sc, MID_MAST_CSR, MID_MCSR_ENDMA); /* enable DMA (only) */
902 sc->drq_chip = MID_DRQ_REG2A(EN_READ(sc, MID_DMA_RDRX));
903 sc->dtq_chip = MID_DTQ_REG2A(EN_READ(sc, MID_DMA_RDTX));
906 * try it now . . . DMA it out, then DMA it back in and compare
908 * note: in order to get the dma stuff to reverse directions it wants
909 * the "end" flag set! since we are not dma'ing valid data we may
910 * get an ident mismatch interrupt (which we will ignore).
912 * note: we've got two different tests rolled up in the same loop
914 * then we are doing a wmaybe test and wmtry is a byte count
915 * else we are doing a burst test
918 for (lcv = 8 ; lcv <= MIDDMA_MAXBURST ; lcv = lcv * 2) {
921 kprintf("DMA test lcv=%d, sp=0x%x, dp=0x%x, wmtry=%d\n",
925 /* zero SRAM and dest buffer */
926 for (cnt = 0 ; cnt < 1024; cnt += 4)
927 EN_WRITE(sc, MID_BUFOFF+cnt, 0); /* zero memory */
929 for (cnt = 0 ; cnt < 64*NBURSTS; cnt++)
931 for (cnt = 0 ; cnt < 68 ; cnt++)
936 count = (sc->bestburstlen - sizeof(u_int32_t)) / sizeof(u_int32_t);
937 bcode = en_dmaplan[count].bcode;
938 count = wmtry >> en_dmaplan[count].divshift;
940 bcode = en_sz2b(lcv);
944 /* build lcv-byte-DMA x NBURSTS */
946 EN_WRITE(sc, sc->dtq_chip, MID_MK_TXQ_ADP(lcv*NBURSTS, 0, MID_DMA_END, 0));
948 EN_WRITE(sc, sc->dtq_chip, MID_MK_TXQ_ENI(count*NBURSTS, 0, MID_DMA_END, bcode));
949 EN_WRITE(sc, sc->dtq_chip+4, vtophys(sp));
950 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
951 EN_WRITE(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
953 while (EN_READ(sc, MID_DMA_RDTX) != MID_DTQ_A2REG(sc->dtq_chip)) {
957 kprintf("%s: unexpected timeout in tx DMA test\n", sc->sc_dev.dv_xname);
959 kprintf(" alignment=0x%x, burst size=%d, dma addr reg=0x%x\n",
960 (u_long)sp & 63, lcv, EN_READ(sc, MID_DMA_ADDR));
962 return(retval); /* timeout, give up */
967 EN_WRITE(sc, sc->dtq_chip, MID_MK_TXQ_ADP(lcv, 0, MID_DMA_END, 0));
969 EN_WRITE(sc, sc->dtq_chip, MID_MK_TXQ_ENI(count, 0, MID_DMA_END, bcode));
970 EN_WRITE(sc, sc->dtq_chip+4, vtophys(sp));
971 EN_WRITE(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip+8));
973 while (EN_READ(sc, MID_DMA_RDTX) == MID_DTQ_A2REG(sc->dtq_chip)) {
977 kprintf("%s: unexpected timeout in tx DMA test\n", sc->sc_dev.dv_xname);
978 return(retval); /* timeout, give up */
981 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
982 #endif /* !NBURSTS */
983 reg = EN_READ(sc, MID_INTACK);
984 if ((reg & MID_INT_DMA_TX) != MID_INT_DMA_TX) {
985 kprintf("%s: unexpected status in tx DMA test: 0x%x\n",
986 sc->sc_dev.dv_xname, reg);
989 EN_WRITE(sc, MID_MAST_CSR, MID_MCSR_ENDMA); /* re-enable DMA (only) */
991 /* "return to sender..." address is known ... */
994 /* build lcv-byte-DMA x NBURSTS */
996 EN_WRITE(sc, sc->drq_chip, MID_MK_RXQ_ADP(lcv*NBURSTS, 0, MID_DMA_END, 0));
998 EN_WRITE(sc, sc->drq_chip, MID_MK_RXQ_ENI(count*NBURSTS, 0, MID_DMA_END, bcode));
999 EN_WRITE(sc, sc->drq_chip+4, vtophys(dp));
1000 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
1001 EN_WRITE(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
1003 while (EN_READ(sc, MID_DMA_RDRX) != MID_DRQ_A2REG(sc->drq_chip)) {
1007 kprintf("%s: unexpected timeout in rx DMA test\n", sc->sc_dev.dv_xname);
1008 return(retval); /* timeout, give up */
1011 #else /* !NBURSTS */
1013 EN_WRITE(sc, sc->drq_chip, MID_MK_RXQ_ADP(lcv, 0, MID_DMA_END, 0));
1015 EN_WRITE(sc, sc->drq_chip, MID_MK_RXQ_ENI(count, 0, MID_DMA_END, bcode));
1016 EN_WRITE(sc, sc->drq_chip+4, vtophys(dp));
1017 EN_WRITE(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip+8));
1019 while (EN_READ(sc, MID_DMA_RDRX) == MID_DRQ_A2REG(sc->drq_chip)) {
1023 kprintf("%s: unexpected timeout in rx DMA test\n", sc->sc_dev.dv_xname);
1024 return(retval); /* timeout, give up */
1027 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
1028 #endif /* !NBURSTS */
1029 reg = EN_READ(sc, MID_INTACK);
1030 if ((reg & MID_INT_DMA_RX) != MID_INT_DMA_RX) {
1031 kprintf("%s: unexpected status in rx DMA test: 0x%x\n",
1032 sc->sc_dev.dv_xname, reg);
1035 EN_WRITE(sc, MID_MAST_CSR, MID_MCSR_ENDMA); /* re-enable DMA (only) */
1038 return(bcmp(sp, dp, wmtry)); /* wmtry always exits here, no looping */
1042 if (bcmp(sp, dp, lcv * NBURSTS)) {
1043 /* kprintf("DMA test failed! lcv=%d, sp=0x%x, dp=0x%x\n", lcv, sp, dp); */
1044 return(retval); /* failed, use last value */
1047 if (bcmp(sp, dp, lcv))
1048 return(retval); /* failed, use last value */
1054 return(retval); /* studly 64 byte DMA present! oh baby!! */
1057 /***********************************************************************/
1060 * en_ioctl: handle ioctl requests
1062 * NOTE: if you add an ioctl to set txspeed, you should choose a new
1063 * TX channel/slot. Choose the one with the lowest sc->txslot[slot].nref
1064 * value, subtract one from sc->txslot[0].nref, add one to the
1065 * sc->txslot[slot].nref, set sc->txvc2slot[vci] = slot, and then set
1070 en_ioctl(struct ifnet *ifp, EN_IOCTL_CMDT cmd, caddr_t data, struct ucred *cr)
1072 struct en_softc *sc = (struct en_softc *) ifp->if_softc;
1073 struct ifaddr *ifa = (struct ifaddr *) data;
1074 struct ifreq *ifr = (struct ifreq *) data;
1075 struct atm_pseudoioctl *api = (struct atm_pseudoioctl *)data;
1077 struct atm_rawioctl *ario = (struct atm_rawioctl *)data;
1085 case SIOCATMENA: /* enable circuit for recv */
1086 error = en_rxctl(sc, api, 1);
1089 case SIOCATMDIS: /* disable circuit for recv */
1090 error = en_rxctl(sc, api, 0);
1095 if ((slot = sc->rxvc2slot[ario->npcb->npcb_vci]) == RX_NONE) {
1099 if (ario->rawvalue > EN_RXSZ*1024)
1100 ario->rawvalue = EN_RXSZ*1024;
1101 if (ario->rawvalue) {
1102 sc->rxslot[slot].oth_flags |= ENOTHER_RAW;
1103 sc->rxslot[slot].raw_threshold = ario->rawvalue;
1105 sc->rxslot[slot].oth_flags &= (~ENOTHER_RAW);
1106 sc->rxslot[slot].raw_threshold = 0;
1109 kprintf("%s: rxvci%d: turn %s raw (boodi) mode\n",
1110 sc->sc_dev.dv_xname, ario->npcb->npcb_vci,
1111 (ario->rawvalue) ? "on" : "off");
1116 ifp->if_flags |= IFF_UP;
1117 #if defined(INET) || defined(INET6)
1118 if (ifa->ifa_addr->sa_family == AF_INET
1119 || ifa->ifa_addr->sa_family == AF_INET6) {
1122 ifa->ifa_rtrequest = atm_rtrequest; /* ??? */
1126 /* what to do if not INET? */
1139 #if defined(SIOCSIFMTU) /* ??? copied from if_de */
1140 #if !defined(ifr_mtu)
1141 #define ifr_mtu ifr_metric
1145 * Set the interface MTU.
1148 if (ifr->ifr_mtu > ATMMTU) {
1153 ifp->if_mtu = ifr->ifr_mtu;
1154 /* XXXCDC: do we really need to reset on MTU size change? */
1158 #endif /* SIOCSIFMTU */
1170 * en_rxctl: turn on and off VCs for recv.
1174 en_rxctl(struct en_softc *sc, struct atm_pseudoioctl *pi, int on)
1176 u_int vci, flags, slot;
1177 u_int32_t oldmode, newmode;
1179 vci = ATM_PH_VCI(&pi->aph);
1180 flags = ATM_PH_FLAGS(&pi->aph);
1183 kprintf("%s: %s vpi=%d, vci=%d, flags=%d\n", sc->sc_dev.dv_xname,
1184 (on) ? "enable" : "disable", ATM_PH_VPI(&pi->aph), vci, flags);
1187 if (ATM_PH_VPI(&pi->aph) || vci >= MID_N_VC)
1195 if (sc->rxvc2slot[vci] != RX_NONE)
1197 for (slot = 0 ; slot < sc->en_nrx ; slot++)
1198 if (sc->rxslot[slot].oth_flags & ENOTHER_FREE)
1200 if (slot == sc->en_nrx)
1202 sc->rxvc2slot[vci] = slot;
1203 sc->rxslot[slot].rxhand = NULL;
1204 oldmode = sc->rxslot[slot].mode;
1205 newmode = (flags & ATM_PH_AAL5) ? MIDV_AAL5 : MIDV_NOAAL;
1206 sc->rxslot[slot].mode = MIDV_SETMODE(oldmode, newmode);
1207 sc->rxslot[slot].atm_vci = vci;
1208 sc->rxslot[slot].atm_flags = flags;
1209 sc->rxslot[slot].oth_flags = 0;
1210 sc->rxslot[slot].rxhand = pi->rxhand;
1211 if (sc->rxslot[slot].indma.ifq_head || sc->rxslot[slot].q.ifq_head)
1212 panic("en_rxctl: left over mbufs on enable");
1213 sc->txspeed[vci] = 0; /* full speed to start */
1214 sc->txvc2slot[vci] = 0; /* init value */
1215 sc->txslot[0].nref++; /* bump reference count */
1216 en_loadvc(sc, vci); /* does debug kprintf for us */
1224 if (sc->rxvc2slot[vci] == RX_NONE)
1226 slot = sc->rxvc2slot[vci];
1227 if ((sc->rxslot[slot].oth_flags & (ENOTHER_FREE|ENOTHER_DRAIN)) != 0)
1229 crit_enter(); /* block out enintr() */
1230 oldmode = EN_READ(sc, MID_VC(vci));
1231 newmode = MIDV_SETMODE(oldmode, MIDV_TRASH) & ~MIDV_INSERVICE;
1232 EN_WRITE(sc, MID_VC(vci), (newmode | (oldmode & MIDV_INSERVICE)));
1233 /* halt in tracks, be careful to preserve inservice bit */
1235 sc->rxslot[slot].rxhand = NULL;
1236 sc->rxslot[slot].mode = newmode;
1238 sc->txslot[sc->txvc2slot[vci]].nref--;
1239 sc->txspeed[vci] = 0;
1240 sc->txvc2slot[vci] = 0;
1242 /* if stuff is still going on we are going to have to drain it out */
1243 if (sc->rxslot[slot].indma.ifq_head ||
1244 sc->rxslot[slot].q.ifq_head ||
1245 (sc->rxslot[slot].oth_flags & ENOTHER_SWSL) != 0) {
1246 sc->rxslot[slot].oth_flags |= ENOTHER_DRAIN;
1248 sc->rxslot[slot].oth_flags = ENOTHER_FREE;
1249 sc->rxslot[slot].atm_vci = RX_NONE;
1250 sc->rxvc2slot[vci] = RX_NONE;
1252 crit_exit(); /* enable enintr() */
1254 kprintf("%s: rx%d: VCI %d is now %s\n", sc->sc_dev.dv_xname, slot, vci,
1255 (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) ? "draining" : "free");
1260 /***********************************************************************/
1263 * en_reset: reset the board, throw away work in progress.
1264 * must en_init to recover.
/*
 * en_reset: reset the board and throw away all work in progress
 * (both RX and TX).  the caller must en_init() afterwards to recover.
 * NOTE(review): this excerpt elides some lines (declarations, braces);
 * comments below annotate only the visible statements.
 */
1268 en_reset(struct en_softc *sc)
1274   kprintf("%s: reset\n", sc->sc_dev.dv_xname);
/* give the bus-specific front end a chance to reset first, if present */
1277   if (sc->en_busreset)
1278     sc->en_busreset(sc);
1279   EN_WRITE(sc, MID_RESID, 0x0);	/* reset hardware */
1282    * recv: dump any mbufs we are dma'ing into, if DRAINing, then a reset
/* walk every VC; only slots with an active RX mapping need draining */
1286   for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
1287     if (sc->rxvc2slot[lcv] == RX_NONE)
1289     slot = sc->rxvc2slot[lcv];
/* free all mbufs currently being DMA'd into, then the pending queue */
1291       IF_DEQUEUE(&sc->rxslot[slot].indma, m);
1293 	break;		/* >>> exit 'while(1)' here <<< */
1297       IF_DEQUEUE(&sc->rxslot[slot].q, m);
1299 	break;		/* >>> exit 'while(1)' here <<< */
/* slot is off the software service list now; a draining slot becomes free */
1302     sc->rxslot[slot].oth_flags &= ~ENOTHER_SWSL;
1303     if (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) {
1304       sc->rxslot[slot].oth_flags = ENOTHER_FREE;
1305       sc->rxvc2slot[lcv] = RX_NONE;
1307       kprintf("%s: rx%d: VCI %d is now free\n", sc->sc_dev.dv_xname, slot, lcv);
1313    * xmit: dump everything
/* discard everything queued or in-DMA on each transmit channel */
1316   for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
1318       IF_DEQUEUE(&sc->txslot[lcv].indma, m);
1320 	break;		/* >>> exit 'while(1)' here <<< */
1324       IF_DEQUEUE(&sc->txslot[lcv].q, m);
1326 	break;		/* >>> exit 'while(1)' here <<< */
/* no mbuf bytes accounted to this channel any more */
1330     sc->txslot[lcv].mbsize = 0;
1338 * en_init: init board and sync the card with the data in the softc.
/*
 * en_init: (re)initialize the board and sync the card with the data in
 * the softc.  brings the interface up or down according to IFF_UP.
 * NOTE(review): this excerpt elides some lines; only visible statements
 * are annotated.
 */
1342 en_init(struct en_softc *sc)
/* interface going down: reset the card and clear RUNNING */
1347 if ((sc->enif.if_flags & IFF_UP) == 0) {
1349 kprintf("%s: going down\n", sc->sc_dev.dv_xname);
1351 en_reset(sc); /* to be safe */
1352 sc->enif.if_flags &= ~IFF_RUNNING; /* disable */
1357 kprintf("%s: going up\n", sc->sc_dev.dv_xname);
1359 sc->enif.if_flags |= IFF_RUNNING; /* enable */
1361 if (sc->en_busreset)
1362 sc->en_busreset(sc);
1363 EN_WRITE(sc, MID_RESID, 0x0); /* reset */
1366 * init obmem data structures: vc tab, dma q's, slist.
1368 * note that we set drq_free/dtq_free to one less than the total number
1369 * of DTQ/DRQs present. we do this because the card uses the condition
1370 * (drq_chip == drq_us) to mean "list is empty"... but if you allow the
1371 * circular list to be completely full then (drq_chip == drq_us) [i.e.
1372 * the drq_us pointer will wrap all the way around]. by restricting
1373 * the number of active requests to (N - 1) we prevent the list from
1374 * becoming completely full. note that the card will sometimes give
1375 * us an interrupt for a DTQ/DRQ we have already processed... this helps
1376 * keep that interrupt from messing us up.
1379 for (vc = 0 ; vc < MID_N_VC ; vc++)
/* RX free-DMA-request queue: start in sync with the chip's pointer */
1382 bzero(&sc->drq, sizeof(sc->drq));
1383 sc->drq_free = MID_DRQ_N - 1; /* N - 1 */
1384 sc->drq_chip = MID_DRQ_REG2A(EN_READ(sc, MID_DMA_RDRX));
1385 EN_WRITE(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
1386 /* ensure zero queue */
1387 sc->drq_us = sc->drq_chip;
/* TX DMA queue: same scheme as the DRQs above */
1389 bzero(&sc->dtq, sizeof(sc->dtq));
1390 sc->dtq_free = MID_DTQ_N - 1; /* N - 1 */
1391 sc->dtq_chip = MID_DTQ_REG2A(EN_READ(sc, MID_DMA_RDTX));
/*
 * BUGFIX: this is a DTQ address, so it must be converted with the DTQ
 * macro (the DRQ macro uses the DRQ queue's base and would compute a
 * wrong register index) -- mirrors the MID_DRQ_A2REG use on line 1385.
 */
1392 EN_WRITE(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
1393 /* ensure zero queue */
1394 sc->dtq_us = sc->dtq_chip;
/* sync our copy of the hardware service list pointer; empty the soft list */
1396 sc->hwslistp = MID_SL_REG2A(EN_READ(sc, MID_SERV_WRITE));
1397 sc->swsl_size = sc->swsl_head = sc->swsl_tail = 0;
1400 kprintf("%s: drq free/chip: %d/0x%x, dtq free/chip: %d/0x%x, hwslist: 0x%x\n",
1401 sc->sc_dev.dv_xname, sc->drq_free, sc->drq_chip,
1402 sc->dtq_free, sc->dtq_chip, sc->hwslistp);
/* program each transmit channel's buffer location/size registers */
1405 for (slot = 0 ; slot < EN_NTX ; slot++) {
1406 sc->txslot[slot].bfree = EN_TXSZ * 1024;
1407 EN_WRITE(sc, MIDX_READPTR(slot), 0);
1408 EN_WRITE(sc, MIDX_DESCSTART(slot), 0);
1409 loc = sc->txslot[slot].cur = sc->txslot[slot].start;
1410 loc = loc - MID_RAMOFF;
1411 loc = (loc & ~((EN_TXSZ*1024) - 1)) >> 2; /* mask, cvt to words */
1412 loc = loc >> MIDV_LOCTOPSHFT; /* top 11 bits */
1413 EN_WRITE(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ), loc));
1415 kprintf("%s: tx%d: place 0x%x\n", sc->sc_dev.dv_xname, slot,
1416 EN_READ(sc, MIDX_PLACE(slot)));
/* enable interrupts and turn on DMA/TX/RX in the master CSR */
1424 EN_WRITE(sc, MID_INTENA, MID_INT_TX|MID_INT_DMA_OVR|MID_INT_IDENT|
1425 MID_INT_LERR|MID_INT_DMA_ERR|MID_INT_DMA_RX|MID_INT_DMA_TX|
1426 MID_INT_SERVICE| /* >>> MID_INT_SUNI| XXXCDC<<< */ MID_INT_STATS);
1427 EN_WRITE(sc, MID_MAST_CSR, MID_SETIPL(sc->ipl)|MID_MCSR_ENDMA|
1428 MID_MCSR_ENTX|MID_MCSR_ENRX);
1434 * en_loadvc: load a vc tab entry from a slot
/*
 * en_loadvc: load a VC table entry on the card from the rx slot
 * assigned to VC "vc" (no-op past the trash setting if unassigned).
 */
1438 en_loadvc(struct en_softc *sc, int vc)
1441 u_int32_t reg = EN_READ(sc, MID_VC(vc));
/* first put the VC into trash mode while we reprogram it */
1443 reg = MIDV_SETMODE(reg, MIDV_TRASH);
1444 EN_WRITE(sc, MID_VC(vc), reg);
/* no rx slot assigned to this VC? leave it trashing */
1447 if ((slot = sc->rxvc2slot[vc]) == RX_NONE)
1450 /* no need to set CRC */
1451 EN_WRITE(sc, MID_DST_RP(vc), 0); /* read pointer = 0, desc. start = 0 */
1452 EN_WRITE(sc, MID_WP_ST_CNT(vc), 0); /* write pointer = 0 */
1453 EN_WRITE(sc, MID_VC(vc), sc->rxslot[slot].mode); /* set mode, size, loc */
/* software copy of the buffer pointer starts at the slot's base */
1454 sc->rxslot[slot].cur = sc->rxslot[slot].start;
1457 kprintf("%s: rx%d: assigned to VCI %d\n", sc->sc_dev.dv_xname, slot, vc);
1463 * en_start: start transmitting the next packet that needs to go out
1464 * if there is one. note that atm_output() has already locked us.
/*
 * en_start: start transmitting the next packet that needs to go out
 * if there is one. note that atm_output() has already locked us.
 * NOTE(review): this excerpt elides some lines; only visible statements
 * are annotated.
 */
1468 en_start(struct ifnet *ifp, struct ifaltq_subque *ifsq __unused)
1470 struct en_softc *sc = (struct en_softc *) ifp->if_softc;
1471 struct mbuf *m, *lastm, *prev;
1472 struct atm_pseudohdr *ap, *new_ap;
1473 int txchan, mlen, got, need, toadd, cellcnt, first;
1474 u_int32_t atm_vpi, atm_vci, atm_flags, *dat, aal;
1477 if ((ifp->if_flags & IFF_RUNNING) == 0)
1481 * remove everything from interface queue since we handle all queueing
1487 m = ifq_dequeue(&ifp->if_snd);
1489 return; /* EMPTY: >>> exit here <<< */
1492 * calculate size of packet (in bytes)
1493 * also, if we are not doing transmit DMA we eliminate all stupid
1494 * (non-word) alignments here using en_mfix(). calls to en_mfix()
1495 * seem to be due to tcp retransmits for the most part.
1497 * after this loop mlen is the total length of the mbuf chain (including
1498 * atm_ph), and lastm is a pointer to the last mbuf on the chain.
1506 if ((!sc->is_adaptec && EN_ENIDMAFIX) || EN_NOTXDMA || !en_dma) {
1507 if ( ((uintptr_t)mtod(lastm, void *) % sizeof(u_int32_t)) != 0 ||
1508 ((lastm->m_len % sizeof(u_int32_t)) != 0 && lastm->m_next)) {
1509 first = (lastm == m);
1510 if (en_mfix(sc, &lastm, prev) == 0) { /* failed? */
1516 m = lastm; /* update */
1521 mlen += lastm->m_len;
1522 if (lastm->m_next == NULL)
1524 lastm = lastm->m_next;
1527 if (m == NULL) /* happens only if mfix fails */
1530 ap = mtod(m, struct atm_pseudohdr *);
1532 atm_vpi = ATM_PH_VPI(ap);
1533 atm_vci = ATM_PH_VCI(ap);
1534 atm_flags = ATM_PH_FLAGS(ap) & ~(EN_OBHDR|EN_OBTRL);
1535 aal = ((atm_flags & ATM_PH_AAL5) != 0)
1536 ? MID_TBD_AAL5 : MID_TBD_NOAAL5;
1539 * check that vpi/vci is one we can use
/*
 * BUGFIX: valid VCIs are 0..MID_N_VC-1 (txspeed[]/txvc2slot[] have
 * MID_N_VC entries), so the test must be ">=" -- the old ">" allowed
 * atm_vci == MID_N_VC to index one element past those arrays below.
 */
1542 if (atm_vpi || atm_vci >= MID_N_VC) {
1543 kprintf("%s: output vpi=%d, vci=%d out of card range, dropping...\n",
1544 sc->sc_dev.dv_xname, atm_vpi, atm_vci);
1550 * computing how much padding we need on the end of the mbuf, then
1551 * see if we can put the TBD at the front of the mbuf where the
1552 * link header goes (well behaved protocols will reserve room for us).
1553 * last, check if room for PDU tail.
1555 * got = number of bytes of data we have
1556 * cellcnt = number of cells in this mbuf
1557 * need = number of bytes of data + padding we need (excludes TBD)
1558 * toadd = number of bytes of data we need to add to end of mbuf,
1559 * [including AAL5 PDU, if AAL5]
/*
 * BUGFIX: subtract the size of the pseudo header itself, not the size
 * of a *pointer* to it (8 bytes on LP64) -- mlen includes the 4-byte
 * atm_pseudohdr, so the stray "*" made got/cellcnt/toadd all wrong.
 */
1562 got = mlen - sizeof(struct atm_pseudohdr);
1563 toadd = (aal == MID_TBD_AAL5) ? MID_PDU_SIZE : 0; /* PDU */
1564 cellcnt = (got + toadd + (MID_ATMDATASZ - 1)) / MID_ATMDATASZ;
1565 need = cellcnt * MID_ATMDATASZ;
1566 toadd = need - got; /* recompute, including zero padding */
1569 kprintf("%s: txvci%d: mlen=%d, got=%d, need=%d, toadd=%d, cell#=%d\n",
1570 sc->sc_dev.dv_xname, atm_vci, mlen, got, need, toadd, cellcnt);
1571 kprintf(" leading_space=%d, trailing_space=%d\n",
1572 M_LEADINGSPACE(m), M_TRAILINGSPACE(lastm));
1578 * note: external storage (M_EXT) can be shared between mbufs
1579 * to avoid copying (see m_copym()). this means that the same
1580 * data buffer could be shared by several mbufs, and thus it isn't
1581 * a good idea to try and write TBDs or PDUs to M_EXT data areas.
/* prepend the TBD in the mbuf's leading space if it is safe to do so */
1584 if (M_LEADINGSPACE(m) >= MID_TBD_SIZE && (m->m_flags & M_EXT) == 0) {
1585 m->m_data -= MID_TBD_SIZE;
1586 m->m_len += MID_TBD_SIZE;
1587 mlen += MID_TBD_SIZE;
1588 new_ap = mtod(m, struct atm_pseudohdr *);
1589 *new_ap = *ap; /* move it back */
1591 dat = ((u_int32_t *) ap) + 1;
1592 /* make sure the TBD is in proper byte order */
1593 *dat++ = htonl(MID_TBD_MK1(aal, sc->txspeed[atm_vci], cellcnt));
1594 *dat = htonl(MID_TBD_MK2(atm_vci, 0, 0));
1595 atm_flags |= EN_OBHDR;
/* likewise append padding (and AAL5 PDU trailer) in trailing space */
1598 if (toadd && (lastm->m_flags & M_EXT) == 0 &&
1599 M_TRAILINGSPACE(lastm) >= toadd) {
1600 cp = mtod(lastm, u_int8_t *) + lastm->m_len;
1601 lastm->m_len += toadd;
1603 if (aal == MID_TBD_AAL5) {
1604 bzero(cp, toadd - MID_PDU_SIZE);
1605 dat = (u_int32_t *)(cp + toadd - MID_PDU_SIZE);
1606 /* make sure the PDU is in proper byte order */
1607 *dat = htonl(MID_PDU_MK1(0, 0, got));
1611 atm_flags |= EN_OBTRL;
1613 ATM_PH_FLAGS(ap) = atm_flags; /* update EN_OBHDR/EN_OBTRL bits */
1614 #endif /* EN_MBUF_OPT */
1617 * get assigned channel (will be zero unless txspeed[atm_vci] is set)
1620 txchan = sc->txvc2slot[atm_vci];
/* drop if this channel already has too many bytes queued */
1622 if (sc->txslot[txchan].mbsize > EN_TXHIWAT) {
1623 EN_COUNT(sc->txmbovr);
1626 kprintf("%s: tx%d: buffer space shortage\n", sc->sc_dev.dv_xname,
1632 sc->txslot[txchan].mbsize += mlen;
1635 kprintf("%s: tx%d: VPI=%d, VCI=%d, FLAGS=0x%x, speed=0x%x\n",
1636 sc->sc_dev.dv_xname, txchan, atm_vpi, atm_vci, atm_flags,
1637 sc->txspeed[atm_vci]);
1638 kprintf(" adjusted mlen=%d, mbsize=%d\n", mlen,
1639 sc->txslot[txchan].mbsize);
/* queue on the channel and kick the transmit DMA engine */
1642 IF_ENQUEUE(&sc->txslot[txchan].q, m);
1644 en_txdma(sc, txchan);
1652 * en_mfix: fix a stupid mbuf
1655 STATIC int en_makeexclusive(struct en_softc *, struct mbuf **, struct mbuf *);
/*
 * en_makeexclusive: ensure *mm points at an mbuf whose data area we can
 * safely write (i.e. not a shared M_EXT buffer).  copies shared clusters
 * into a fresh one; presumably returns 0 on failure, nonzero on success
 * (return statements are elided in this excerpt -- TODO confirm).
 */
1658 en_makeexclusive(struct en_softc *sc, struct mbuf **mm, struct mbuf *prev)
1660 struct mbuf *m, *new;
1664 if (m->m_flags & M_EXT) {
1665 if (!(m->m_flags & M_EXT_CLUSTER)) {
1666 /* external buffer isn't an ordinary mbuf cluster! */
1667 kprintf("%s: mfix: special buffer! can't make a copy!\n",
1668 sc->sc_dev.dv_xname);
/* shared cluster: replace it with a private copy */
1672 if (m_sharecount(m) > 1) {
1673 /* make a real copy of the M_EXT mbuf since it is shared */
1674 new = m_getcl(MB_DONTWAIT, MT_DATA, m->m_flags & M_PKTHDR);
/* allocation failed: count it (caller sees failure) */
1677 EN_COUNT(sc->mfixfail);
1680 if (m->m_flags & M_PKTHDR)
1681 M_MOVE_PKTHDR(new, m);
/* carry the payload and chain linkage over to the copy */
1682 bcopy(m->m_data, new->m_data, m->m_len);
1683 new->m_len = m->m_len;
1684 new->m_next = m->m_next;
1691 /* the buffer is not shared, align the data offset using
/* slide the data down to the previous word boundary in place */
1693 u_char *d = mtod(m, u_char *);
1694 int off = ((uintptr_t)(void *)d) % sizeof(u_int32_t);
1697 bcopy(d, d - off, m->m_len);
1698 m->m_data = (caddr_t)d - off;
/*
 * en_mfix: fix an mbuf chain so that its data is word aligned and its
 * length is a multiple of a word, so the non-DMA/ENI-fix transmit path
 * can copy it out whole words at a time.  presumably returns 0 on
 * failure, nonzero on success (returns are elided here -- TODO confirm).
 */
1706 en_mfix(struct en_softc *sc, struct mbuf **mm, struct mbuf *prev)
1715 EN_COUNT(sc->mfix); /* count # of calls */
1717 kprintf("%s: mfix mbuf m_data=0x%x, m_len=%d\n", sc->sc_dev.dv_xname,
1718 m->m_data, m->m_len);
/* step 1: word-align the start of the data */
1721 d = mtod(m, u_char *);
1722 off = ((uintptr_t) (void *) d) % sizeof(u_int32_t);
1725 if ((m->m_flags & M_EXT) == 0) {
1726 bcopy(d, d - off, m->m_len); /* ALIGN! (with costly data copy...) */
1728 m->m_data = (caddr_t)d;
1730 /* can't write to an M_EXT mbuf since it may be shared */
1731 if (en_makeexclusive(sc, &m, prev) == 0)
1733 *mm = m; /* note: 'd' now invalid */
/* step 2: pad m_len out to a word boundary, pulling bytes up from
   the rest of the chain (or zero filling if the chain runs out) */
1737 off = m->m_len % sizeof(u_int32_t);
1741 if (m->m_flags & M_EXT) {
1742 /* can't write to an M_EXT mbuf since it may be shared */
1743 if (en_makeexclusive(sc, &m, prev) == 0)
1745 *mm = m; /* note: 'd' now invalid */
/* d points just past the current data; off = bytes still needed */
1748 d = mtod(m, u_char *) + m->m_len;
1749 off = sizeof(u_int32_t) - off;
1753 if (nxt != NULL && nxt->m_len == 0) {
1754 /* remove an empty mbuf. this avoids odd byte padding to an empty
1756 m->m_next = nxt = m_free(nxt);
1758 if (nxt == NULL) { /* out of data, zero fill */
1760 continue; /* next "off" */
/* steal one byte from the next mbuf, advancing its data pointer */
1762 cp = mtod(nxt, u_char *);
1766 nxt->m_data = (caddr_t)cp;
/* drop a now-empty trailing mbuf, if any */
1768 if (nxt != NULL && nxt->m_len == 0)
1769 m->m_next = m_free(nxt);
1774 * en_txdma: start transmit DMA, if possible
/*
 * en_txdma: start transmit DMA on channel "chan", if possible.
 * sizes up the packet at the head of the channel queue, bails out if
 * there isn't enough obmem buffer space or enough free DTQs, and
 * otherwise commits the packet and hands it to en_txlaunch().
 * NOTE(review): this excerpt elides some lines; only visible
 * statements are annotated.
 */
1778 en_txdma(struct en_softc *sc, int chan)
1781 struct atm_pseudohdr *ap;
1782 struct en_launch launch = { .tbd1 = 0 };
1783 int datalen = 0, dtqneed, len, ncells;
1788 kprintf("%s: tx%d: starting...\n", sc->sc_dev.dv_xname, chan);
1792 * note: now that txlaunch handles non-word aligned/sized requests
1793 * the only time you can safely set launch.nodma is if you've en_mfix()'d
1794 * the mbuf chain. this happens only if EN_NOTXDMA || !en_dma.
1797 launch.nodma = (EN_NOTXDMA || !en_dma);
1802 * get an mbuf waiting for DMA
1805 launch.t = sc->txslot[chan].q.ifq_head; /* peek at head of queue */
1807 if (launch.t == NULL) {
1809 kprintf("%s: tx%d: ...done!\n", sc->sc_dev.dv_xname, chan);
1811 return; /* >>> exit here if no data waiting for DMA <<< */
1817 * note: launch.need = # bytes we need to get on the card
1818 * dtqneed = # of DTQs we need for this packet
1819 * launch.mlen = # of bytes in mbuf chain (<= launch.need)
1822 ap = mtod(launch.t, struct atm_pseudohdr *);
1823 launch.atm_vci = ATM_PH_VCI(ap);
1824 launch.atm_flags = ATM_PH_FLAGS(ap);
1825 launch.aal = ((launch.atm_flags & ATM_PH_AAL5) != 0) ?
1826 MID_TBD_AAL5 : MID_TBD_NOAAL5;
1829 * XXX: have to recompute the length again, even though we already did
1830 * it in en_start(). might as well compute dtqneed here as well, so
1831 * this isn't that bad.
1834 if ((launch.atm_flags & EN_OBHDR) == 0) {
1835 dtqneed = 1; /* header still needs to be added */
1836 launch.need = MID_TBD_SIZE; /* not included with mbuf */
1838 dtqneed = 0; /* header on-board, dma with mbuf */
/* tally bytes and per-mbuf DTQ requirements over the whole chain */
1843 for (tmp = launch.t ; tmp != NULL ; tmp = tmp->m_next) {
1846 cp = mtod(tmp, u_int8_t *);
1847 if (tmp == launch.t) {
1848 len -= sizeof(struct atm_pseudohdr); /* don't count this! */
1849 cp += sizeof(struct atm_pseudohdr);
1853 continue; /* atm_pseudohdr alone in first mbuf */
1855 dtqneed += en_dqneed(sc, (caddr_t) cp, len, 1);
1858 if ((launch.need % sizeof(u_int32_t)) != 0)
1859 dtqneed++; /* need DTQ to FLUSH internal buffer */
1861 if ((launch.atm_flags & EN_OBTRL) == 0) {
1862 if (launch.aal == MID_TBD_AAL5) {
1863 datalen = launch.need - MID_TBD_SIZE;
1864 launch.need += MID_PDU_SIZE; /* AAL5: need PDU tail */
1866 dtqneed++; /* need to work on the end a bit */
1870 * finish calculation of launch.need (need to figure out how much padding
1871 * we will need). launch.need includes MID_TBD_SIZE, but we need to
1872 * remove that to so we can round off properly. we have to add
1873 * MID_TBD_SIZE back in after calculating ncells.
1876 launch.need = roundup(launch.need - MID_TBD_SIZE, MID_ATMDATASZ);
1877 ncells = launch.need / MID_ATMDATASZ;
1878 launch.need += MID_TBD_SIZE;
1880 if (launch.need > EN_TXSZ * 1024) {
1881 kprintf("%s: tx%d: packet larger than xmit buffer (%d > %d)\n",
1882 sc->sc_dev.dv_xname, chan, launch.need, EN_TXSZ * 1024);
1887 * note: don't use the entire buffer space. if WRTX becomes equal
1888 * to RDTX, the transmitter stops assuming the buffer is empty! --kjc
1890 if (launch.need >= sc->txslot[chan].bfree) {
1891 EN_COUNT(sc->txoutspace);
1893 kprintf("%s: tx%d: out of transmit space\n", sc->sc_dev.dv_xname, chan);
1895 return; /* >>> exit here if out of obmem buffer space <<< */
1899 * ensure we have enough dtqs to go, if not, wait for more.
1905 if (dtqneed > sc->dtq_free) {
1907 EN_COUNT(sc->txdtqout);
1909 kprintf("%s: tx%d: out of transmit DTQs\n", sc->sc_dev.dv_xname, chan);
1911 return; /* >>> exit here if out of dtqs <<< */
1915 * it is a go, commit! dequeue mbuf start working on the xfer.
1918 IF_DEQUEUE(&sc->txslot[chan].q, tmp);
1920 if (launch.t != tmp)
1921 panic("en dequeue");
1922 #endif /* EN_DIAG */
1928 EN_COUNT(sc->launch);
1930 IFNET_STAT_INC(ifp, opackets, 1);
/* software TBD/PDU values for en_txlaunch to write by hand if needed */
1932 if ((launch.atm_flags & EN_OBHDR) == 0) {
1933 EN_COUNT(sc->lheader);
1934 /* store tbd1/tbd2 in host byte order */
1935 launch.tbd1 = MID_TBD_MK1(launch.aal, sc->txspeed[launch.atm_vci], ncells);
1936 launch.tbd2 = MID_TBD_MK2(launch.atm_vci, 0, 0);
1938 if ((launch.atm_flags & EN_OBTRL) == 0 && launch.aal == MID_TBD_AAL5) {
1939 EN_COUNT(sc->ltail);
1940 launch.pdu1 = MID_PDU_MK1(0, 0, datalen); /* host byte order */
1943 en_txlaunch(sc, chan, &launch);
1947 * adjust the top of the mbuf to skip the pseudo atm header
1948 * (and TBD, if present) before passing the packet to bpf,
1949 * restore it afterwards.
1951 int size = sizeof(struct atm_pseudohdr);
1952 if (launch.atm_flags & EN_OBHDR)
1953 size += MID_TBD_SIZE;
1955 launch.t->m_data += size;
1956 launch.t->m_len -= size;
1958 BPF_MTAP(ifp, launch.t);
1960 launch.t->m_data -= size;
1961 launch.t->m_len += size;
1964 * do some housekeeping and get the next packet
1967 sc->txslot[chan].bfree -= launch.need;
1968 IF_ENQUEUE(&sc->txslot[chan].indma, launch.t);
1972 * END of txdma loop!
/* drop path (elided context): dequeue the packet we refused to send */
1980 IF_DEQUEUE(&sc->txslot[chan].q, tmp);
1981 if (launch.t != tmp)
1982 panic("en dequeue drop");
1984 sc->txslot[chan].mbsize -= launch.mlen;
1990 * en_txlaunch: launch an mbuf into the dma pool!
/*
 * en_txlaunch: launch an mbuf chain into the transmit buffer / DMA
 * queues for channel "chan", as described by the en_launch structure
 * filled in by en_txdma().  data is either copied into obmem by the
 * CPU or handed to the card's DMA engine via DTQs.
 * NOTE(review): this excerpt elides some lines; only visible
 * statements are annotated.
 */
1994 en_txlaunch(struct en_softc *sc, int chan, struct en_launch *l)
1997 u_int32_t cur = sc->txslot[chan].cur,
1998 start = sc->txslot[chan].start,
1999 stop = sc->txslot[chan].stop,
2000 dma, *data, *datastop, count, bcode;
2001 int pad, addtail, need, len, needalign, cnt, end, mx;
2006 * need = # bytes card still needs (decr. to zero)
2007 * len = # of bytes left in current mbuf
2008 * cur = our current pointer
2009 * dma = last place we programmed into the DMA
2010 * data = pointer into data area of mbuf that needs to go next
2011 * cnt = # of bytes to transfer in this DTQ
2012 * bcode/count = DMA burst code, and chip's version of cnt
2014 * a single buffer can require up to 5 DTQs depending on its size
2015 * and alignment requirements. the 5 possible requests are:
2016 * [1] 1, 2, or 3 byte DMA to align src data pointer to word boundary
2017 * [2] alburst DMA to align src data pointer to bestburstlen
2018 * [3] 1 or more bestburstlen DMAs
2019 * [4] clean up burst (to last word boundary)
2020 * [5] 1, 2, or 3 byte final clean up DMA
2025 addtail = (l->atm_flags & EN_OBTRL) == 0; /* add a tail? */
/* sanity: after the TBD, the payload must be whole cells */
2028 if ((need - MID_TBD_SIZE) % MID_ATMDATASZ)
2029 kprintf("%s: tx%d: bogus trasmit needs (%d)\n", sc->sc_dev.dv_xname, chan,
2033 kprintf("%s: tx%d: launch mbuf %p! cur=0x%x[%d], need=%d, addtail=%d\n",
2034 sc->sc_dev.dv_xname, chan, l->t, cur, (cur-start)/4, need, addtail);
2035 count = EN_READ(sc, MIDX_PLACE(chan));
2036 kprintf(" HW: base_address=0x%x, size=%d, read=%d, descstart=%d\n",
2037 MIDX_BASE(count), MIDX_SZ(count), EN_READ(sc, MIDX_READPTR(chan)),
2038 EN_READ(sc, MIDX_DESCSTART(chan)));
2042 * do we need to insert the TBD by hand?
2043 * note that tbd1/tbd2/pdu1 are in host byte order.
2046 if ((l->atm_flags & EN_OBHDR) == 0) {
2048 kprintf("%s: tx%d: insert header 0x%x 0x%x\n", sc->sc_dev.dv_xname,
2049 chan, l->tbd1, l->tbd2);
/* write the two TBD words directly into obmem, wrapping as needed */
2051 EN_WRITE(sc, cur, l->tbd1);
2052 EN_WRAPADD(start, stop, cur, 4);
2053 EN_WRITE(sc, cur, l->tbd2);
2054 EN_WRAPADD(start, stop, cur, 4);
2059 * now do the mbufs...
2062 for (tmp = l->t ; tmp != NULL ; tmp = tmp->m_next) {
2064 /* get pointer to data and length */
2065 data = mtod(tmp, u_int32_t *);
/* first mbuf: skip over the pseudo ATM header */
2068 data += sizeof(struct atm_pseudohdr)/sizeof(u_int32_t);
2069 len -= sizeof(struct atm_pseudohdr);
2072 /* now, determine if we should copy it */
2073 if (l->nodma || (len < EN_MINDMA &&
2074 (len % 4) == 0 && ((uintptr_t) (void *) data % 4) == 0 &&
2078 * roundup len: the only time this will change the value of len
2079 * is when l->nodma is true, tmp is the last mbuf, and there is
2080 * a non-word number of bytes to transmit. in this case it is
2081 * safe to round up because we've en_mfix'd the mbuf (so the first
2082 * byte is word aligned there must be enough free bytes at the end
2083 * to round off to the next word boundary)...
2085 len = roundup(len, sizeof(u_int32_t));
2086 datastop = data + (len / sizeof(u_int32_t));
2087 /* copy loop: preserve byte order!!! use WRITEDAT */
2088 while (data != datastop) {
2089 EN_WRITEDAT(sc, cur, *data);
2091 EN_WRAPADD(start, stop, cur, 4);
2095 kprintf("%s: tx%d: copied %d bytes (%d left, cur now 0x%x)\n",
2096 sc->sc_dev.dv_xname, chan, len, need, cur);
2098 continue; /* continue on to next mbuf */
2101 /* going to do DMA, first make sure the dtq is in sync. */
/* JK (no-data) DTQ advances the card's notion of "cur" to match ours */
2103 EN_DTQADD(sc, WORD_IDX(start,cur), chan, MIDDMA_JK, 0, 0, 0);
2105 kprintf("%s: tx%d: dtq_sync: advance pointer to %d\n",
2106 sc->sc_dev.dv_xname, chan, cur);
2111 * if this is the last buffer, and it looks like we are going to need to
2112 * flush the internal buffer, can we extend the length of this mbuf to
2116 if (tmp->m_next == NULL) {
2117 cnt = (need - len) % sizeof(u_int32_t);
2118 if (cnt && M_TRAILINGSPACE(tmp) >= cnt)
2119 len += cnt; /* pad for FLUSH */
2122 #if !defined(MIDWAY_ENIONLY)
2125 * the adaptec DMA engine is smart and handles everything for us.
2128 if (sc->is_adaptec) {
2129 /* need to DMA "len" bytes out to card */
2131 EN_WRAPADD(start, stop, cur, len);
2133 kprintf("%s: tx%d: adp_dma %d bytes (%d left, cur now 0x%x)\n",
2134 sc->sc_dev.dv_xname, chan, len, need, cur);
2136 end = (need == 0) ? MID_DMA_END : 0;
2137 EN_DTQADD(sc, len, chan, 0, vtophys(data), l->mlen, end);
2140 dma = cur; /* update dma pointer */
2143 #endif /* !MIDWAY_ENIONLY */
2145 #if !defined(MIDWAY_ADPONLY)
2148 * the ENI DMA engine is not so smart and need more help from us
2151 /* do we need to do a DMA op to align to word boundary? */
2152 needalign = (uintptr_t) (void *) data % sizeof(u_int32_t);
2154 EN_COUNT(sc->headbyte);
2155 cnt = sizeof(u_int32_t) - needalign;
2156 if (cnt == 2 && len >= cnt) {
2158 bcode = MIDDMA_2BYTE;
2160 cnt = min(cnt, len); /* prevent overflow */
2162 bcode = MIDDMA_BYTE;
2165 EN_WRAPADD(start, stop, cur, cnt);
2167 kprintf("%s: tx%d: small al_dma %d bytes (%d left, cur now 0x%x)\n",
2168 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2171 end = (need == 0) ? MID_DMA_END : 0;
2172 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2175 data = (u_int32_t *) ((u_char *)data + cnt);
2178 /* do we need to do a DMA op to align? */
/* align the source pointer to the best burst size, if worth it */
2180 (needalign = (((uintptr_t) (void *) data) & sc->bestburstmask)) != 0
2181 && len >= sizeof(u_int32_t)) {
2182 cnt = sc->bestburstlen - needalign;
2183 mx = len & ~(sizeof(u_int32_t)-1); /* don't go past end */
2186 count = cnt / sizeof(u_int32_t);
2187 bcode = MIDDMA_WORD;
2189 count = cnt / sizeof(u_int32_t);
2190 bcode = en_dmaplan[count].bcode;
2191 count = cnt >> en_dmaplan[count].divshift;
2194 EN_WRAPADD(start, stop, cur, cnt);
2196 kprintf("%s: tx%d: al_dma %d bytes (%d left, cur now 0x%x)\n",
2197 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2200 end = (need == 0) ? MID_DMA_END : 0;
2201 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2204 data = (u_int32_t *) ((u_char *)data + cnt);
2207 /* do we need to do a max-sized burst? */
2208 if (len >= sc->bestburstlen) {
2209 count = len >> sc->bestburstshift;
2210 cnt = count << sc->bestburstshift;
2211 bcode = sc->bestburstcode;
2213 EN_WRAPADD(start, stop, cur, cnt);
2215 kprintf("%s: tx%d: best_dma %d bytes (%d left, cur now 0x%x)\n",
2216 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2219 end = (need == 0) ? MID_DMA_END : 0;
2220 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2223 data = (u_int32_t *) ((u_char *)data + cnt);
2226 /* do we need to do a cleanup burst? */
/* whole words that didn't fit in a best-sized burst */
2227 cnt = len & ~(sizeof(u_int32_t)-1);
2229 count = cnt / sizeof(u_int32_t);
2230 bcode = en_dmaplan[count].bcode;
2231 count = cnt >> en_dmaplan[count].divshift;
2233 EN_WRAPADD(start, stop, cur, cnt);
2235 kprintf("%s: tx%d: cleanup_dma %d bytes (%d left, cur now 0x%x)\n",
2236 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2239 end = (need == 0) ? MID_DMA_END : 0;
2240 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2243 data = (u_int32_t *) ((u_char *)data + cnt);
2246 /* any word fragments left? */
2248 EN_COUNT(sc->tailbyte);
2251 bcode = MIDDMA_2BYTE; /* use 2byte mode */
2254 bcode = MIDDMA_BYTE; /* use 1 byte mode */
2257 EN_WRAPADD(start, stop, cur, len);
2259 kprintf("%s: tx%d: byte cleanup_dma %d bytes (%d left, cur now 0x%x)\n",
2260 sc->sc_dev.dv_xname, chan, len, need, cur);
2262 end = (need == 0) ? MID_DMA_END : 0;
2263 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2268 dma = cur; /* update dma pointer */
2269 #endif /* !MIDWAY_ADPONLY */
2271 } /* next mbuf, please */
2274 * all mbuf data has been copied out to the obmem (or set up to be DMAd).
2275 * if the trailer or padding needs to be put in, do it now.
2277 * NOTE: experimental results reveal the following fact:
2278 * if you DMA "X" bytes to the card, where X is not a multiple of 4,
2279 * then the card will internally buffer the last (X % 4) bytes (in
2280 * hopes of getting (4 - (X % 4)) more bytes to make a complete word).
2281 * it is important to make sure we don't leave any important data in
2282 * this internal buffer because it is discarded on the last (end) DTQ.
2283 * one way to do this is to DMA in (4 - (X % 4)) more bytes to flush
2284 * the darn thing out.
2289 pad = need % sizeof(u_int32_t);
2292 * FLUSH internal data buffer. pad out with random data from the front
2293 * of the mbuf chain...
2295 bcode = (sc->is_adaptec) ? 0 : MIDDMA_BYTE;
2296 EN_COUNT(sc->tailflush);
2297 EN_WRAPADD(start, stop, cur, pad);
2298 EN_DTQADD(sc, pad, chan, bcode, vtophys(l->t->m_data), 0, 0);
2301 kprintf("%s: tx%d: pad/FLUSH dma %d bytes (%d left, cur now 0x%x)\n",
2302 sc->sc_dev.dv_xname, chan, pad, need, cur);
/* zero-pad the remaining whole words (AAL5 trailer handled below) */
2307 pad = need / sizeof(u_int32_t); /* round *down* */
2308 if (l->aal == MID_TBD_AAL5)
2311 kprintf("%s: tx%d: padding %d bytes (cur now 0x%x)\n",
2312 sc->sc_dev.dv_xname, chan, pad * sizeof(u_int32_t), cur);
2315 EN_WRITEDAT(sc, cur, 0); /* no byte order issues with zero */
2316 EN_WRAPADD(start, stop, cur, 4);
2318 if (l->aal == MID_TBD_AAL5) {
2319 EN_WRITE(sc, cur, l->pdu1); /* in host byte order */
2320 EN_WRAPADD(start, stop, cur, 8);
2324 if (addtail || dma != cur) {
2325 /* write final descriptor */
2326 EN_DTQADD(sc, WORD_IDX(start,cur), chan, MIDDMA_JK, 0,
2327 l->mlen, MID_DMA_END);
2328 /* dma = cur; */ /* not necessary since we are done */
2332 /* update current pointer */
2333 sc->txslot[chan].cur = cur;
2335 kprintf("%s: tx%d: DONE! cur now = 0x%x\n",
2336 sc->sc_dev.dv_xname, chan, cur);
/*
 * en_intr (body; the function header is elided in this excerpt):
 * service the card's interrupt -- ack it, handle fatal errors, account
 * for TX completions and TX/RX DMA completions, pull VCIs off the
 * hardware service list onto the software one, and update stats.
 */
2350 struct en_softc *sc = (struct en_softc *) arg;
2352 struct atm_pseudohdr ah;
2354 u_int32_t reg, kick, val, mask, chip, vci, slot, dtq, drq;
2355 int lcv, idx, need_softserv = 0;
/* reading MID_INTACK both fetches and acknowledges pending interrupts */
2357 reg = EN_READ(sc, MID_INTACK);
2359 if ((reg & MID_INT_ANY) == 0)
2360 EN_INTR_RET(0); /* not us */
2363 kprintf("%s: interrupt=0x%b\n", sc->sc_dev.dv_xname, reg, MID_INTBITS);
2367 * unexpected errors that need a reset
2370 if ((reg & (MID_INT_IDENT|MID_INT_LERR|MID_INT_DMA_ERR|MID_INT_SUNI)) != 0) {
2371 kprintf("%s: unexpected interrupt=0x%b, resetting card\n",
2372 sc->sc_dev.dv_xname, reg, MID_INTBITS);
2375 Debugger("en: unexpected error");
2377 sc->enif.if_flags &= ~IFF_RUNNING; /* FREEZE! */
2382 EN_INTR_RET(1); /* for us */
2385 /*******************
2389 kick = 0; /* bitmask of channels to kick */
2390 if (reg & MID_INT_TX) { /* TX done! */
2393 * check for tx complete, if detected then this means that some space
2394 * has come free on the card. we must account for it and arrange to
2395 * kick the channel to life (in case it is stalled waiting on the card).
2397 for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2) {
2398 if (reg & MID_TXCHAN(lcv)) {
2399 kick = kick | mask; /* want to kick later */
2400 val = EN_READ(sc, MIDX_READPTR(lcv)); /* current read pointer */
2401 val = (val * sizeof(u_int32_t)) + sc->txslot[lcv].start;
2402 /* convert to offset */
/* recompute free space; second case handles buffer wrap-around */
2403 if (val > sc->txslot[lcv].cur)
2404 sc->txslot[lcv].bfree = val - sc->txslot[lcv].cur;
2406 sc->txslot[lcv].bfree = (val + (EN_TXSZ*1024)) - sc->txslot[lcv].cur;
2408 kprintf("%s: tx%d: trasmit done. %d bytes now free in buffer\n",
2409 sc->sc_dev.dv_xname, lcv, sc->txslot[lcv].bfree);
2415 if (reg & MID_INT_DMA_TX) { /* TX DMA done! */
2418 * check for TX DMA complete, if detected then this means that some DTQs
2419 * are now free. it also means some indma mbufs can be freed.
2420 * if we needed DTQs, kick all channels.
2422 val = EN_READ(sc, MID_DMA_RDTX); /* chip's current location */
2423 idx = MID_DTQ_A2REG(sc->dtq_chip);/* where we last saw chip */
2424 if (sc->need_dtqs) {
2425 kick = MID_NTX_CH - 1; /* assume power of 2, kick all! */
2426 sc->need_dtqs = 0; /* recalculated in "kick" loop below */
2428 kprintf("%s: cleared need DTQ condition\n", sc->sc_dev.dv_xname);
/* walk from our last-seen position up to the chip's current one */
2431 while (idx != val) {
2433 if ((dtq = sc->dtq[idx]) != 0) {
2434 sc->dtq[idx] = 0; /* don't forget to zero it out when done */
2435 slot = EN_DQ_SLOT(dtq);
2436 IF_DEQUEUE(&sc->txslot[slot].indma, m);
2437 if (!m) panic("enintr: dtqsync");
2438 sc->txslot[slot].mbsize -= EN_DQ_LEN(dtq);
2440 kprintf("%s: tx%d: free %d dma bytes, mbsize now %d\n",
2441 sc->sc_dev.dv_xname, slot, EN_DQ_LEN(dtq),
2442 sc->txslot[slot].mbsize);
2446 EN_WRAPADD(0, MID_DTQ_N, idx, 1);
2448 sc->dtq_chip = MID_DTQ_REG2A(val); /* sync softc */
2453 * kick xmit channels as needed
2458 kprintf("%s: tx kick mask = 0x%x\n", sc->sc_dev.dv_xname, kick);
2460 for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2) {
2461 if ((kick & mask) && sc->txslot[lcv].q.ifq_head) {
2462 en_txdma(sc, lcv); /* kick it! */
2464 } /* for each slot */
2468 /*******************
2473 * check for RX DMA complete, and pass the data "upstairs"
2476 if (reg & MID_INT_DMA_RX) {
2477 val = EN_READ(sc, MID_DMA_RDRX); /* chip's current location */
2478 idx = MID_DRQ_A2REG(sc->drq_chip);/* where we last saw chip */
2479 while (idx != val) {
2481 if ((drq = sc->drq[idx]) != 0) {
2482 sc->drq[idx] = 0; /* don't forget to zero it out when done */
2483 slot = EN_DQ_SLOT(drq);
2484 if (EN_DQ_LEN(drq) == 0) { /* "JK" trash DMA? */
2487 IF_DEQUEUE(&sc->rxslot[slot].indma, m);
2489 panic("enintr: drqsync: %s: lost mbuf in slot %d!",
2490 sc->sc_dev.dv_xname, slot);
2492 /* do something with this mbuf */
2493 if (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) { /* drain? */
/* draining VC: when everything has quiesced, release the slot */
2496 vci = sc->rxslot[slot].atm_vci;
2497 if (sc->rxslot[slot].indma.ifq_head == NULL &&
2498 sc->rxslot[slot].q.ifq_head == NULL &&
2499 (EN_READ(sc, MID_VC(vci)) & MIDV_INSERVICE) == 0 &&
2500 (sc->rxslot[slot].oth_flags & ENOTHER_SWSL) == 0) {
2501 sc->rxslot[slot].oth_flags = ENOTHER_FREE; /* done drain */
2502 sc->rxslot[slot].atm_vci = RX_NONE;
2503 sc->rxvc2slot[vci] = RX_NONE;
2505 kprintf("%s: rx%d: VCI %d now free\n", sc->sc_dev.dv_xname,
/* normal receive: build the pseudo header and hand it upstairs */
2509 } else if (m != NULL) {
2510 ATM_PH_FLAGS(&ah) = sc->rxslot[slot].atm_flags;
2511 ATM_PH_VPI(&ah) = 0;
2512 ATM_PH_SETVCI(&ah, sc->rxslot[slot].atm_vci);
2514 kprintf("%s: rx%d: rxvci%d: atm_input, mbuf %p, len %d, hand %p\n",
2515 sc->sc_dev.dv_xname, slot, sc->rxslot[slot].atm_vci, m,
2516 EN_DQ_LEN(drq), sc->rxslot[slot].rxhand);
2520 IFNET_STAT_INC(ifp, ipackets, 1);
2524 atm_input(ifp, &ah, m, sc->rxslot[slot].rxhand);
2528 EN_WRAPADD(0, MID_DRQ_N, idx, 1);
2530 sc->drq_chip = MID_DRQ_REG2A(val); /* sync softc */
2532 if (sc->need_drqs) { /* true if we had a DRQ shortage */
2536 kprintf("%s: cleared need DRQ condition\n", sc->sc_dev.dv_xname);
2542 * handle service interrupts
2545 if (reg & MID_INT_SERVICE) {
2546 chip = MID_SL_REG2A(EN_READ(sc, MID_SERV_WRITE));
/* drain the hardware service list up to the chip's write pointer */
2548 while (sc->hwslistp != chip) {
2550 /* fetch and remove it from hardware service list */
2551 vci = EN_READ(sc, sc->hwslistp);
2552 EN_WRAPADD(MID_SLOFF, MID_SLEND, sc->hwslistp, 4);/* advance hw ptr */
2553 slot = sc->rxvc2slot[vci];
2554 if (slot == RX_NONE) {
2556 kprintf("%s: unexpected rx interrupt on VCI %d\n",
2557 sc->sc_dev.dv_xname, vci);
2559 EN_WRITE(sc, MID_VC(vci), MIDV_TRASH); /* rx off, damn it! */
2560 continue; /* next */
2562 EN_WRITE(sc, MID_VC(vci), sc->rxslot[slot].mode); /* remove from hwsl */
2563 EN_COUNT(sc->hwpull);
2566 kprintf("%s: pulled VCI %d off hwslist\n", sc->sc_dev.dv_xname, vci);
2569 /* add it to the software service list (if needed) */
2570 if ((sc->rxslot[slot].oth_flags & ENOTHER_SWSL) == 0) {
2571 EN_COUNT(sc->swadd);
2573 sc->rxslot[slot].oth_flags |= ENOTHER_SWSL;
2574 sc->swslist[sc->swsl_tail] = slot;
2575 EN_WRAPADD(0, MID_SL_N, sc->swsl_tail, 1);
2578 kprintf("%s: added VCI %d to swslist\n", sc->sc_dev.dv_xname, vci);
2585 * now service (function too big to include here)
2595 if (reg & MID_INT_DMA_OVR) {
2596 EN_COUNT(sc->dmaovr);
2598 kprintf("%s: MID_INT_DMA_OVR\n", sc->sc_dev.dv_xname);
/* accumulate the card's trash counters into the softc */
2601 reg = EN_READ(sc, MID_STAT);
2603 sc->otrash += MID_OTRASH(reg);
2604 sc->vtrash += MID_VTRASH(reg);
2607 EN_INTR_RET(1); /* for us */
2612 * en_service: handle a service interrupt
2614 * Q: why do we need a software service list?
2616 * A: if we remove a VCI from the hardware list and we find that we are
2617 * out of DRQs we must defer processing until some DRQs become free.
2618 * so we must remember to look at this RX VCI/slot later, but we can't
2619 * put it back on the hardware service list (since that isn't allowed).
2620 * so we instead save it on the software service list. it would be nice
2621 * if we could peek at the VCI on top of the hwservice list without removing
2622 * it, however this leads to a race condition: if we peek at it and
2623 * decide we are done with it new data could come in before we have a
2624 * chance to remove it from the hwslist. by the time we get it out of
2625 * the list the interrupt for the new data will be lost. oops!
/*
 * NOTE(review): this listing is an elided excerpt -- the embedded numbers
 * are original source line numbers and are non-contiguous, so several
 * statements (including the debug #ifdef wrappers around the kprintfs)
 * are missing here.  Comments below describe only what is visible.
 */
2630 en_service(struct en_softc *sc)
2632 struct mbuf *m, *tmp;
2633 u_int32_t cur, dstart, rbd, pdu, *sav, dma, bcode, count, *data, *datastop;
2634 u_int32_t start, stop, cnt, needalign;
2635 int slot, raw, aal5, vci, fill, mlen, tlen, drqneed, need, needfill, end;
2637 aal5 = 0; /* Silence gcc */
/* nothing left on the software service list? then we are done */
2639 if (sc->swsl_size == 0) {
2641 kprintf("%s: en_service done\n", sc->sc_dev.dv_xname);
2643 return; /* >>> exit here if swsl now empty <<< */
2647 * get slot/vci to service
2650 slot = sc->swslist[sc->swsl_head];
2651 vci = sc->rxslot[slot].atm_vci;
2653 if (sc->rxvc2slot[vci] != slot) panic("en_service rx slot/vci sync");
2657 * determine our mode and if we've got any work to do
2660 raw = sc->rxslot[slot].oth_flags & ENOTHER_RAW;
2661 start= sc->rxslot[slot].start;
2662 stop= sc->rxslot[slot].stop;
2663 cur = sc->rxslot[slot].cur;
2666 kprintf("%s: rx%d: service vci=%d raw=%d start/stop/cur=0x%x 0x%x 0x%x\n",
2667 sc->sc_dev.dv_xname, slot, vci, raw, start, stop, cur);
/*
 * dstart = chip's descriptor read pointer for this VCI, scaled from
 * words to a byte offset within this slot's RX buffer area.
 */
2671 dstart = MIDV_DSTART(EN_READ(sc, MID_DST_RP(vci)));
2672 dstart = (dstart * sizeof(u_int32_t)) + start;
2674 /* check to see if there is any data at all */
2675 if (dstart == cur) {
2676 defer: /* defer processing */
2677 EN_WRAPADD(0, MID_SL_N, sc->swsl_head, 1);
2678 sc->rxslot[slot].oth_flags &= ~ENOTHER_SWSL;
2680 /* >>> remove from swslist <<< */
2682 kprintf("%s: rx%d: remove vci %d from swslist\n",
2683 sc->sc_dev.dv_xname, slot, vci);
2689 * figure out how many bytes we need
2690 * [mlen = # bytes to go in mbufs, fill = # bytes to dump (MIDDMA_JK)]
2695 /* raw mode (aka boodi mode) */
2698 mlen = dstart - cur;
2700 mlen = (dstart + (EN_RXSZ*1024)) - cur;
2702 if (mlen < sc->rxslot[slot].raw_threshold)
2703 goto defer; /* too little data to deal with */
/* AAL mode: the word at 'cur' is the Receive Buffer Descriptor (RBD) */
2708 aal5 = (sc->rxslot[slot].atm_flags & ATM_PH_AAL5);
2709 rbd = EN_READ(sc, cur);
2710 if (MID_RBD_ID(rbd) != MID_RBD_STDID)
2711 panic("en_service: id mismatch");
2713 if (rbd & MID_RBD_T) {
2714 mlen = 0; /* we've got trash */
2715 fill = MID_RBD_SIZE;
2716 EN_COUNT(sc->ttrash);
2718 kprintf("RX overflow lost %d cells!\n", MID_RBD_CNT(rbd));
2721 mlen = MID_RBD_SIZE + MID_CHDR_SIZE + MID_ATMDATASZ; /* 1 cell (ick!) */
/*
 * AAL5: total buffer length comes from the RBD cell count; the PDU
 * trailer word at the end of the buffer gives the true payload length
 * (anything beyond it is 'fill' that must be skipped, not passed up).
 */
2726 tlen = (MID_RBD_CNT(rbd) * MID_ATMDATASZ) + MID_RBD_SIZE;
2727 pdu = cur + tlen - MID_PDU_SIZE;
2729 pdu -= (EN_RXSZ*1024);
2730 pdu = EN_READ(sc, pdu); /* get PDU in correct byte order */
2731 fill = tlen - MID_RBD_SIZE - MID_PDU_LEN(pdu);
2732 if (fill < 0 || (rbd & MID_RBD_CRCERR) != 0) {
2733 static int first = 1;
2736 kprintf("%s: %s, dropping frame\n", sc->sc_dev.dv_xname,
2737 (rbd & MID_RBD_CRCERR) ?
2738 "CRC error" : "invalid AAL5 PDU length");
2739 kprintf("%s: got %d cells (%d bytes), AAL5 len is %d bytes (pdu=0x%x)\n",
2740 sc->sc_dev.dv_xname, MID_RBD_CNT(rbd),
2741 tlen - MID_RBD_SIZE, MID_PDU_LEN(pdu), pdu);
2743 kprintf("CRC error report disabled from now on!\n");
2750 IFNET_STAT_INC(ifp, ierrors, 1);
2759 * now allocate mbufs for mlen bytes of data, if out of mbufs, trash all
2762 * 1. it is possible that we've already allocated an mbuf for this pkt
2763 * but ran out of DRQs, in which case we saved the allocated mbuf on
2765 * 2. if we save an mbuf in "q" we store the "cur" (pointer) in the front
2766 * of the mbuf as an identity (that we can check later), and we also
2767 * store drqneed (so we don't have to recompute it).
2768 * 3. after this block of code, if m is still NULL then we ran out of mbufs
2771 m = sc->rxslot[slot].q.ifq_head;
2774 sav = mtod(m, u_int32_t *);
2775 if (sav[0] != cur) {
2777 kprintf("%s: rx%d: q'ed mbuf %p not ours\n",
2778 sc->sc_dev.dv_xname, slot, m);
2780 m = NULL; /* wasn't ours */
2781 EN_COUNT(sc->rxqnotus);
2783 EN_COUNT(sc->rxqus);
2784 IF_DEQUEUE(&sc->rxslot[slot].q, m);
2787 kprintf("%s: rx%d: recovered q'ed mbuf %p (drqneed=%d)\n",
2788 sc->sc_dev.dv_xname, slot, m, drqneed);
2793 if (mlen != 0 && m == NULL) {
2794 m = en_mget(sc, mlen, &drqneed); /* allocate! */
2798 EN_COUNT(sc->rxmbufout);
2800 kprintf("%s: rx%d: out of mbufs\n", sc->sc_dev.dv_xname, slot);
2804 kprintf("%s: rx%d: allocate mbuf %p, mlen=%d, drqneed=%d\n",
2805 sc->sc_dev.dv_xname, slot, m, mlen, drqneed);
2810 kprintf("%s: rx%d: VCI %d, mbuf_chain %p, mlen %d, fill %d\n",
2811 sc->sc_dev.dv_xname, slot, vci, m, mlen, fill);
2815 * now check to see if we've got the DRQs needed. if we are out of
2816 * DRQs we must quit (saving our mbuf, if we've got one).
2819 needfill = (fill) ? 1 : 0;
2820 if (drqneed + needfill > sc->drq_free) {
2821 sc->need_drqs = 1; /* flag condition */
2823 EN_COUNT(sc->rxoutboth);
2825 kprintf("%s: rx%d: out of DRQs *and* mbufs!\n", sc->sc_dev.dv_xname, slot);
2827 return; /* >>> exit here if out of both mbufs and DRQs <<< */
2829 sav = mtod(m, u_int32_t *);
2832 IF_ENQUEUE(&sc->rxslot[slot].q, m);
2833 EN_COUNT(sc->rxdrqout);
2835 kprintf("%s: rx%d: out of DRQs\n", sc->sc_dev.dv_xname, slot);
2837 return; /* >>> exit here if out of DRQs <<< */
2841 * at this point all resources have been allocated and we are commited
2842 * to servicing this slot.
2844 * dma = last location we told chip about
2845 * cur = current location
2846 * mlen = space in the mbuf we want
2847 * need = bytes to xfer in (decrs to zero)
2848 * fill = how much fill we need
2849 * tlen = how much data to transfer to this mbuf
2850 * cnt/bcode/count = <same as xmit>
2852 * 'needfill' not used after this point
2855 dma = cur; /* dma = last location we told chip about */
2856 need = roundup(mlen, sizeof(u_int32_t));
2857 fill = fill - (need - mlen); /* note: may invalidate 'needfill' */
/* walk the mbuf chain, moving 'need' bytes out of the chip's buffer */
2859 for (tmp = m ; tmp != NULL && need > 0 ; tmp = tmp->m_next) {
2860 tlen = roundup(tmp->m_len, sizeof(u_int32_t)); /* m_len set by en_mget */
2861 data = mtod(tmp, u_int32_t *);
2864 kprintf("%s: rx%d: load mbuf %p, m_len=%d, m_data=%p, tlen=%d\n",
2865 sc->sc_dev.dv_xname, slot, tmp, tmp->m_len, tmp->m_data, tlen);
/* small transfers (or DMA disabled) are copied by programmed I/O */
2869 if (EN_NORXDMA || !en_dma || tlen < EN_MINDMA) {
2870 datastop = (u_int32_t *)((u_char *) data + tlen);
2871 /* copy loop: preserve byte order!!! use READDAT */
2872 while (data != datastop) {
2873 *data = EN_READDAT(sc, cur);
2875 EN_WRAPADD(start, stop, cur, 4);
2879 kprintf("%s: rx%d: vci%d: copied %d bytes (%d left)\n",
2880 sc->sc_dev.dv_xname, slot, vci, tlen, need);
2885 /* DMA data (check to see if we need to sync DRQ first) */
2887 EN_DRQADD(sc, WORD_IDX(start,cur), vci, MIDDMA_JK, 0, 0, 0, 0);
2889 kprintf("%s: rx%d: vci%d: drq_sync: advance pointer to %d\n",
2890 sc->sc_dev.dv_xname, slot, vci, cur);
2894 #if !defined(MIDWAY_ENIONLY)
2897 * the adaptec DMA engine is smart and handles everything for us.
2900 if (sc->is_adaptec) {
2902 EN_WRAPADD(start, stop, cur, tlen);
2904 kprintf("%s: rx%d: vci%d: adp_dma %d bytes (%d left)\n",
2905 sc->sc_dev.dv_xname, slot, vci, tlen, need);
2907 end = (need == 0 && !fill) ? MID_DMA_END : 0;
2908 EN_DRQADD(sc, tlen, vci, 0, vtophys(data), mlen, slot, end);
2911 dma = cur; /* update dma pointer */
2914 #endif /* !MIDWAY_ENIONLY */
2917 #if !defined(MIDWAY_ADPONLY)
2920 * the ENI DMA engine is not so smart and need more help from us
2923 /* do we need to do a DMA op to align? */
2925 (needalign = (((uintptr_t) (void *) data) & sc->bestburstmask)) != 0) {
2926 cnt = sc->bestburstlen - needalign;
2929 count = cnt / sizeof(u_int32_t);
2930 bcode = MIDDMA_WORD;
2932 count = cnt / sizeof(u_int32_t);
2933 bcode = en_dmaplan[count].bcode;
2934 count = cnt >> en_dmaplan[count].divshift;
2937 EN_WRAPADD(start, stop, cur, cnt);
2939 kprintf("%s: rx%d: vci%d: al_dma %d bytes (%d left)\n",
2940 sc->sc_dev.dv_xname, slot, vci, cnt, need);
2943 end = (need == 0 && !fill) ? MID_DMA_END : 0;
2944 EN_DRQADD(sc, count, vci, bcode, vtophys(data), mlen, slot, end);
2947 data = (u_int32_t *)((u_char *) data + cnt);
2950 /* do we need a max-sized burst? */
2951 if (tlen >= sc->bestburstlen) {
2952 count = tlen >> sc->bestburstshift;
2953 cnt = count << sc->bestburstshift;
2954 bcode = sc->bestburstcode;
2956 EN_WRAPADD(start, stop, cur, cnt);
2958 kprintf("%s: rx%d: vci%d: best_dma %d bytes (%d left)\n",
2959 sc->sc_dev.dv_xname, slot, vci, cnt, need);
2962 end = (need == 0 && !fill) ? MID_DMA_END : 0;
2963 EN_DRQADD(sc, count, vci, bcode, vtophys(data), mlen, slot, end);
2966 data = (u_int32_t *)((u_char *) data + cnt);
2969 /* do we need to do a cleanup burst? */
2971 count = tlen / sizeof(u_int32_t);
2972 bcode = en_dmaplan[count].bcode;
2973 count = tlen >> en_dmaplan[count].divshift;
2975 EN_WRAPADD(start, stop, cur, tlen);
2977 kprintf("%s: rx%d: vci%d: cleanup_dma %d bytes (%d left)\n",
2978 sc->sc_dev.dv_xname, slot, vci, tlen, need);
2980 end = (need == 0 && !fill) ? MID_DMA_END : 0;
2981 EN_DRQADD(sc, count, vci, bcode, vtophys(data), mlen, slot, end);
2986 dma = cur; /* update dma pointer */
2988 #endif /* !MIDWAY_ADPONLY */
/*
 * skip any 'fill' bytes and/or resync the chip's notion of 'cur'
 * with a MIDDMA_JK ("just kidding") queue entry that moves no data.
 */
2993 if (fill || dma != cur) {
2996 kprintf("%s: rx%d: vci%d: skipping %d bytes of fill\n",
2997 sc->sc_dev.dv_xname, slot, vci, fill);
2999 kprintf("%s: rx%d: vci%d: syncing chip from 0x%x to 0x%x [cur]\n",
3000 sc->sc_dev.dv_xname, slot, vci, dma, cur);
3002 EN_WRAPADD(start, stop, cur, fill);
3003 EN_DRQADD(sc, WORD_IDX(start,cur), vci, MIDDMA_JK, 0, mlen,
3005 /* dma = cur; */ /* not necessary since we are done */
3009 * done, remove stuff we don't want to pass up:
3010 * raw mode (boodi mode): pass everything up for later processing
3012 * aal0: remove RBD + cell header
3019 if (!aal5) cnt += MID_CHDR_SIZE;
3020 m->m_len -= cnt; /* chop! */
3021 m->m_pkthdr.len -= cnt;
/*
 * 'indma' holds mbufs with RX DMA outstanding; presumably drained by
 * the DRQ-complete interrupt path (which reads this queue) -- confirm.
 */
3024 IF_ENQUEUE(&sc->rxslot[slot].indma, m);
3026 sc->rxslot[slot].cur = cur; /* update master copy of 'cur' */
3029 kprintf("%s: rx%d: vci%d: DONE! cur now =0x%x\n",
3030 sc->sc_dev.dv_xname, slot, vci, cur);
3033 goto same_vci; /* get next packet in this slot */
3039 * functions we can call from ddb
3043 * en_dump: dump the state
/*
 * Flag bits for en_dump()'s 'level' argument -- OR together the parts
 * of the driver state to dump.  END_BITS is the matching "%b"-style
 * bit-name description string for printing the mask.
 */
3046 #define END_SWSL 0x00000040 /* swsl state */
3047 #define END_DRQ 0x00000020 /* drq state */
3048 #define END_DTQ 0x00000010 /* dtq state */
3049 #define END_RX 0x00000008 /* rx state */
3050 #define END_TX 0x00000004 /* tx state */
3051 #define END_MREGS 0x00000002 /* registers */
3052 #define END_STATS 0x00000001 /* dump stats */
3054 #define END_BITS "\20\7SWSL\6DRQ\5DTQ\4RX\3TX\2MREGS\1STATS"
3056 /* Do not staticize - meant for calling from DDB! */
/*
 * en_dump: dump driver/device state to the console (meant for DDB).
 * unit == -1 means "all attached units"; 'level' is an END_* bitmask
 * selecting which sections to print.
 * NOTE(review): this listing is an elided excerpt -- embedded original
 * line numbers are non-contiguous, so some statements are missing.
 */
3058 en_dump(int unit, int level)
3060 struct en_softc *sc;
3064 for (lcv = 0 ; lcv < en_cd.cd_ndevs ; lcv++) {
3065 sc = (struct en_softc *) en_cd.cd_devs[lcv];
3066 if (sc == NULL) continue;
3067 if (unit != -1 && unit != lcv)
3070 kprintf("dumping device %s at level 0x%b\n", sc->sc_dev.dv_xname, level,
/* dtq_us == 0 => device not yet initialized (see message below) */
3073 if (sc->dtq_us == 0) {
3074 kprintf("<hasn't been en_init'd yet>\n");
/* END_STATS: software event counters */
3078 if (level & END_STATS) {
3079 kprintf(" en_stats:\n");
3080 kprintf(" %d mfix (%d failed); %d/%d head/tail byte DMAs, %d flushes\n",
3081 sc->mfix, sc->mfixfail, sc->headbyte, sc->tailbyte, sc->tailflush);
3082 kprintf(" %d rx dma overflow interrupts\n", sc->dmaovr);
3083 kprintf(" %d times we ran out of TX space and stalled\n",
3085 kprintf(" %d times we ran out of DTQs\n", sc->txdtqout);
3086 kprintf(" %d times we launched a packet\n", sc->launch);
3087 kprintf(" %d times we launched without on-board header\n", sc->lheader);
3088 kprintf(" %d times we launched without on-board tail\n", sc->ltail);
3089 kprintf(" %d times we pulled the hw service list\n", sc->hwpull);
3090 kprintf(" %d times we pushed a vci on the sw service list\n",
3092 kprintf(" %d times RX pulled an mbuf from Q that wasn't ours\n",
3094 kprintf(" %d times RX pulled a good mbuf from Q\n", sc->rxqus);
3095 kprintf(" %d times we ran out of mbufs *and* DRQs\n", sc->rxoutboth);
3096 kprintf(" %d times we ran out of DRQs\n", sc->rxdrqout);
3098 kprintf(" %d trasmit packets dropped due to mbsize\n", sc->txmbovr);
3099 kprintf(" %d cells trashed due to turned off rxvc\n", sc->vtrash);
3100 kprintf(" %d cells trashed due to totally full buffer\n", sc->otrash);
3101 kprintf(" %d cells trashed due almost full buffer\n", sc->ttrash);
3102 kprintf(" %d rx mbuf allocation failures\n", sc->rxmbufout);
3103 #if defined(NATM) && defined(NATM_STAT)
3104 kprintf(" natmintr so_rcv: ok/drop cnt: %d/%d, ok/drop bytes: %d/%d\n",
3105 natm_sookcnt, natm_sodropcnt, natm_sookbytes, natm_sodropbytes);
/* END_MREGS: raw Midway chip registers plus softc queue pointers */
3109 if (level & END_MREGS) {
3110 kprintf("mregs:\n");
3111 kprintf("resid = 0x%lx\n", (u_long)EN_READ(sc, MID_RESID));
3112 kprintf("interrupt status = 0x%b\n",
3113 (int)EN_READ(sc, MID_INTSTAT), MID_INTBITS);
3114 kprintf("interrupt enable = 0x%b\n",
3115 (int)EN_READ(sc, MID_INTENA), MID_INTBITS);
3116 kprintf("mcsr = 0x%b\n", (int)EN_READ(sc, MID_MAST_CSR), MID_MCSRBITS);
3117 kprintf("serv_write = [chip=%ld] [us=%d]\n",
3118 (long)EN_READ(sc, MID_SERV_WRITE),
3119 MID_SL_A2REG(sc->hwslistp));
3120 kprintf("dma addr = 0x%lx\n", (u_long)EN_READ(sc, MID_DMA_ADDR));
3121 kprintf("DRQ: chip[rd=0x%lx,wr=0x%lx], sc[chip=0x%x,us=0x%x]\n",
3122 (u_long)MID_DRQ_REG2A(EN_READ(sc, MID_DMA_RDRX)),
3123 (u_long)MID_DRQ_REG2A(EN_READ(sc, MID_DMA_WRRX)),
3124 sc->drq_chip, sc->drq_us);
3125 kprintf("DTQ: chip[rd=0x%lx,wr=0x%lx], sc[chip=0x%x,us=0x%x]\n",
3126 (u_long)MID_DTQ_REG2A(EN_READ(sc, MID_DMA_RDTX)),
3127 (u_long)MID_DTQ_REG2A(EN_READ(sc, MID_DMA_WRTX)),
3128 sc->dtq_chip, sc->dtq_us);
3130 kprintf(" unusual txspeeds: ");
3131 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3132 if (sc->txspeed[cnt])
3133 kprintf(" vci%d=0x%x", cnt, sc->txspeed[cnt]);
3136 kprintf(" rxvc slot mappings: ");
3137 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3138 if (sc->rxvc2slot[cnt] != RX_NONE)
3139 kprintf(" %d->%d", cnt, sc->rxvc2slot[cnt]);
/* END_TX: per-slot transmit channel state (software + on-chip) */
3144 if (level & END_TX) {
3146 for (slot = 0 ; slot < EN_NTX; slot++) {
3147 kprintf("tx%d: start/stop/cur=0x%x/0x%x/0x%x [%d] ", slot,
3148 sc->txslot[slot].start, sc->txslot[slot].stop, sc->txslot[slot].cur,
3149 (sc->txslot[slot].cur - sc->txslot[slot].start)/4);
3150 kprintf("mbsize=%d, bfree=%d\n", sc->txslot[slot].mbsize,
3151 sc->txslot[slot].bfree);
3152 kprintf("txhw: base_address=0x%lx, size=%ld, read=%ld, descstart=%ld\n",
3153 (u_long)MIDX_BASE(EN_READ(sc, MIDX_PLACE(slot))),
3154 (u_long)MIDX_SZ(EN_READ(sc, MIDX_PLACE(slot))),
3155 (long)EN_READ(sc, MIDX_READPTR(slot)),
3156 (long)EN_READ(sc, MIDX_DESCSTART(slot)));
/* END_RX: per-slot receive state (software + on-chip VC registers) */
3160 if (level & END_RX) {
3161 kprintf(" recv slots:\n");
3162 for (slot = 0 ; slot < sc->en_nrx; slot++) {
3163 kprintf("rx%d: vci=%d: start/stop/cur=0x%x/0x%x/0x%x ", slot,
3164 sc->rxslot[slot].atm_vci, sc->rxslot[slot].start,
3165 sc->rxslot[slot].stop, sc->rxslot[slot].cur);
3166 kprintf("mode=0x%x, atm_flags=0x%x, oth_flags=0x%x\n",
3167 sc->rxslot[slot].mode, sc->rxslot[slot].atm_flags,
3168 sc->rxslot[slot].oth_flags);
3169 kprintf("RXHW: mode=0x%lx, DST_RP=0x%lx, WP_ST_CNT=0x%lx\n",
3170 (u_long)EN_READ(sc, MID_VC(sc->rxslot[slot].atm_vci)),
3171 (u_long)EN_READ(sc, MID_DST_RP(sc->rxslot[slot].atm_vci)),
3172 (u_long)EN_READ(sc, MID_WP_ST_CNT(sc->rxslot[slot].atm_vci)));
/* END_DTQ: walk and print pending transmit DMA queue entries */
3176 if (level & END_DTQ) {
3177 kprintf(" dtq [need_dtqs=%d,dtq_free=%d]:\n",
3178 sc->need_dtqs, sc->dtq_free);
3180 while (ptr != sc->dtq_us) {
3181 reg = EN_READ(sc, ptr);
3182 kprintf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%lx]\n",
3183 sc->dtq[MID_DTQ_A2REG(ptr)], MID_DMA_CNT(reg), MID_DMA_TXCHAN(reg),
3184 (reg & MID_DMA_END) != 0, MID_DMA_TYPE(reg),
3185 (u_long)EN_READ(sc, ptr+4));
3186 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, ptr, 8);
/* END_DRQ: walk and print pending receive DMA queue entries */
3190 if (level & END_DRQ) {
3191 kprintf(" drq [need_drqs=%d,drq_free=%d]:\n",
3192 sc->need_drqs, sc->drq_free);
3194 while (ptr != sc->drq_us) {
3195 reg = EN_READ(sc, ptr);
3196 kprintf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%lx]\n",
3197 sc->drq[MID_DRQ_A2REG(ptr)], MID_DMA_CNT(reg), MID_DMA_RXVCI(reg),
3198 (reg & MID_DMA_END) != 0, MID_DMA_TYPE(reg),
3199 (u_long)EN_READ(sc, ptr+4));
3200 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, ptr, 8);
/* END_SWSL: print the software service list from head to tail */
3204 if (level & END_SWSL) {
3205 kprintf(" swslist [size=%d]: ", sc->swsl_size);
3206 for (cnt = sc->swsl_head ; cnt != sc->swsl_tail ;
3207 cnt = (cnt + 1) % MID_SL_N)
3208 kprintf("0x%x ", sc->swslist[cnt]);
3216 * en_dumpmem: dump the memory
3219 /* Do not staticize - meant for calling from DDB! */
3221 en_dumpmem(int unit, int addr, int len)
3223 struct en_softc *sc;
3226 if (unit < 0 || unit > en_cd.cd_ndevs ||
3227 (sc = (struct en_softc *) en_cd.cd_devs[unit]) == NULL) {
3228 kprintf("invalid unit number: %d\n", unit);
3232 if (addr < MID_RAMOFF || addr + len*4 > MID_MAXOFF || len <= 0) {
3233 kprintf("invalid addr/len number: %d, %d\n", addr, len);
3236 kprintf("dumping %d words starting at offset 0x%x\n", len, addr);
3238 reg = EN_READ(sc, addr);
3239 kprintf("mem[0x%x] = 0x%x\n", addr, reg);