1 /* $NetBSD: midway.c,v 1.30 1997/09/29 17:40:38 chuck Exp $ */
2 /* (sync'd to midway.c 1.68) */
6 * Copyright (c) 1996 Charles D. Cranor and Washington University.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by Charles D. Cranor and
20 * Washington University.
21 * 4. The name of the author may not be used to endorse or promote products
22 * derived from this software without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
26 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
27 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
29 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
30 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
33 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * $FreeBSD: src/sys/dev/en/midway.c,v 1.19.2.1 2003/01/23 21:06:42 sam Exp $
40 * m i d w a y . c e n i 1 5 5 d r i v e r
42 * author: Chuck Cranor <chuck@ccrc.wustl.edu>
43 * started: spring, 1996 (written from scratch).
45 * notes from the author:
46 * Extra special thanks go to Werner Almesberger, EPFL LRC. Werner's
47 * ENI driver was especially useful in figuring out how this card works.
48 * I would also like to thank Werner for promptly answering email and being
53 #undef EN_DEBUG_RANGE /* check ranges on en_read/en_write's? */
54 #define EN_MBUF_OPT /* try and put more stuff in mbuf? */
58 #define EN_DMA 1 /* use dma? */
60 #define EN_NOTXDMA 0 /* hook to disable tx dma only */
61 #define EN_NORXDMA 0 /* hook to disable rx dma only */
62 #define EN_DDBHOOK 1 /* compile in ddb functions */
63 #if defined(MIDWAY_ADPONLY)
64 #define EN_ENIDMAFIX 0 /* no ENI cards to worry about */
66 #define EN_ENIDMAFIX 1 /* avoid byte DMA on the ENI card (see below) */
70 * note on EN_ENIDMAFIX: the byte aligner on the ENI version of the card
71 * appears to be broken. it works just fine if there is no load... however
72 * when the card is loaded the data get corrupted. to see this, one only
73 * has to use "telnet" over ATM. do the following command in "telnet":
74 * cat /usr/share/misc/termcap
75 * "telnet" seems to generate lots of 1023 byte mbufs (which make great
76 * use of the byte aligner). watch "netstat -s" for checksum errors.
78 * I further tested this by adding a function that compared the transmit
79 * data on the card's SRAM with the data in the mbuf chain _after_ the
80 * "transmit DMA complete" interrupt. using the "telnet" test I got data
81 * mismatches where the byte-aligned data should have been. using ddb
82 * and en_dumpmem() I verified that the DTQs fed into the card were
83 * absolutely correct. thus, we are forced to concluded that the ENI
84 * hardware is buggy. note that the Adaptec version of the card works
85 * just fine with byte DMA.
87 * bottom line: we set EN_ENIDMAFIX to 1 to avoid byte DMAs on the ENI
91 #if defined(DIAGNOSTIC) && !defined(EN_DIAG)
92 #define EN_DIAG /* link in with master DIAG option */
95 #define EN_COUNT(X) (X)++
97 #define EN_COUNT(X) /* nothing */
103 #define STATIC /* nothing */
104 #define INLINE /* nothing */
106 #define STATIC static
107 #define INLINE __inline
108 #endif /* EN_DEBUG */
110 #include "use_en.h" /* XXX for midwayvar.h's NEN */
111 #include "opt_inet.h"
112 #include "opt_natm.h"
114 /* enable DDBHOOK when DDB is available */
120 #include <sys/param.h>
121 #include <sys/systm.h>
122 #include <sys/queue.h>
123 #include <sys/sockio.h>
124 #include <sys/mbuf.h>
125 #include <sys/socket.h>
126 #include <sys/proc.h>
127 #include <sys/thread2.h>
130 #include <net/if_atm.h>
131 #include <net/ifq_var.h>
135 #if defined(INET) || defined(INET6)
136 #include <netinet/in.h>
137 #include <netinet/if_atm.h>
141 #include <netproto/natm/natm.h>
144 #include "midwayreg.h"
145 #include "midwayvar.h"
146 #include <vm/pmap.h> /* for vtophys proto */
148 #ifndef IFF_NOTRAILERS
149 #define IFF_NOTRAILERS 0
153 #define BPFATTACH(ifp, dlt, hlen) bpfattach((ifp), (dlt), (hlen))
160 #define EN_TXHIWAT (64*1024) /* max 64 KB waiting to be DMAd out */
164 #define EN_MINDMA 32 /* don't DMA anything less than this (bytes) */
167 #define RX_NONE 0xffff /* recv VC not in use */
169 #define EN_OBHDR ATM_PH_DRIVER7 /* TBD in first mbuf ! */
170 #define EN_OBTRL ATM_PH_DRIVER8 /* PDU trailier in last mbuf ! */
172 #define ENOTHER_FREE 0x01 /* free rxslot */
173 #define ENOTHER_DRAIN 0x02 /* almost free (drain DRQ dma) */
174 #define ENOTHER_RAW 0x04 /* 'raw' access (aka boodi mode) */
175 #define ENOTHER_SWSL 0x08 /* in software service list */
177 static int en_dma = EN_DMA; /* use DMA (switch off for dbg) */
180 * autoconfig attachments
183 struct cfdriver en_cd = {
192 * params to en_txlaunch() function
196 u_int32_t tbd1; /* TBD 1 */
197 u_int32_t tbd2; /* TBD 2 */
198 u_int32_t pdu1; /* PDU 1 (aal5) */
199 int nodma; /* don't use DMA */
200 int need; /* total space we need (pad out if less data) */
201 int mlen; /* length of mbuf (for dtq) */
202 struct mbuf *t; /* data */
203 u_int32_t aal; /* aal code */
204 u_int32_t atm_vci; /* vci */
205 u_int8_t atm_flags; /* flags */
210 * dma table (index by # of words)
212 * plan A: use WMAYBE (obsolete)
213 * plan B: avoid WMAYBE
217 u_int8_t bcode; /* code */
218 u_int8_t divshift; /* byte divisor */
221 static struct en_dmatab en_dma_planB[] = {
222 { 0, 0 }, /* 0 */ { MIDDMA_WORD, 2}, /* 1 */
223 { MIDDMA_2WORD, 3}, /* 2 */ { MIDDMA_WORD, 2}, /* 3 */
224 { MIDDMA_4WORD, 4}, /* 4 */ { MIDDMA_WORD, 2}, /* 5 */
225 { MIDDMA_2WORD, 3}, /* 6 */ { MIDDMA_WORD, 2}, /* 7 */
226 { MIDDMA_8WORD, 5}, /* 8 */ { MIDDMA_WORD, 2}, /* 9 */
227 { MIDDMA_2WORD, 3}, /* 10 */ { MIDDMA_WORD, 2}, /* 11 */
228 { MIDDMA_4WORD, 4}, /* 12 */ { MIDDMA_WORD, 2}, /* 13 */
229 { MIDDMA_2WORD, 3}, /* 14 */ { MIDDMA_WORD, 2}, /* 15 */
230 { MIDDMA_16WORD, 6}, /* 16 */
233 static struct en_dmatab *en_dmaplan = en_dma_planB;
239 STATIC INLINE int en_b2sz (int) __attribute__ ((unused));
241 int en_dump (int,int);
242 int en_dumpmem (int,int,int);
244 STATIC void en_dmaprobe (struct en_softc *);
245 STATIC int en_dmaprobe_doit (struct en_softc *, u_int8_t *,
247 STATIC INLINE int en_dqneed (struct en_softc *, caddr_t, u_int,
249 STATIC void en_init (struct en_softc *);
250 STATIC int en_ioctl (struct ifnet *, EN_IOCTL_CMDT, caddr_t,
252 STATIC INLINE int en_k2sz (int);
253 STATIC void en_loadvc (struct en_softc *, int);
254 STATIC int en_mfix (struct en_softc *, struct mbuf **,
256 STATIC INLINE struct mbuf *en_mget (struct en_softc *, u_int,
258 STATIC INLINE u_int32_t en_read (struct en_softc *,
260 STATIC int en_rxctl (struct en_softc *, struct atm_pseudoioctl *,
262 STATIC void en_txdma (struct en_softc *, int);
263 STATIC void en_txlaunch (struct en_softc *, int,
265 STATIC void en_service (struct en_softc *);
266 STATIC void en_start (struct ifnet *, struct ifaltq_subque *);
267 STATIC INLINE int en_sz2b (int);
268 STATIC INLINE void en_write (struct en_softc *, u_int32_t,
276 * raw read/write macros
279 #define EN_READDAT(SC,R) en_read(SC,R)
280 #define EN_WRITEDAT(SC,R,V) en_write(SC,R,V)
283 * cooked read/write macros
286 #define EN_READ(SC,R) ntohl(en_read(SC,R))
287 #define EN_WRITE(SC,R,V) en_write(SC,R, htonl(V))
289 #define EN_WRAPADD(START,STOP,CUR,VAL) { \
290 (CUR) = (CUR) + (VAL); \
291 if ((CUR) >= (STOP)) \
292 (CUR) = (START) + ((CUR) - (STOP)); \
295 #define WORD_IDX(START, X) (((X) - (START)) / sizeof(u_int32_t))
297 /* we store sc->dtq and sc->drq data in the following format... */
298 #define EN_DQ_MK(SLOT,LEN) (((SLOT) << 20)|(LEN)|(0x80000))
299 /* the 0x80000 ensures we != 0 */
300 #define EN_DQ_SLOT(X) ((X) >> 20)
301 #define EN_DQ_LEN(X) ((X) & 0x3ffff)
303 /* format of DTQ/DRQ word 1 differs between ENI and ADP */
304 #if defined(MIDWAY_ENIONLY)
306 #define MID_MK_TXQ(SC,CNT,CHAN,END,BCODE) \
307 EN_WRITE((SC), (SC)->dtq_us, \
308 MID_MK_TXQ_ENI((CNT), (CHAN), (END), (BCODE)));
310 #define MID_MK_RXQ(SC,CNT,VCI,END,BCODE) \
311 EN_WRITE((SC), (SC)->drq_us, \
312 MID_MK_RXQ_ENI((CNT), (VCI), (END), (BCODE)));
314 #elif defined(MIDWAY_ADPONLY)
316 #define MID_MK_TXQ(SC,CNT,CHAN,END,JK) \
317 EN_WRITE((SC), (SC)->dtq_us, \
318 MID_MK_TXQ_ADP((CNT), (CHAN), (END), (JK)));
320 #define MID_MK_RXQ(SC,CNT,VCI,END,JK) \
321 EN_WRITE((SC), (SC)->drq_us, \
322 MID_MK_RXQ_ADP((CNT), (VCI), (END), (JK)));
326 #define MID_MK_TXQ(SC,CNT,CHAN,END,JK_OR_BCODE) { \
327 if ((SC)->is_adaptec) \
328 EN_WRITE((SC), (SC)->dtq_us, \
329 MID_MK_TXQ_ADP((CNT), (CHAN), (END), (JK_OR_BCODE))); \
331 EN_WRITE((SC), (SC)->dtq_us, \
332 MID_MK_TXQ_ENI((CNT), (CHAN), (END), (JK_OR_BCODE))); \
335 #define MID_MK_RXQ(SC,CNT,VCI,END,JK_OR_BCODE) { \
336 if ((SC)->is_adaptec) \
337 EN_WRITE((SC), (SC)->drq_us, \
338 MID_MK_RXQ_ADP((CNT), (VCI), (END), (JK_OR_BCODE))); \
340 EN_WRITE((SC), (SC)->drq_us, \
341 MID_MK_RXQ_ENI((CNT), (VCI), (END), (JK_OR_BCODE))); \
346 /* add an item to the DTQ */
347 #define EN_DTQADD(SC,CNT,CHAN,JK_OR_BCODE,ADDR,LEN,END) { \
349 (SC)->dtq[MID_DTQ_A2REG((SC)->dtq_us)] = EN_DQ_MK(CHAN,LEN); \
350 MID_MK_TXQ(SC,CNT,CHAN,END,JK_OR_BCODE); \
352 EN_WRITE((SC), (SC)->dtq_us, (ADDR)); \
353 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, (SC)->dtq_us, 4); \
356 EN_WRITE((SC), MID_DMA_WRTX, MID_DTQ_A2REG((SC)->dtq_us)); \
360 #define EN_DRQADD(SC,CNT,VCI,JK_OR_BCODE,ADDR,LEN,SLOT,END) { \
362 (SC)->drq[MID_DRQ_A2REG((SC)->drq_us)] = EN_DQ_MK(SLOT,LEN); \
363 MID_MK_RXQ(SC,CNT,VCI,END,JK_OR_BCODE); \
365 EN_WRITE((SC), (SC)->drq_us, (ADDR)); \
366 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, (SC)->drq_us, 4); \
369 EN_WRITE((SC), MID_DMA_WRRX, MID_DRQ_A2REG((SC)->drq_us)); \
375 * the code is arranged in a specific way:
376 * [1] short/inline functions
377 * [2] autoconfig stuff
379 * [4] reset -> init -> trasmit -> intr -> receive functions
383 /***********************************************************************/
386 * en_read: read a word from the card. this is the only function
387 * that reads from the card.
390 STATIC INLINE u_int32_t
391 en_read(struct en_softc *sc, u_int32_t r)
393 #ifdef EN_DEBUG_RANGE
394 if (r > MID_MAXOFF || (r % 4))
395 panic("en_read out of range, r=0x%x", r);
398 return(bus_space_read_4(sc->en_memt, sc->en_base, r));
402 * en_write: write a word to the card. this is the only function that
403 * writes to the card.
407 en_write(struct en_softc *sc, u_int32_t r, u_int32_t v)
409 #ifdef EN_DEBUG_RANGE
410 if (r > MID_MAXOFF || (r % 4))
411 panic("en_write out of range, r=0x%x", r);
414 bus_space_write_4(sc->en_memt, sc->en_base, r, v);
/*
 * en_k2sz: map a KByte count to the card's log2 size code.
 * NOTE(review): this dump is missing the function header and the
 * non-default switch cases (embedded line numbers jump 418 -> 433);
 * only the panic default and the en_log2 alias are visible.
 */
418 * en_k2sz: convert KBytes to a size parameter (a log2)
433 default: panic("en_k2sz");
437 #define en_log2(X) en_k2sz(X)
/*
 * en_b2sz: map a MIDDMA_* burst code to its size in bytes (words * 4).
 * unknown codes panic.
 * NOTE(review): function header/braces are missing from this dump;
 * the switch arms below are the visible body.
 */
441 * en_b2sz: convert a DMA burst code to its byte size
448 case MIDDMA_WORD: return(1*4);
450 case MIDDMA_2WORD: return(2*4);
452 case MIDDMA_4WORD: return(4*4);
454 case MIDDMA_8WORD: return(8*4);
455 case MIDDMA_16WMAYBE:
/* 16WMAYBE deliberately falls through: same 64-byte size as 16WORD */
456 case MIDDMA_16WORD: return(16*4);
457 default: panic("en_b2sz");
/*
 * en_sz2b: inverse of en_b2sz -- map a burst size in bytes back to its
 * MIDDMA_* code; sizes that are not a supported power-of-two panic.
 * NOTE(review): function header/braces are missing from this dump.
 */
464 * en_sz2b: convert a burst size (bytes) to DMA burst code
471 case 1*4: return(MIDDMA_WORD);
472 case 2*4: return(MIDDMA_2WORD);
473 case 4*4: return(MIDDMA_4WORD);
474 case 8*4: return(MIDDMA_8WORD);
475 case 16*4: return(MIDDMA_16WORD);
476 default: panic("en_sz2b");
483 * en_dqneed: calculate number of DTQ/DRQ's needed for a buffer
/*
 * en_dqneed: count how many DTQ (tx) or DRQ (rx) entries a buffer of
 * `len' bytes at `data' will consume.  Adaptec cards DMA anything in one
 * request; ENI cards may need extra entries for leading byte bursts,
 * alburst alignment, the main run of best-size bursts, and trailing
 * cleanup.  `tx' selects the transmit rules (byte bursts allowed).
 * NOTE(review): embedded line numbers are non-contiguous -- several
 * interior lines (braces, #elif arms, result init) are missing from
 * this dump; code left byte-identical, comments only.
 */
487 en_dqneed(struct en_softc *sc, caddr_t data, u_int len, u_int tx)
489 int result, needalign, sz;
491 #if !defined(MIDWAY_ENIONLY)
492 #if !defined(MIDWAY_ADPONLY)
494 #endif /* !MIDWAY_ADPONLY */
495 return(1); /* adaptec can DMA anything in one go */
498 #if !defined(MIDWAY_ADPONLY)
/* ENI path below: short buffers are copied / DMA_JK'd as one entry */
500 if (len < EN_MINDMA) {
501 if (!tx) /* XXX: conservative */
502 return(1); /* will copy/DMA_JK */
/* leading unaligned bytes need a byte burst (tx only) */
505 if (tx) { /* byte burst? */
506 needalign = (((uintptr_t) (void *) data) % sizeof(u_int32_t));
509 sz = min(len, sizeof(u_int32_t) - needalign);
/* card requires burst-aligned start: one extra entry to get aligned */
515 if (sc->alburst && len) {
516 needalign = (((uintptr_t) (void *) data) & sc->bestburstmask);
518 result++; /* alburst */
519 sz = min(len, sc->bestburstlen - needalign);
/* bulk of the data goes out in best-size bursts (one entry) */
524 if (len >= sc->bestburstlen) {
525 sz = len / sc->bestburstlen;
526 sz = sz * sc->bestburstlen;
528 result++; /* best shot */
532 result++; /* clean up */
533 if (tx && (len % sizeof(u_int32_t)) != 0)
534 result++; /* byte cleanup */
538 #endif /* !MIDWAY_ADPONLY */
543 * en_mget: get an mbuf chain that can hold totlen bytes and return it
544 * (for recv) [based on am7990_get from if_le and ieget from if_ie]
545 * after this call the sum of all the m_len's in the chain will be totlen.
/*
 * en_mget: allocate an mbuf chain holding exactly `totlen' bytes for
 * receive, and accumulate into *drqneed the number of DRQ entries the
 * chain will need (via en_dqneed per mbuf).  Returns NULL on mbuf or
 * cluster exhaustion.
 * NOTE(review): the chain-building loop and cleanup paths are partially
 * missing from this dump (line numbers jump); comments only.
 */
548 STATIC INLINE struct mbuf *
549 en_mget(struct en_softc *sc, u_int totlen, u_int *drqneed)
552 struct mbuf *top, **mp;
/* first mbuf is a packet header; stamp receiving interface and length */
555 MGETHDR(m, MB_DONTWAIT, MT_DATA);
558 m->m_pkthdr.rcvif = &sc->enif;
559 m->m_pkthdr.len = totlen;
564 /* if (top != NULL) then we've already got 1 mbuf on the chain */
567 MGET(m, MB_DONTWAIT, MT_DATA);
570 return(NULL); /* out of mbufs */
/* use a cluster when enough data remains to justify one */
574 if (totlen >= MINCLSIZE) {
575 MCLGET(m, MB_DONTWAIT);
576 if ((m->m_flags & M_EXT) == 0) {
579 return(NULL); /* out of mbuf clusters */
583 m->m_len = min(totlen, m->m_len);
/* each mbuf's data pointer/length determines its DRQ cost */
588 *drqneed += en_dqneed(sc, m->m_data, m->m_len, 0);
594 /***********************************************************************/
/*
 * en_attach: finish bringing up one card.  Probes the amount of
 * on-board RAM (the ENI aliases addresses if it has less than 2MB),
 * runs the DMA probe, prints identification, wires the ifnet entry
 * points, carves the on-board buffer RAM into EN_NTX transmit slots
 * and as many EN_RXSZ-KB receive slots as fit (always leaving one VC
 * slot free so full/empty service lists are distinguishable), zeroes
 * the statistics counters, and attaches the ATM interface.
 * NOTE(review): embedded line numbers are non-contiguous -- error
 * returns, #ifdef EN_DEBUG brackets and other interior lines are
 * missing from this dump; code left byte-identical, comments only.
 */
601 en_attach(struct en_softc *sc)
603 struct ifnet *ifp = &sc->enif;
604 char ethstr[ETHER_ADDRSTRLEN + 1];
606 u_int32_t reg, lcv, check, ptr, sav, midvloc;
609 * probe card to determine memory size. the stupid ENI card always
610 * reports to PCI that it needs 4MB of space (2MB regs and 2MB RAM).
611 * if it has less than 2MB RAM the addresses wrap in the RAM address space.
612 * (i.e. on a 512KB card addresses 0x3ffffc, 0x37fffc, and 0x2ffffc
613 * are aliases for 0x27fffc [note that RAM starts at offset 0x200000]).
618 EN_WRITE(sc, MID_RESID, 0x0); /* reset card before touching RAM */
619 for (lcv = MID_PROBEOFF; lcv <= MID_MAXOFF ; lcv += MID_PROBSIZE) {
620 EN_WRITE(sc, lcv, lcv); /* data[address] = address */
621 for (check = MID_PROBEOFF ; check < lcv ; check += MID_PROBSIZE) {
622 reg = EN_READ(sc, check);
623 if (reg != check) { /* found an alias! */
624 goto done_probe; /* and quit */
629 lcv -= MID_PROBSIZE; /* take one step back */
630 sc->en_obmemsz = (lcv + 4) - MID_RAMOFF;
633 * determine the largest DMA burst supported
644 EN_WRITE(sc, MID_RESID, 0x0); /* reset */
645 for (lcv = MID_RAMOFF ; lcv < MID_RAMOFF + sc->en_obmemsz ; lcv += 4)
646 EN_WRITE(sc, lcv, 0); /* zero memory */
/* MID_RESID also carries the version/board-ID fields printed below */
648 reg = EN_READ(sc, MID_RESID);
650 kprintf("%s: ATM midway v%d, board IDs %d.%d, %s%s%s, %ldKB on-board RAM\n",
651 sc->sc_dev.dv_xname, MID_VER(reg), MID_MID(reg), MID_DID(reg),
652 (MID_IS_SABRE(reg)) ? "sabre controller, " : "",
653 (MID_IS_SUNI(reg)) ? "SUNI" : "Utopia",
654 (!MID_IS_SUNI(reg) && MID_IS_UPIPE(reg)) ? " (pipelined)" : "",
655 (long)(sc->en_obmemsz / 1024));
/* adaptec is expected to pass the 64-byte unaligned test exactly */
657 if (sc->is_adaptec) {
658 if (sc->bestburstlen == 64 && sc->alburst == 0)
659 kprintf("%s: passed 64 byte DMA test\n", sc->sc_dev.dv_xname);
661 kprintf("%s: FAILED DMA TEST: burst=%d, alburst=%d\n",
662 sc->sc_dev.dv_xname, sc->bestburstlen, sc->alburst);
664 kprintf("%s: maximum DMA burst length = %d bytes%s\n", sc->sc_dev.dv_xname,
665 sc->bestburstlen, (sc->alburst) ? " (must align)" : "");
669 * link into network subsystem and prepare card
672 sc->enif.if_softc = sc;
673 ifp->if_flags = IFF_SIMPLEX|IFF_NOTRAILERS;
674 ifp->if_ioctl = en_ioctl;
675 ifp->if_output = atm_output;
676 ifp->if_start = en_start;
/* every VC starts unmapped (RX_NONE) and at full tx speed via slot 0 */
682 for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
683 sc->rxvc2slot[lcv] = RX_NONE;
684 sc->txspeed[lcv] = 0; /* full */
685 sc->txvc2slot[lcv] = 0; /* full speed == slot 0 */
/* carve buffer RAM: tx slots first, aligned to EN_TXSZ KB */
688 sz = sc->en_obmemsz - (MID_BUFOFF - MID_RAMOFF);
689 ptr = sav = MID_BUFOFF;
690 ptr = roundup(ptr, EN_TXSZ * 1024); /* align */
691 sz = sz - (ptr - sav);
692 if (EN_TXSZ*1024 * EN_NTX > sz) {
693 kprintf("%s: EN_NTX/EN_TXSZ too big\n", sc->sc_dev.dv_xname);
696 for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
697 sc->txslot[lcv].mbsize = 0;
698 sc->txslot[lcv].start = ptr;
699 ptr += (EN_TXSZ * 1024);
700 sz -= (EN_TXSZ * 1024);
701 sc->txslot[lcv].stop = ptr;
702 sc->txslot[lcv].nref = 0;
703 bzero(&sc->txslot[lcv].indma, sizeof(sc->txslot[lcv].indma));
704 bzero(&sc->txslot[lcv].q, sizeof(sc->txslot[lcv].q));
706 kprintf("%s: tx%d: start 0x%x, stop 0x%x\n", sc->sc_dev.dv_xname, lcv,
707 sc->txslot[lcv].start, sc->txslot[lcv].stop);
/* remaining RAM becomes receive slots, aligned to EN_RXSZ KB */
712 ptr = roundup(ptr, EN_RXSZ * 1024); /* align */
713 sz = sz - (ptr - sav);
714 sc->en_nrx = sz / (EN_RXSZ * 1024);
715 if (sc->en_nrx <= 0) {
716 kprintf("%s: EN_NTX/EN_TXSZ/EN_RXSZ too big\n", sc->sc_dev.dv_xname);
721 * ensure that there is always one VC slot on the service list free
722 * so that we can tell the difference between a full and empty list.
724 if (sc->en_nrx >= MID_N_VC)
725 sc->en_nrx = MID_N_VC - 1;
727 for (lcv = 0 ; lcv < sc->en_nrx ; lcv++) {
728 sc->rxslot[lcv].rxhand = NULL;
729 sc->rxslot[lcv].oth_flags = ENOTHER_FREE;
730 bzero(&sc->rxslot[lcv].indma, sizeof(sc->rxslot[lcv].indma));
731 bzero(&sc->rxslot[lcv].q, sizeof(sc->rxslot[lcv].q));
732 midvloc = sc->rxslot[lcv].start = ptr;
733 ptr += (EN_RXSZ * 1024);
734 sz -= (EN_RXSZ * 1024);
735 sc->rxslot[lcv].stop = ptr;
/* precompute the MIDV mode word encoding this slot's RAM location/size */
736 midvloc = midvloc - MID_RAMOFF;
737 midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2; /* mask, cvt to words */
738 midvloc = midvloc >> MIDV_LOCTOPSHFT; /* we only want the top 11 bits */
739 midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
740 sc->rxslot[lcv].mode = midvloc |
741 (en_k2sz(EN_RXSZ) << MIDV_SZSHIFT) | MIDV_TRASH;
744 kprintf("%s: rx%d: start 0x%x, stop 0x%x, mode 0x%x\n", sc->sc_dev.dv_xname,
745 lcv, sc->rxslot[lcv].start, sc->rxslot[lcv].stop, sc->rxslot[lcv].mode);
/* zero all the driver statistics counters */
750 sc->vtrash = sc->otrash = sc->mfix = sc->txmbovr = sc->dmaovr = 0;
751 sc->txoutspace = sc->txdtqout = sc->launch = sc->lheader = sc->ltail = 0;
752 sc->hwpull = sc->swadd = sc->rxqnotus = sc->rxqus = sc->rxoutboth = 0;
753 sc->rxdrqout = sc->ttrash = sc->rxmbufout = sc->mfixfail = 0;
754 sc->headbyte = sc->tailbyte = sc->tailflush = 0;
756 sc->need_drqs = sc->need_dtqs = 0;
758 kprintf("%s: %d %dKB receive buffers, %d %dKB transmit buffers allocated\n",
759 sc->sc_dev.dv_xname, sc->en_nrx, EN_RXSZ, EN_NTX, EN_TXSZ);
761 kprintf("%s: End Station Identifier (mac address) %s\n",
762 sc->sc_dev.dv_xname, kether_ntoa(sc->macaddr, ethstr));
767 atm_ifattach(ifp, NULL);
772 * en_dmaprobe: helper function for en_attach.
774 * see how the card handles DMA by running a few DMA tests. we need
775 * to figure out the largest number of bytes we can DMA in one burst
776 * ("bestburstlen"), and if the starting address for a burst needs to
777 * be aligned on any sort of boundary or not ("alburst").
780 * sparc1: bestburstlen=4, alburst=0 (ick, broken DMA!)
781 * sparc2: bestburstlen=64, alburst=1
782 * p166: bestburstlen=64, alburst=0
785 #define NBURSTS 3 /* number of bursts to use for dmaprobe */
786 #define BOUNDARY 1024 /* test misaligned dma crossing the bounday.
787 should be n * 64. at least 64*(NBURSTS+1).
788 dell P6 with EDO DRAM has 1K bounday problem */
/*
 * en_dmaprobe: determine the card/chipset's DMA capabilities by running
 * loopback bursts (en_dmaprobe_doit) at aligned and misaligned offsets.
 * Sets sc->bestburstlen/shift/mask/code and warns when the chipset
 * cannot do clean 64-byte DMA.  See the comment block above for
 * measured results on known machines.
 * NOTE(review): interior lines (sp/dp increments, alburst assignment,
 * second probe loop) are missing from this dump; comments only.
 */
791 en_dmaprobe(struct en_softc *sc)
794 /* be careful. kernel stack is only 8K */
795 u_int8_t buffer[BOUNDARY * 2 + 64 * (NBURSTS + 1)];
797 u_int32_t srcbuf[64], dstbuf[64];
800 int bestalgn, bestnotalgn, lcv, try;
805 /* setup src and dst buf at the end of the boundary */
806 sp = (u_int8_t *)roundup((uintptr_t)(void *)buffer, 64);
807 while (((uintptr_t)(void *)sp & (BOUNDARY - 1)) != (BOUNDARY - 64))
812 * we can't dma across page boundary so that, if buf is at a page
813 * boundary, move it to the next page. but still either src or dst
814 * will be at the boundary, which should be ok.
816 if ((((uintptr_t)(void *)sp + 64) & PAGE_MASK) == 0)
818 if ((((uintptr_t)(void *)dp + 64) & PAGE_MASK) == 0)
/* non-NBURSTS variant: align sp/dp up to MIDDMA_MAXBURST instead */
821 sp = (u_int8_t *) srcbuf;
822 while ((((unsigned long) sp) % MIDDMA_MAXBURST) != 0)
824 dp = (u_int8_t *) dstbuf;
825 while ((((unsigned long) dp) % MIDDMA_MAXBURST) != 0)
827 #endif /* !NBURSTS */
/* probe aligned first, then every 4-byte misalignment, keep the worst */
829 bestalgn = bestnotalgn = en_dmaprobe_doit(sc, sp, dp, 0);
831 for (lcv = 4 ; lcv < MIDDMA_MAXBURST ; lcv += 4) {
832 try = en_dmaprobe_doit(sc, sp+lcv, dp+lcv, 0);
834 if (try < bestnotalgn) {
839 if (try < bestnotalgn)
844 if (bestalgn != bestnotalgn) /* need bursts aligned */
847 sc->bestburstlen = bestalgn;
848 sc->bestburstshift = en_log2(bestalgn);
849 sc->bestburstmask = sc->bestburstlen - 1; /* must be power of 2 */
850 sc->bestburstcode = en_sz2b(bestalgn);
853 * correct pci chipsets should be able to handle misaligned-64-byte DMA.
854 * but there are too many broken chipsets around. we try to work around
855 * by finding the best workable dma size, but still some broken machines
856 * exhibit the problem later. so warn it here.
858 if (bestalgn != 64 || sc->alburst != 0) {
859 kprintf("%s: WARNING: DMA test detects a broken PCI chipset!\n",
860 sc->sc_dev.dv_xname);
861 kprintf(" trying to work around the problem... but if this doesn't\n");
862 kprintf(" work for you, you'd better switch to a newer motherboard.\n");
869 * en_dmaprobe_doit: do actual testing
/*
 * en_dmaprobe_doit: run one DMA loopback test.  Sets up a 1K buffer in
 * card SRAM at MID_BUFOFF, DMAs sample data out to the card (DTQ), then
 * back in (DRQ), and compares source and destination.  Returns the
 * largest burst size that worked (retval); any timeout or miscompare
 * returns the previous (last good) value.  wmtry != 0 selects the
 * obsolete "wmaybe" byte-count test instead of the burst test.
 * NOTE(review): embedded line numbers are non-contiguous -- #if arms,
 * timeout counters and retval updates are missing from this dump;
 * statement order is hardware-critical, so code left byte-identical.
 */
873 en_dmaprobe_doit(struct en_softc *sc, u_int8_t *sp, u_int8_t *dp, int wmtry)
875 int lcv, retval = 4, cnt, count;
876 u_int32_t reg, bcode, midvloc;
879 * set up a 1k buffer at MID_BUFOFF
884 EN_WRITE(sc, MID_RESID, 0x0); /* reset card before touching RAM */
886 midvloc = ((MID_BUFOFF - MID_RAMOFF) / sizeof(u_int32_t)) >> MIDV_LOCTOPSHFT;
887 EN_WRITE(sc, MIDX_PLACE(0), MIDX_MKPLACE(en_k2sz(1), midvloc));
888 EN_WRITE(sc, MID_VC(0), (midvloc << MIDV_LOCSHIFT)
889 | (en_k2sz(1) << MIDV_SZSHIFT) | MIDV_TRASH);
890 EN_WRITE(sc, MID_DST_RP(0), 0);
891 EN_WRITE(sc, MID_WP_ST_CNT(0), 0);
894 for (lcv = 0 ; lcv < 64*NBURSTS; lcv++) /* set up sample data */
896 for (lcv = 0 ; lcv < 68 ; lcv++) /* set up sample data */
899 EN_WRITE(sc, MID_MAST_CSR, MID_MCSR_ENDMA); /* enable DMA (only) */
/* snapshot the chip's current DTQ/DRQ positions before queueing */
901 sc->drq_chip = MID_DRQ_REG2A(EN_READ(sc, MID_DMA_RDRX));
902 sc->dtq_chip = MID_DTQ_REG2A(EN_READ(sc, MID_DMA_RDTX));
905 * try it now . . . DMA it out, then DMA it back in and compare
907 * note: in order to get the dma stuff to reverse directions it wants
908 * the "end" flag set! since we are not dma'ing valid data we may
909 * get an ident mismatch interrupt (which we will ignore).
911 * note: we've got two different tests rolled up in the same loop
913 * then we are doing a wmaybe test and wmtry is a byte count
914 * else we are doing a burst test
917 for (lcv = 8 ; lcv <= MIDDMA_MAXBURST ; lcv = lcv * 2) {
920 kprintf("DMA test lcv=%d, sp=0x%x, dp=0x%x, wmtry=%d\n",
924 /* zero SRAM and dest buffer */
925 for (cnt = 0 ; cnt < 1024; cnt += 4)
926 EN_WRITE(sc, MID_BUFOFF+cnt, 0); /* zero memory */
928 for (cnt = 0 ; cnt < 64*NBURSTS; cnt++)
930 for (cnt = 0 ; cnt < 68 ; cnt++)
/* wmaybe path: derive burst code and word count from the dma plan table */
935 count = (sc->bestburstlen - sizeof(u_int32_t)) / sizeof(u_int32_t);
936 bcode = en_dmaplan[count].bcode;
937 count = wmtry >> en_dmaplan[count].divshift;
939 bcode = en_sz2b(lcv);
943 /* build lcv-byte-DMA x NBURSTS */
945 EN_WRITE(sc, sc->dtq_chip, MID_MK_TXQ_ADP(lcv*NBURSTS, 0, MID_DMA_END, 0));
947 EN_WRITE(sc, sc->dtq_chip, MID_MK_TXQ_ENI(count*NBURSTS, 0, MID_DMA_END, bcode));
948 EN_WRITE(sc, sc->dtq_chip+4, vtophys(sp));
949 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
950 EN_WRITE(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
/* poll until the chip's read pointer catches up (tx complete) */
952 while (EN_READ(sc, MID_DMA_RDTX) != MID_DTQ_A2REG(sc->dtq_chip)) {
956 kprintf("%s: unexpected timeout in tx DMA test\n", sc->sc_dev.dv_xname);
958 kprintf(" alignment=0x%x, burst size=%d, dma addr reg=0x%x\n",
959 (u_long)sp & 63, lcv, EN_READ(sc, MID_DMA_ADDR));
961 return(retval); /* timeout, give up */
966 EN_WRITE(sc, sc->dtq_chip, MID_MK_TXQ_ADP(lcv, 0, MID_DMA_END, 0));
968 EN_WRITE(sc, sc->dtq_chip, MID_MK_TXQ_ENI(count, 0, MID_DMA_END, bcode));
969 EN_WRITE(sc, sc->dtq_chip+4, vtophys(sp));
970 EN_WRITE(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip+8));
972 while (EN_READ(sc, MID_DMA_RDTX) == MID_DTQ_A2REG(sc->dtq_chip)) {
976 kprintf("%s: unexpected timeout in tx DMA test\n", sc->sc_dev.dv_xname);
977 return(retval); /* timeout, give up */
980 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, sc->dtq_chip, 8);
981 #endif /* !NBURSTS */
/* ack the tx-DMA-complete interrupt and verify it was the expected one */
982 reg = EN_READ(sc, MID_INTACK);
983 if ((reg & MID_INT_DMA_TX) != MID_INT_DMA_TX) {
984 kprintf("%s: unexpected status in tx DMA test: 0x%x\n",
985 sc->sc_dev.dv_xname, reg);
988 EN_WRITE(sc, MID_MAST_CSR, MID_MCSR_ENDMA); /* re-enable DMA (only) */
990 /* "return to sender..." address is known ... */
993 /* build lcv-byte-DMA x NBURSTS */
995 EN_WRITE(sc, sc->drq_chip, MID_MK_RXQ_ADP(lcv*NBURSTS, 0, MID_DMA_END, 0));
997 EN_WRITE(sc, sc->drq_chip, MID_MK_RXQ_ENI(count*NBURSTS, 0, MID_DMA_END, bcode));
998 EN_WRITE(sc, sc->drq_chip+4, vtophys(dp));
999 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
1000 EN_WRITE(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
1002 while (EN_READ(sc, MID_DMA_RDRX) != MID_DRQ_A2REG(sc->drq_chip)) {
1006 kprintf("%s: unexpected timeout in rx DMA test\n", sc->sc_dev.dv_xname);
1007 return(retval); /* timeout, give up */
1010 #else /* !NBURSTS */
1012 EN_WRITE(sc, sc->drq_chip, MID_MK_RXQ_ADP(lcv, 0, MID_DMA_END, 0));
1014 EN_WRITE(sc, sc->drq_chip, MID_MK_RXQ_ENI(count, 0, MID_DMA_END, bcode));
1015 EN_WRITE(sc, sc->drq_chip+4, vtophys(dp));
1016 EN_WRITE(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip+8));
1018 while (EN_READ(sc, MID_DMA_RDRX) == MID_DRQ_A2REG(sc->drq_chip)) {
1022 kprintf("%s: unexpected timeout in rx DMA test\n", sc->sc_dev.dv_xname);
1023 return(retval); /* timeout, give up */
1026 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, sc->drq_chip, 8);
1027 #endif /* !NBURSTS */
/* ack the rx-DMA-complete interrupt and verify */
1028 reg = EN_READ(sc, MID_INTACK);
1029 if ((reg & MID_INT_DMA_RX) != MID_INT_DMA_RX) {
1030 kprintf("%s: unexpected status in rx DMA test: 0x%x\n",
1031 sc->sc_dev.dv_xname, reg);
1034 EN_WRITE(sc, MID_MAST_CSR, MID_MCSR_ENDMA); /* re-enable DMA (only) */
1037 return(bcmp(sp, dp, wmtry)); /* wmtry always exits here, no looping */
/* compare what came back with what went out; mismatch ends the test */
1041 if (bcmp(sp, dp, lcv * NBURSTS)) {
1042 /* kprintf("DMA test failed! lcv=%d, sp=0x%x, dp=0x%x\n", lcv, sp, dp); */
1043 return(retval); /* failed, use last value */
1046 if (bcmp(sp, dp, lcv))
1047 return(retval); /* failed, use last value */
1053 return(retval); /* studly 64 byte DMA present! oh baby!! */
1056 /***********************************************************************/
1059 * en_ioctl: handle ioctl requests
1061 * NOTE: if you add an ioctl to set txspeed, you should choose a new
1062 * TX channel/slot. Choose the one with the lowest sc->txslot[slot].nref
1063 * value, subtract one from sc->txslot[0].nref, add one to the
1064 * sc->txslot[slot].nref, set sc->txvc2slot[vci] = slot, and then set
/*
 * en_ioctl: interface ioctl handler.  Visible cases: SIOCATMENA/
 * SIOCATMDIS (enable/disable a VC for receive via en_rxctl), the raw
 * ("boodi") mode toggle with its EN_RXSZ*1024 threshold clamp,
 * SIOCSIFADDR-style bring-up for INET/INET6, and SIOCSIFMTU.
 * NOTE(review): the switch statement, crit_enter/exit brackets,
 * default case and return are missing from this dump (line numbers are
 * non-contiguous); code left byte-identical, comments only.
 */
1069 en_ioctl(struct ifnet *ifp, EN_IOCTL_CMDT cmd, caddr_t data, struct ucred *cr)
1071 struct en_softc *sc = (struct en_softc *) ifp->if_softc;
1072 struct ifaddr *ifa = (struct ifaddr *) data;
1073 struct ifreq *ifr = (struct ifreq *) data;
1074 struct atm_pseudoioctl *api = (struct atm_pseudoioctl *)data;
1076 struct atm_rawioctl *ario = (struct atm_rawioctl *)data;
1084 case SIOCATMENA: /* enable circuit for recv */
1085 error = en_rxctl(sc, api, 1);
1088 case SIOCATMDIS: /* disable circuit for recv */
1089 error = en_rxctl(sc, api, 0);
/* raw mode: VC must already be mapped to a receive slot */
1094 if ((slot = sc->rxvc2slot[ario->npcb->npcb_vci]) == RX_NONE) {
1098 if (ario->rawvalue > EN_RXSZ*1024)
1099 ario->rawvalue = EN_RXSZ*1024;
1100 if (ario->rawvalue) {
1101 sc->rxslot[slot].oth_flags |= ENOTHER_RAW;
1102 sc->rxslot[slot].raw_threshold = ario->rawvalue;
1104 sc->rxslot[slot].oth_flags &= (~ENOTHER_RAW);
1105 sc->rxslot[slot].raw_threshold = 0;
1108 kprintf("%s: rxvci%d: turn %s raw (boodi) mode\n",
1109 sc->sc_dev.dv_xname, ario->npcb->npcb_vci,
1110 (ario->rawvalue) ? "on" : "off");
1115 ifp->if_flags |= IFF_UP;
1116 #if defined(INET) || defined(INET6)
1117 if (ifa->ifa_addr->sa_family == AF_INET
1118 || ifa->ifa_addr->sa_family == AF_INET6) {
1121 ifa->ifa_rtrequest = atm_rtrequest; /* ??? */
1125 /* what to do if not INET? */
1138 #if defined(SIOCSIFMTU) /* ??? copied from if_de */
1139 #if !defined(ifr_mtu)
1140 #define ifr_mtu ifr_metric
1144 * Set the interface MTU.
/* reject MTUs above the ATM maximum */
1147 if (ifr->ifr_mtu > ATMMTU) {
1152 ifp->if_mtu = ifr->ifr_mtu;
1153 /* XXXCDC: do we really need to reset on MTU size change? */
1157 #endif /* SIOCSIFMTU */
1169 * en_rxctl: turn on and off VCs for recv.
/*
 * en_rxctl: enable (on != 0) or disable a VC for receive.  Enable finds
 * a free receive slot, binds the VC to it, programs the slot mode
 * (AAL5 vs no-AAL from the caller's flags) and loads it into the card;
 * disable switches the VC to trash mode and either frees the slot
 * immediately or marks it ENOTHER_DRAIN if DMA/queued data remains.
 * NOTE(review): several interior lines (returns of EINVAL/error codes,
 * braces) are missing from this dump; code left byte-identical.
 */
1173 en_rxctl(struct en_softc *sc, struct atm_pseudoioctl *pi, int on)
1175 u_int vci, flags, slot;
1176 u_int32_t oldmode, newmode;
1178 vci = ATM_PH_VCI(&pi->aph);
1179 flags = ATM_PH_FLAGS(&pi->aph);
1182 kprintf("%s: %s vpi=%d, vci=%d, flags=%d\n", sc->sc_dev.dv_xname,
1183 (on) ? "enable" : "disable", ATM_PH_VPI(&pi->aph), vci, flags);
/* only VPI 0 and in-range VCIs are supported */
1186 if (ATM_PH_VPI(&pi->aph) || vci >= MID_N_VC)
1194 if (sc->rxvc2slot[vci] != RX_NONE)
1196 for (slot = 0 ; slot < sc->en_nrx ; slot++)
1197 if (sc->rxslot[slot].oth_flags & ENOTHER_FREE)
1199 if (slot == sc->en_nrx)
1201 sc->rxvc2slot[vci] = slot;
1202 sc->rxslot[slot].rxhand = NULL;
1203 oldmode = sc->rxslot[slot].mode;
1204 newmode = (flags & ATM_PH_AAL5) ? MIDV_AAL5 : MIDV_NOAAL;
1205 sc->rxslot[slot].mode = MIDV_SETMODE(oldmode, newmode);
1206 sc->rxslot[slot].atm_vci = vci;
1207 sc->rxslot[slot].atm_flags = flags;
1208 sc->rxslot[slot].oth_flags = 0;
1209 sc->rxslot[slot].rxhand = pi->rxhand;
1210 if (sc->rxslot[slot].indma.ifq_head || sc->rxslot[slot].q.ifq_head)
1211 panic("en_rxctl: left over mbufs on enable")
;
1212 sc->txspeed[vci] = 0; /* full speed to start */
1213 sc->txvc2slot[vci] = 0; /* init value */
1214 sc->txslot[0].nref++; /* bump reference count */
1215 en_loadvc(sc, vci); /* does debug kprintf for us */
/* disable path: VC must be mapped and not already free/draining */
1223 if (sc->rxvc2slot[vci] == RX_NONE)
1225 slot = sc->rxvc2slot[vci];
1226 if ((sc->rxslot[slot].oth_flags & (ENOTHER_FREE|ENOTHER_DRAIN)) != 0)
1228 crit_enter(); /* block out enintr() */
1229 oldmode = EN_READ(sc, MID_VC(vci));
1230 newmode = MIDV_SETMODE(oldmode, MIDV_TRASH) & ~MIDV_INSERVICE;
1231 EN_WRITE(sc, MID_VC(vci), (newmode | (oldmode & MIDV_INSERVICE)));
1232 /* halt in tracks, be careful to preserve inserivce bit */
1234 sc->rxslot[slot].rxhand = NULL;
1235 sc->rxslot[slot].mode = newmode;
1237 sc->txslot[sc->txvc2slot[vci]].nref--;
1238 sc->txspeed[vci] = 0;
1239 sc->txvc2slot[vci] = 0;
1241 /* if stuff is still going on we are going to have to drain it out */
1242 if (sc->rxslot[slot].indma.ifq_head ||
1243 sc->rxslot[slot].q.ifq_head ||
1244 (sc->rxslot[slot].oth_flags & ENOTHER_SWSL) != 0) {
1245 sc->rxslot[slot].oth_flags |= ENOTHER_DRAIN;
1247 sc->rxslot[slot].oth_flags = ENOTHER_FREE;
1248 sc->rxslot[slot].atm_vci = RX_NONE;
1249 sc->rxvc2slot[vci] = RX_NONE;
1251 crit_exit(); /* enable enintr() */
1253 kprintf("%s: rx%d: VCI %d is now %s\n", sc->sc_dev.dv_xname, slot, vci,
1254 (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) ? "draining" : "free");
1259 /***********************************************************************/
1262 * en_reset: reset the board, throw away work in progress.
1263 * must en_init to recover.
/*
 * en_reset: reset the hardware and throw away all work in progress.
 * Frees every mbuf queued or in-DMA on all receive and transmit slots;
 * receive slots that were draining become free.  Caller must en_init()
 * afterwards to use the card again (per the comment above).
 * NOTE(review): the m_freem() calls and loop braces are missing from
 * this dump (line numbers jump); code left byte-identical.
 */
1267 en_reset(struct en_softc *sc)
1273 kprintf("%s: reset\n", sc->sc_dev.dv_xname);
/* bus-specific reset hook first (if any), then the chip reset register */
1276 if (sc->en_busreset)
1277 sc->en_busreset(sc);
1278 EN_WRITE(sc, MID_RESID, 0x0); /* reset hardware */
1281 * recv: dump any mbufs we are dma'ing into, if DRAINing, then a reset
1285 for (lcv = 0 ; lcv < MID_N_VC ; lcv++) {
1286 if (sc->rxvc2slot[lcv] == RX_NONE)
1288 slot = sc->rxvc2slot[lcv];
1290 IF_DEQUEUE(&sc->rxslot[slot].indma, m);
1292 break; /* >>> exit 'while(1)' here <<< */
1296 IF_DEQUEUE(&sc->rxslot[slot].q, m);
1298 break; /* >>> exit 'while(1)' here <<< */
1301 sc->rxslot[slot].oth_flags &= ~ENOTHER_SWSL;
/* a reset completes any pending drain: slot goes back to the free pool */
1302 if (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) {
1303 sc->rxslot[slot].oth_flags = ENOTHER_FREE;
1304 sc->rxvc2slot[lcv] = RX_NONE;
1306 kprintf("%s: rx%d: VCI %d is now free\n", sc->sc_dev.dv_xname, slot, lcv);
1312 * xmit: dump everything
1315 for (lcv = 0 ; lcv < EN_NTX ; lcv++) {
1317 IF_DEQUEUE(&sc->txslot[lcv].indma, m);
1319 break; /* >>> exit 'while(1)' here <<< */
1323 IF_DEQUEUE(&sc->txslot[lcv].q, m);
1325 break; /* >>> exit 'while(1)' here <<< */
1329 sc->txslot[lcv].mbsize = 0;
1337 * en_init: init board and sync the card with the data in the softc.
1341 en_init(struct en_softc *sc)
/* interface marked down: quiesce the card and clear RUNNING */
1346 if ((sc->enif.if_flags & IFF_UP) == 0) {
1348 kprintf("%s: going down\n", sc->sc_dev.dv_xname);
1350 en_reset(sc); /* to be safe */
1351 sc->enif.if_flags &= ~IFF_RUNNING; /* disable */
1356 kprintf("%s: going up\n", sc->sc_dev.dv_xname);
1358 sc->enif.if_flags |= IFF_RUNNING; /* enable */
1360 if (sc->en_busreset)
1361 sc->en_busreset(sc);
1362 EN_WRITE(sc, MID_RESID, 0x0); /* reset */
1365 * init obmem data structures: vc tab, dma q's, slist.
1367 * note that we set drq_free/dtq_free to one less than the total number
1368 * of DTQ/DRQs present. we do this because the card uses the condition
1369 * (drq_chip == drq_us) to mean "list is empty"... but if you allow the
1370 * circular list to be completely full then (drq_chip == drq_us) [i.e.
1371 * the drq_us pointer will wrap all the way around]. by restricting
1372 * the number of active requests to (N - 1) we prevent the list from
1373 * becoming completely full. note that the card will sometimes give
1374 * us an interrupt for a DTQ/DRQ we have already processed... this helps
1375 * keep that interrupt from messing us up.
1378 for (vc = 0 ; vc < MID_N_VC ; vc++)
/* RX (DRQ) side: sync our notion of the chip's queue pointer */
1381 bzero(&sc->drq, sizeof(sc->drq));
1382 sc->drq_free = MID_DRQ_N - 1; /* N - 1 */
1383 sc->drq_chip = MID_DRQ_REG2A(EN_READ(sc, MID_DMA_RDRX));
1384 EN_WRITE(sc, MID_DMA_WRRX, MID_DRQ_A2REG(sc->drq_chip));
1385 /* ensure zero queue */
1386 sc->drq_us = sc->drq_chip;
/* TX (DTQ) side: mirror of the DRQ setup above */
1388 bzero(&sc->dtq, sizeof(sc->dtq));
1389 sc->dtq_free = MID_DTQ_N - 1; /* N - 1 */
1390 sc->dtq_chip = MID_DTQ_REG2A(EN_READ(sc, MID_DMA_RDTX));
/* BUGFIX: dtq_chip is an address in DTQ space, so convert it with the
 * DTQ macro (was MID_DRQ_A2REG, which subtracts the DRQ base offset);
 * this matches the MID_DRQ_A2REG usage on the WRRX side above. */
1391 EN_WRITE(sc, MID_DMA_WRTX, MID_DTQ_A2REG(sc->dtq_chip));
1392 /* ensure zero queue */
1393 sc->dtq_us = sc->dtq_chip;
/* reset the hardware service list pointer and empty the soft list */
1395 sc->hwslistp = MID_SL_REG2A(EN_READ(sc, MID_SERV_WRITE));
1396 sc->swsl_size = sc->swsl_head = sc->swsl_tail = 0;
1399 kprintf("%s: drq free/chip: %d/0x%x, dtq free/chip: %d/0x%x, hwslist: 0x%x\n",
1400 sc->sc_dev.dv_xname, sc->drq_free, sc->drq_chip,
1401 sc->dtq_free, sc->dtq_chip, sc->hwslistp);
/* program each TX channel's buffer "place" (size + location) register */
1404 for (slot = 0 ; slot < EN_NTX ; slot++) {
1405 sc->txslot[slot].bfree = EN_TXSZ * 1024;
1406 EN_WRITE(sc, MIDX_READPTR(slot), 0);
1407 EN_WRITE(sc, MIDX_DESCSTART(slot), 0);
1408 loc = sc->txslot[slot].cur = sc->txslot[slot].start;
1409 loc = loc - MID_RAMOFF;
1410 loc = (loc & ~((EN_TXSZ*1024) - 1)) >> 2; /* mask, cvt to words */
1411 loc = loc >> MIDV_LOCTOPSHFT; /* top 11 bits */
1412 EN_WRITE(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ), loc));
1414 kprintf("%s: tx%d: place 0x%x\n", sc->sc_dev.dv_xname, slot,
1415 EN_READ(sc, MIDX_PLACE(slot)));
/* unmask the interrupts we service and enable DMA + both engines */
1423 EN_WRITE(sc, MID_INTENA, MID_INT_TX|MID_INT_DMA_OVR|MID_INT_IDENT|
1424 MID_INT_LERR|MID_INT_DMA_ERR|MID_INT_DMA_RX|MID_INT_DMA_TX|
1425 MID_INT_SERVICE| /* >>> MID_INT_SUNI| XXXCDC<<< */ MID_INT_STATS);
1426 EN_WRITE(sc, MID_MAST_CSR, MID_SETIPL(sc->ipl)|MID_MCSR_ENDMA|
1427 MID_MCSR_ENTX|MID_MCSR_ENRX);
1433 * en_loadvc: load a vc tab entry from a slot
1437 en_loadvc(struct en_softc *sc, int vc)
1440 u_int32_t reg = EN_READ(sc, MID_VC(vc));
/* first force the VC into "trash" mode so the card stops delivering
 * cells into it while we rewrite its on-board state */
1442 reg = MIDV_SETMODE(reg, MIDV_TRASH);
1443 EN_WRITE(sc, MID_VC(vc), reg);
/* VC has no RX slot assigned: leave it trashed */
1446 if ((slot = sc->rxvc2slot[vc]) == RX_NONE)
1449 /* no need to set CRC */
1450 EN_WRITE(sc, MID_DST_RP(vc), 0); /* read pointer = 0, desc. start = 0 */
1451 EN_WRITE(sc, MID_WP_ST_CNT(vc), 0); /* write pointer = 0 */
1452 EN_WRITE(sc, MID_VC(vc), sc->rxslot[slot].mode); /* set mode, size, loc */
1453 sc->rxslot[slot].cur = sc->rxslot[slot].start;
1456 kprintf("%s: rx%d: assigned to VCI %d\n", sc->sc_dev.dv_xname, slot, vc);
1462 * en_start: start transmitting the next packet that needs to go out
1463 * if there is one. note that atm_output() has already locked us.
1467 en_start(struct ifnet *ifp, struct ifaltq_subque *ifsq __unused)
1469 struct en_softc *sc = (struct en_softc *) ifp->if_softc;
1470 struct mbuf *m, *lastm, *prev;
1471 struct atm_pseudohdr *ap, *new_ap;
1472 int txchan, mlen, got, need, toadd, cellcnt, first;
1473 u_int32_t atm_vpi, atm_vci, atm_flags, *dat, aal;
1476 if ((ifp->if_flags & IFF_RUNNING) == 0)
1480 * remove everything from interface queue since we handle all queueing
1486 m = ifq_dequeue(&ifp->if_snd, NULL);
1488 return; /* EMPTY: >>> exit here <<< */
1491 * calculate size of packet (in bytes)
1492 * also, if we are not doing transmit DMA we eliminate all stupid
1493 * (non-word) alignments here using en_mfix(). calls to en_mfix()
1494 * seem to be due to tcp retransmits for the most part.
1496 * after this loop mlen total length of mbuf chain (including atm_ph),
1497 * and lastm is a pointer to the last mbuf on the chain.
1505 if ((!sc->is_adaptec && EN_ENIDMAFIX) || EN_NOTXDMA || !en_dma) {
1506 if ( ((uintptr_t)mtod(lastm, void *) % sizeof(u_int32_t)) != 0 ||
1507 ((lastm->m_len % sizeof(u_int32_t)) != 0 && lastm->m_next)) {
1508 first = (lastm == m);
1509 if (en_mfix(sc, &lastm, prev) == 0) { /* failed? */
1515 m = lastm; /* update */
1520 mlen += lastm->m_len;
1521 if (lastm->m_next == NULL)
1523 lastm = lastm->m_next;
1526 if (m == NULL) /* happens only if mfix fails */
/* the ATM pseudoheader sits at the front of the first mbuf */
1529 ap = mtod(m, struct atm_pseudohdr *);
1531 atm_vpi = ATM_PH_VPI(ap);
1532 atm_vci = ATM_PH_VCI(ap);
1533 atm_flags = ATM_PH_FLAGS(ap) & ~(EN_OBHDR|EN_OBTRL);
1534 aal = ((atm_flags & ATM_PH_AAL5) != 0)
1535 ? MID_TBD_AAL5 : MID_TBD_NOAAL5;
1538 * check that vpi/vci is one we can use
/* BUGFIX: valid VCIs are 0..MID_N_VC-1 (the per-VC arrays such as
 * txspeed[] and txvc2slot[] have MID_N_VC entries), so the bound must
 * be ">=", not ">" -- otherwise vci == MID_N_VC overruns them. */
1541 if (atm_vpi || atm_vci >= MID_N_VC) {
1542 kprintf("%s: output vpi=%d, vci=%d out of card range, dropping...\n",
1543 sc->sc_dev.dv_xname, atm_vpi, atm_vci);
1549 * computing how much padding we need on the end of the mbuf, then
1550 * see if we can put the TBD at the front of the mbuf where the
1551 * link header goes (well behaved protocols will reserve room for us).
1552 * last, check if room for PDU tail.
1554 * got = number of bytes of data we have
1555 * cellcnt = number of cells in this mbuf
1556 * need = number of bytes of data + padding we need (excludes TBD)
1557 * toadd = number of bytes of data we need to add to end of mbuf,
1558 * [including AAL5 PDU, if AAL5]
/* BUGFIX: subtract the size of the pseudoheader struct itself, not the
 * size of a pointer to it (they differ on LP64). */
1561 got = mlen - sizeof(struct atm_pseudohdr);
1562 toadd = (aal == MID_TBD_AAL5) ? MID_PDU_SIZE : 0; /* PDU */
1563 cellcnt = (got + toadd + (MID_ATMDATASZ - 1)) / MID_ATMDATASZ;
1564 need = cellcnt * MID_ATMDATASZ;
1565 toadd = need - got; /* recompute, including zero padding */
1568 kprintf("%s: txvci%d: mlen=%d, got=%d, need=%d, toadd=%d, cell#=%d\n",
1569 sc->sc_dev.dv_xname, atm_vci, mlen, got, need, toadd, cellcnt);
1570 kprintf(" leading_space=%d, trailing_space=%d\n",
1571 M_LEADINGSPACE(m), M_TRAILINGSPACE(lastm));
1577 * note: external storage (M_EXT) can be shared between mbufs
1578 * to avoid copying (see m_copym()). this means that the same
1579 * data buffer could be shared by several mbufs, and thus it isn't
1580 * a good idea to try and write TBDs or PDUs to M_EXT data areas.
/* optimization: prepend the TBD in the mbuf's leading space so the
 * card can DMA it together with the data (EN_OBHDR = on-board header) */
1583 if (M_LEADINGSPACE(m) >= MID_TBD_SIZE && (m->m_flags & M_EXT) == 0) {
1584 m->m_data -= MID_TBD_SIZE;
1585 m->m_len += MID_TBD_SIZE;
1586 mlen += MID_TBD_SIZE;
1587 new_ap = mtod(m, struct atm_pseudohdr *);
1588 *new_ap = *ap; /* move it back */
1590 dat = ((u_int32_t *) ap) + 1;
1591 /* make sure the TBD is in proper byte order */
1592 *dat++ = htonl(MID_TBD_MK1(aal, sc->txspeed[atm_vci], cellcnt));
1593 *dat = htonl(MID_TBD_MK2(atm_vci, 0, 0));
1594 atm_flags |= EN_OBHDR;
/* likewise append padding + AAL5 PDU trailer in the trailing space
 * when the last mbuf has room (EN_OBTRL = on-board trailer) */
1597 if (toadd && (lastm->m_flags & M_EXT) == 0 &&
1598 M_TRAILINGSPACE(lastm) >= toadd) {
1599 cp = mtod(lastm, u_int8_t *) + lastm->m_len;
1600 lastm->m_len += toadd;
1602 if (aal == MID_TBD_AAL5) {
1603 bzero(cp, toadd - MID_PDU_SIZE);
1604 dat = (u_int32_t *)(cp + toadd - MID_PDU_SIZE);
1605 /* make sure the PDU is in proper byte order */
1606 *dat = htonl(MID_PDU_MK1(0, 0, got));
1610 atm_flags |= EN_OBTRL;
1612 ATM_PH_FLAGS(ap) = atm_flags; /* update EN_OBHDR/EN_OBTRL bits */
1613 #endif /* EN_MBUF_OPT */
1616 * get assigned channel (will be zero unless txspeed[atm_vci] is set)
1619 txchan = sc->txvc2slot[atm_vci];
/* apply software flow control: drop if the channel is over its
 * buffered-byte high-water mark */
1621 if (sc->txslot[txchan].mbsize > EN_TXHIWAT) {
1622 EN_COUNT(sc->txmbovr);
1625 kprintf("%s: tx%d: buffer space shortage\n", sc->sc_dev.dv_xname,
1631 sc->txslot[txchan].mbsize += mlen;
1634 kprintf("%s: tx%d: VPI=%d, VCI=%d, FLAGS=0x%x, speed=0x%x\n",
1635 sc->sc_dev.dv_xname, txchan, atm_vpi, atm_vci, atm_flags,
1636 sc->txspeed[atm_vci]);
1637 kprintf(" adjusted mlen=%d, mbsize=%d\n", mlen,
1638 sc->txslot[txchan].mbsize);
/* hand the packet to the per-channel queue and kick the DMA engine */
1641 IF_ENQUEUE(&sc->txslot[txchan].q, m);
1643 en_txdma(sc, txchan);
1651 * en_mfix: fix a stupid mbuf
1654 STATIC int en_makeexclusive(struct en_softc *, struct mbuf **, struct mbuf *);
1657 en_makeexclusive(struct en_softc *sc, struct mbuf **mm, struct mbuf *prev)
1659 struct mbuf *m, *new;
1663 if (m->m_flags & M_EXT) {
/* only ordinary clusters can be copied/realigned; other external
 * buffer types (e.g. loaned storage) are rejected */
1664 if (!(m->m_flags & M_EXT_CLUSTER)) {
1665 /* external buffer isn't an ordinary mbuf cluster! */
1666 kprintf("%s: mfix: special buffer! can't make a copy!\n",
1667 sc->sc_dev.dv_xname);
/* shared cluster: we must not scribble on storage other mbufs can
 * see, so allocate a fresh cluster and copy the data across */
1671 if (m_sharecount(m) > 1) {
1672 /* make a real copy of the M_EXT mbuf since it is shared */
1673 new = m_getcl(MB_DONTWAIT, MT_DATA, m->m_flags & M_PKTHDR);
1676 EN_COUNT(sc->mfixfail);
1679 if (m->m_flags & M_PKTHDR)
1680 M_MOVE_PKTHDR(new, m);
1681 bcopy(m->m_data, new->m_data, m->m_len);
1682 new->m_len = m->m_len;
1683 new->m_next = m->m_next;
1690 /* the buffer is not shared, align the data offset using
1692 u_char *d = mtod(m, u_char *);
1693 int off = ((uintptr_t)(void *)d) % sizeof(u_int32_t);
/* slide the data down to the previous word boundary in place */
1696 bcopy(d, d - off, m->m_len);
1697 m->m_data = (caddr_t)d - off;
1705 en_mfix(struct en_softc *sc, struct mbuf **mm, struct mbuf *prev)
1714 EN_COUNT(sc->mfix); /* count # of calls */
1716 kprintf("%s: mfix mbuf m_data=0x%x, m_len=%d\n", sc->sc_dev.dv_xname,
1717 m->m_data, m->m_len);
/* step 1: word-align the start of the data area */
1720 d = mtod(m, u_char *);
1721 off = ((uintptr_t) (void *) d) % sizeof(u_int32_t);
1724 if ((m->m_flags & M_EXT) == 0) {
1725 bcopy(d, d - off, m->m_len); /* ALIGN! (with costly data copy...) */
1727 m->m_data = (caddr_t)d;
1729 /* can't write to an M_EXT mbuf since it may be shared */
1730 if (en_makeexclusive(sc, &m, prev) == 0)
1732 *mm = m; /* note: 'd' now invalid */
/* step 2: pad the mbuf out to a word-sized length, pulling bytes
 * forward from the next mbuf (or zero-filling at end of chain) */
1736 off = m->m_len % sizeof(u_int32_t);
1740 if (m->m_flags & M_EXT) {
1741 /* can't write to an M_EXT mbuf since it may be shared */
1742 if (en_makeexclusive(sc, &m, prev) == 0)
1744 *mm = m; /* note: 'd' now invalid */
1747 d = mtod(m, u_char *) + m->m_len;
1748 off = sizeof(u_int32_t) - off;
1752 if (nxt != NULL && nxt->m_len == 0) {
1753 /* remove an empty mbuf. this avoids odd byte padding to an empty
1755 m->m_next = nxt = m_free(nxt);
1757 if (nxt == NULL) { /* out of data, zero fill */
1759 continue; /* next "off" */
/* steal one byte from the front of the next mbuf */
1761 cp = mtod(nxt, u_char *);
1765 nxt->m_data = (caddr_t)cp;
/* drop a now-empty trailing mbuf left over from the byte stealing */
1767 if (nxt != NULL && nxt->m_len == 0)
1768 m->m_next = m_free(nxt);
1773 * en_txdma: start trasmit DMA, if possible
1777 en_txdma(struct en_softc *sc, int chan)
1780 struct atm_pseudohdr *ap;
1781 struct en_launch launch = { .tbd1 = 0 };
1782 int datalen = 0, dtqneed, len, ncells;
1787 kprintf("%s: tx%d: starting...\n", sc->sc_dev.dv_xname, chan);
1791 * note: now that txlaunch handles non-word aligned/sized requests
1792 * the only time you can safely set launch.nodma is if you've en_mfix()'d
1793 * the mbuf chain. this happens only if EN_NOTXDMA || !en_dma.
1796 launch.nodma = (EN_NOTXDMA || !en_dma);
1801 * get an mbuf waiting for DMA
/* peek only -- the mbuf is not dequeued until we know the transfer
 * can be committed (enough obmem space and DTQs) */
1804 launch.t = sc->txslot[chan].q.ifq_head; /* peek at head of queue */
1806 if (launch.t == NULL) {
1808 kprintf("%s: tx%d: ...done!\n", sc->sc_dev.dv_xname, chan);
1810 return; /* >>> exit here if no data waiting for DMA <<< */
1816 * note: launch.need = # bytes we need to get on the card
1817 * dtqneed = # of DTQs we need for this packet
1818 * launch.mlen = # of bytes in in mbuf chain (<= launch.need)
1821 ap = mtod(launch.t, struct atm_pseudohdr *);
1822 launch.atm_vci = ATM_PH_VCI(ap);
1823 launch.atm_flags = ATM_PH_FLAGS(ap);
1824 launch.aal = ((launch.atm_flags & ATM_PH_AAL5) != 0) ?
1825 MID_TBD_AAL5 : MID_TBD_NOAAL5;
1828 * XXX: have to recompute the length again, even though we already did
1829 * it in en_start(). might as well compute dtqneed here as well, so
1830 * this isn't that bad.
1833 if ((launch.atm_flags & EN_OBHDR) == 0) {
1834 dtqneed = 1; /* header still needs to be added */
1835 launch.need = MID_TBD_SIZE; /* not includeded with mbuf */
1837 dtqneed = 0; /* header on-board, dma with mbuf */
/* walk the chain totalling bytes and DTQs; the pseudoheader in the
 * first mbuf is never sent to the card */
1842 for (tmp = launch.t ; tmp != NULL ; tmp = tmp->m_next) {
1845 cp = mtod(tmp, u_int8_t *);
1846 if (tmp == launch.t) {
1847 len -= sizeof(struct atm_pseudohdr); /* don't count this! */
1848 cp += sizeof(struct atm_pseudohdr);
1852 continue; /* atm_pseudohdr alone in first mbuf */
1854 dtqneed += en_dqneed(sc, (caddr_t) cp, len, 1);
1857 if ((launch.need % sizeof(u_int32_t)) != 0)
1858 dtqneed++; /* need DTQ to FLUSH internal buffer */
1860 if ((launch.atm_flags & EN_OBTRL) == 0) {
1861 if (launch.aal == MID_TBD_AAL5) {
1862 datalen = launch.need - MID_TBD_SIZE;
1863 launch.need += MID_PDU_SIZE; /* AAL5: need PDU tail */
1865 dtqneed++; /* need to work on the end a bit */
1869 * finish calculation of launch.need (need to figure out how much padding
1870 * we will need). launch.need includes MID_TBD_SIZE, but we need to
1871 * remove that to so we can round off properly. we have to add
1872 * MID_TBD_SIZE back in after calculating ncells.
1875 launch.need = roundup(launch.need - MID_TBD_SIZE, MID_ATMDATASZ);
1876 ncells = launch.need / MID_ATMDATASZ;
1877 launch.need += MID_TBD_SIZE;
1879 if (launch.need > EN_TXSZ * 1024) {
1880 kprintf("%s: tx%d: packet larger than xmit buffer (%d > %d)\n",
1881 sc->sc_dev.dv_xname, chan, launch.need, EN_TXSZ * 1024);
1886 * note: don't use the entire buffer space. if WRTX becomes equal
1887 * to RDTX, the transmitter stops assuming the buffer is empty! --kjc
1889 if (launch.need >= sc->txslot[chan].bfree) {
1890 EN_COUNT(sc->txoutspace);
1892 kprintf("%s: tx%d: out of transmit space\n", sc->sc_dev.dv_xname, chan);
1894 return; /* >>> exit here if out of obmem buffer space <<< */
1898 * ensure we have enough dtqs to go, if not, wait for more.
1904 if (dtqneed > sc->dtq_free) {
1906 EN_COUNT(sc->txdtqout);
1908 kprintf("%s: tx%d: out of transmit DTQs\n", sc->sc_dev.dv_xname, chan);
1910 return; /* >>> exit here if out of dtqs <<< */
1914 * it is a go, commit! dequeue mbuf start working on the xfer.
1917 IF_DEQUEUE(&sc->txslot[chan].q, tmp);
1919 if (launch.t != tmp)
1920 panic("en dequeue");
1921 #endif /* EN_DIAG */
1927 EN_COUNT(sc->launch);
1929 IFNET_STAT_INC(ifp, opackets, 1);
/* build TBD/PDU words in host order when not already on-board */
1931 if ((launch.atm_flags & EN_OBHDR) == 0) {
1932 EN_COUNT(sc->lheader);
1933 /* store tbd1/tbd2 in host byte order */
1934 launch.tbd1 = MID_TBD_MK1(launch.aal, sc->txspeed[launch.atm_vci], ncells);
1935 launch.tbd2 = MID_TBD_MK2(launch.atm_vci, 0, 0);
1937 if ((launch.atm_flags & EN_OBTRL) == 0 && launch.aal == MID_TBD_AAL5) {
1938 EN_COUNT(sc->ltail);
1939 launch.pdu1 = MID_PDU_MK1(0, 0, datalen); /* host byte order */
1942 en_txlaunch(sc, chan, &launch);
1946 * adjust the top of the mbuf to skip the pseudo atm header
1947 * (and TBD, if present) before passing the packet to bpf,
1948 * restore it afterwards.
1950 int size = sizeof(struct atm_pseudohdr);
1951 if (launch.atm_flags & EN_OBHDR)
1952 size += MID_TBD_SIZE;
1954 launch.t->m_data += size;
1955 launch.t->m_len -= size;
1957 BPF_MTAP(ifp, launch.t);
1959 launch.t->m_data -= size;
1960 launch.t->m_len += size;
1963 * do some housekeeping and get the next packet
/* mbuf is kept on the indma queue until the TX-DMA-complete interrupt
 * (enintr) frees it and returns the bytes to mbsize */
1966 sc->txslot[chan].bfree -= launch.need;
1967 IF_ENQUEUE(&sc->txslot[chan].indma, launch.t);
1971 * END of txdma loop!
/* drop path: undo the peeked packet and its byte accounting */
1979 IF_DEQUEUE(&sc->txslot[chan].q, tmp);
1980 if (launch.t != tmp)
1981 panic("en dequeue drop");
1983 sc->txslot[chan].mbsize -= launch.mlen;
1989 * en_txlaunch: launch an mbuf into the dma pool!
1993 en_txlaunch(struct en_softc *sc, int chan, struct en_launch *l)
1996 u_int32_t cur = sc->txslot[chan].cur,
1997 start = sc->txslot[chan].start,
1998 stop = sc->txslot[chan].stop,
1999 dma, *data, *datastop, count, bcode;
2000 int pad, addtail, need, len, needalign, cnt, end, mx;
2005 * need = # bytes card still needs (decr. to zero)
2006 * len = # of bytes left in current mbuf
2007 * cur = our current pointer
2008 * dma = last place we programmed into the DMA
2009 * data = pointer into data area of mbuf that needs to go next
2010 * cnt = # of bytes to transfer in this DTQ
2011 * bcode/count = DMA burst code, and chip's version of cnt
2013 * a single buffer can require up to 5 DTQs depending on its size
2014 * and alignment requirements. the 5 possible requests are:
2015 * [1] 1, 2, or 3 byte DMA to align src data pointer to word boundary
2016 * [2] alburst DMA to align src data pointer to bestburstlen
2017 * [3] 1 or more bestburstlen DMAs
2018 * [4] clean up burst (to last word boundary)
2019 * [5] 1, 2, or 3 byte final clean up DMA
2024 addtail = (l->atm_flags & EN_OBTRL) == 0; /* add a tail? */
2027 if ((need - MID_TBD_SIZE) % MID_ATMDATASZ)
2028 kprintf("%s: tx%d: bogus trasmit needs (%d)\n", sc->sc_dev.dv_xname, chan,
2032 kprintf("%s: tx%d: launch mbuf %p! cur=0x%x[%d], need=%d, addtail=%d\n",
2033 sc->sc_dev.dv_xname, chan, l->t, cur, (cur-start)/4, need, addtail);
2034 count = EN_READ(sc, MIDX_PLACE(chan));
2035 kprintf(" HW: base_address=0x%x, size=%d, read=%d, descstart=%d\n",
2036 MIDX_BASE(count), MIDX_SZ(count), EN_READ(sc, MIDX_READPTR(chan)),
2037 EN_READ(sc, MIDX_DESCSTART(chan)));
2041 * do we need to insert the TBD by hand?
2042 * note that tbd1/tbd2/pdu1 are in host byte order.
2045 if ((l->atm_flags & EN_OBHDR) == 0) {
2047 kprintf("%s: tx%d: insert header 0x%x 0x%x\n", sc->sc_dev.dv_xname,
2048 chan, l->tbd1, l->tbd2);
/* write the two TBD words directly into obmem, advancing 'cur' with
 * wraparound inside the channel's circular buffer */
2050 EN_WRITE(sc, cur, l->tbd1);
2051 EN_WRAPADD(start, stop, cur, 4);
2052 EN_WRITE(sc, cur, l->tbd2);
2053 EN_WRAPADD(start, stop, cur, 4);
2058 * now do the mbufs...
2061 for (tmp = l->t ; tmp != NULL ; tmp = tmp->m_next) {
2063 /* get pointer to data and length */
2064 data = mtod(tmp, u_int32_t *);
/* skip over the ATM pseudoheader in the first mbuf */
2067 data += sizeof(struct atm_pseudohdr)/sizeof(u_int32_t);
2068 len -= sizeof(struct atm_pseudohdr);
2071 /* now, determine if we should copy it */
2072 if (l->nodma || (len < EN_MINDMA &&
2073 (len % 4) == 0 && ((uintptr_t) (void *) data % 4) == 0 &&
2077 * roundup len: the only time this will change the value of len
2078 * is when l->nodma is true, tmp is the last mbuf, and there is
2079 * a non-word number of bytes to transmit. in this case it is
2080 * safe to round up because we've en_mfix'd the mbuf (so the first
2081 * byte is word aligned there must be enough free bytes at the end
2082 * to round off to the next word boundary)...
2084 len = roundup(len, sizeof(u_int32_t));
2085 datastop = data + (len / sizeof(u_int32_t));
2086 /* copy loop: preserve byte order!!! use WRITEDAT */
2087 while (data != datastop) {
2088 EN_WRITEDAT(sc, cur, *data);
2090 EN_WRAPADD(start, stop, cur, 4);
2094 kprintf("%s: tx%d: copied %d bytes (%d left, cur now 0x%x)\n",
2095 sc->sc_dev.dv_xname, chan, len, need, cur);
2097 continue; /* continue on to next mbuf */
2100 /* going to do DMA, first make sure the dtq is in sync. */
2102 EN_DTQADD(sc, WORD_IDX(start,cur), chan, MIDDMA_JK, 0, 0, 0);
2104 kprintf("%s: tx%d: dtq_sync: advance pointer to %d\n",
2105 sc->sc_dev.dv_xname, chan, cur);
2110 * if this is the last buffer, and it looks like we are going to need to
2111 * flush the internal buffer, can we extend the length of this mbuf to
2115 if (tmp->m_next == NULL) {
2116 cnt = (need - len) % sizeof(u_int32_t);
2117 if (cnt && M_TRAILINGSPACE(tmp) >= cnt)
2118 len += cnt; /* pad for FLUSH */
2121 #if !defined(MIDWAY_ENIONLY)
2124 * the adaptec DMA engine is smart and handles everything for us.
2127 if (sc->is_adaptec) {
2128 /* need to DMA "len" bytes out to card */
2130 EN_WRAPADD(start, stop, cur, len);
2132 kprintf("%s: tx%d: adp_dma %d bytes (%d left, cur now 0x%x)\n",
2133 sc->sc_dev.dv_xname, chan, len, need, cur);
2135 end = (need == 0) ? MID_DMA_END : 0;
2136 EN_DTQADD(sc, len, chan, 0, vtophys(data), l->mlen, end);
2139 dma = cur; /* update dma pointer */
2142 #endif /* !MIDWAY_ENIONLY */
2144 #if !defined(MIDWAY_ADPONLY)
2147 * the ENI DMA engine is not so smart and need more help from us
2150 /* do we need to do a DMA op to align to word boundary? */
2151 needalign = (uintptr_t) (void *) data % sizeof(u_int32_t);
2153 EN_COUNT(sc->headbyte);
2154 cnt = sizeof(u_int32_t) - needalign;
2155 if (cnt == 2 && len >= cnt) {
2157 bcode = MIDDMA_2BYTE;
2159 cnt = min(cnt, len); /* prevent overflow */
2161 bcode = MIDDMA_BYTE;
2164 EN_WRAPADD(start, stop, cur, cnt);
2166 kprintf("%s: tx%d: small al_dma %d bytes (%d left, cur now 0x%x)\n",
2167 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2170 end = (need == 0) ? MID_DMA_END : 0;
2171 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2174 data = (u_int32_t *) ((u_char *)data + cnt);
2177 /* do we need to do a DMA op to align? */
2179 (needalign = (((uintptr_t) (void *) data) & sc->bestburstmask)) != 0
2180 && len >= sizeof(u_int32_t)) {
2181 cnt = sc->bestburstlen - needalign;
2182 mx = len & ~(sizeof(u_int32_t)-1); /* don't go past end */
2185 count = cnt / sizeof(u_int32_t);
2186 bcode = MIDDMA_WORD;
2188 count = cnt / sizeof(u_int32_t);
2189 bcode = en_dmaplan[count].bcode;
2190 count = cnt >> en_dmaplan[count].divshift;
2193 EN_WRAPADD(start, stop, cur, cnt);
2195 kprintf("%s: tx%d: al_dma %d bytes (%d left, cur now 0x%x)\n",
2196 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2199 end = (need == 0) ? MID_DMA_END : 0;
2200 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2203 data = (u_int32_t *) ((u_char *)data + cnt);
2206 /* do we need to do a max-sized burst? */
2207 if (len >= sc->bestburstlen) {
2208 count = len >> sc->bestburstshift;
2209 cnt = count << sc->bestburstshift;
2210 bcode = sc->bestburstcode;
2212 EN_WRAPADD(start, stop, cur, cnt);
2214 kprintf("%s: tx%d: best_dma %d bytes (%d left, cur now 0x%x)\n",
2215 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2218 end = (need == 0) ? MID_DMA_END : 0;
2219 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2222 data = (u_int32_t *) ((u_char *)data + cnt);
2225 /* do we need to do a cleanup burst? */
2226 cnt = len & ~(sizeof(u_int32_t)-1);
2228 count = cnt / sizeof(u_int32_t);
2229 bcode = en_dmaplan[count].bcode;
2230 count = cnt >> en_dmaplan[count].divshift;
2232 EN_WRAPADD(start, stop, cur, cnt);
2234 kprintf("%s: tx%d: cleanup_dma %d bytes (%d left, cur now 0x%x)\n",
2235 sc->sc_dev.dv_xname, chan, cnt, need, cur);
2238 end = (need == 0) ? MID_DMA_END : 0;
2239 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2242 data = (u_int32_t *) ((u_char *)data + cnt);
2245 /* any word fragments left? */
2247 EN_COUNT(sc->tailbyte);
2250 bcode = MIDDMA_2BYTE; /* use 2byte mode */
2253 bcode = MIDDMA_BYTE; /* use 1 byte mode */
2256 EN_WRAPADD(start, stop, cur, len);
2258 kprintf("%s: tx%d: byte cleanup_dma %d bytes (%d left, cur now 0x%x)\n",
2259 sc->sc_dev.dv_xname, chan, len, need, cur);
2261 end = (need == 0) ? MID_DMA_END : 0;
2262 EN_DTQADD(sc, count, chan, bcode, vtophys(data), l->mlen, end);
2267 dma = cur; /* update dma pointer */
2268 #endif /* !MIDWAY_ADPONLY */
2270 } /* next mbuf, please */
2273 * all mbuf data has been copied out to the obmem (or set up to be DMAd).
2274 * if the trailer or padding needs to be put in, do it now.
2276 * NOTE: experimental results reveal the following fact:
2277 * if you DMA "X" bytes to the card, where X is not a multiple of 4,
2278 * then the card will internally buffer the last (X % 4) bytes (in
2279 * hopes of getting (4 - (X % 4)) more bytes to make a complete word).
2280 * it is imporant to make sure we don't leave any important data in
2281 * this internal buffer because it is discarded on the last (end) DTQ.
2282 * one way to do this is to DMA in (4 - (X % 4)) more bytes to flush
2283 * the darn thing out.
2288 pad = need % sizeof(u_int32_t);
2291 * FLUSH internal data buffer. pad out with random data from the front
2292 * of the mbuf chain...
2294 bcode = (sc->is_adaptec) ? 0 : MIDDMA_BYTE;
2295 EN_COUNT(sc->tailflush);
2296 EN_WRAPADD(start, stop, cur, pad);
2297 EN_DTQADD(sc, pad, chan, bcode, vtophys(l->t->m_data), 0, 0);
2300 kprintf("%s: tx%d: pad/FLUSH dma %d bytes (%d left, cur now 0x%x)\n",
2301 sc->sc_dev.dv_xname, chan, pad, need, cur);
/* zero-pad out to the cell boundary; the AAL5 trailer (pdu1 + the
 * CRC word completed by hardware) occupies the final two words */
2306 pad = need / sizeof(u_int32_t); /* round *down* */
2307 if (l->aal == MID_TBD_AAL5)
2310 kprintf("%s: tx%d: padding %d bytes (cur now 0x%x)\n",
2311 sc->sc_dev.dv_xname, chan, pad * sizeof(u_int32_t), cur);
2314 EN_WRITEDAT(sc, cur, 0); /* no byte order issues with zero */
2315 EN_WRAPADD(start, stop, cur, 4);
2317 if (l->aal == MID_TBD_AAL5) {
2318 EN_WRITE(sc, cur, l->pdu1); /* in host byte order */
2319 EN_WRAPADD(start, stop, cur, 8);
2323 if (addtail || dma != cur) {
2324 /* write final descritor */
2325 EN_DTQADD(sc, WORD_IDX(start,cur), chan, MIDDMA_JK, 0,
2326 l->mlen, MID_DMA_END);
2327 /* dma = cur; */ /* not necessary since we are done */
2331 /* update current pointer */
2332 sc->txslot[chan].cur = cur;
2334 kprintf("%s: tx%d: DONE! cur now = 0x%x\n",
2335 sc->sc_dev.dv_xname, chan, cur);
2349 struct en_softc *sc = (struct en_softc *) arg;
2351 struct atm_pseudohdr ah;
2353 u_int32_t reg, kick, val, mask, chip, vci, slot, dtq, drq;
2354 int lcv, idx, need_softserv = 0;
/* reading MID_INTACK both fetches and acknowledges pending interrupts */
2356 reg = EN_READ(sc, MID_INTACK);
2358 if ((reg & MID_INT_ANY) == 0)
2359 EN_INTR_RET(0); /* not us */
2362 kprintf("%s: interrupt=0x%b\n", sc->sc_dev.dv_xname, reg, MID_INTBITS);
2366 * unexpected errors that need a reset
2369 if ((reg & (MID_INT_IDENT|MID_INT_LERR|MID_INT_DMA_ERR|MID_INT_SUNI)) != 0) {
2370 kprintf("%s: unexpected interrupt=0x%b, resetting card\n",
2371 sc->sc_dev.dv_xname, reg, MID_INTBITS);
2374 Debugger("en: unexpected error");
2376 sc->enif.if_flags &= ~IFF_RUNNING; /* FREEZE! */
2381 EN_INTR_RET(1); /* for us */
2384 /*******************
2388 kick = 0; /* bitmask of channels to kick */
2389 if (reg & MID_INT_TX) { /* TX done! */
2392 * check for tx complete, if detected then this means that some space
2393 * has come free on the card. we must account for it and arrange to
2394 * kick the channel to life (in case it is stalled waiting on the card).
2396 for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2) {
2397 if (reg & MID_TXCHAN(lcv)) {
2398 kick = kick | mask; /* want to kick later */
2399 val = EN_READ(sc, MIDX_READPTR(lcv)); /* current read pointer */
2400 val = (val * sizeof(u_int32_t)) + sc->txslot[lcv].start;
2401 /* convert to offset */
/* recompute free space from the card's read pointer; the second
 * branch handles wraparound of the circular TX buffer */
2402 if (val > sc->txslot[lcv].cur)
2403 sc->txslot[lcv].bfree = val - sc->txslot[lcv].cur;
2405 sc->txslot[lcv].bfree = (val + (EN_TXSZ*1024)) - sc->txslot[lcv].cur;
2407 kprintf("%s: tx%d: trasmit done. %d bytes now free in buffer\n",
2408 sc->sc_dev.dv_xname, lcv, sc->txslot[lcv].bfree);
2414 if (reg & MID_INT_DMA_TX) { /* TX DMA done! */
2417 * check for TX DMA complete, if detected then this means that some DTQs
2418 * are now free. it also means some indma mbufs can be freed.
2419 * if we needed DTQs, kick all channels.
2421 val = EN_READ(sc, MID_DMA_RDTX); /* chip's current location */
2422 idx = MID_DTQ_A2REG(sc->dtq_chip);/* where we last saw chip */
2423 if (sc->need_dtqs) {
2424 kick = MID_NTX_CH - 1; /* assume power of 2, kick all! */
2425 sc->need_dtqs = 0; /* recalculated in "kick" loop below */
2427 kprintf("%s: cleared need DTQ condition\n", sc->sc_dev.dv_xname);
/* walk from our last-seen position up to the chip's position,
 * retiring each completed DTQ and its associated indma mbuf */
2430 while (idx != val) {
2432 if ((dtq = sc->dtq[idx]) != 0) {
2433 sc->dtq[idx] = 0; /* don't forget to zero it out when done */
2434 slot = EN_DQ_SLOT(dtq);
2435 IF_DEQUEUE(&sc->txslot[slot].indma, m);
2436 if (!m) panic("enintr: dtqsync");
2437 sc->txslot[slot].mbsize -= EN_DQ_LEN(dtq);
2439 kprintf("%s: tx%d: free %d dma bytes, mbsize now %d\n",
2440 sc->sc_dev.dv_xname, slot, EN_DQ_LEN(dtq),
2441 sc->txslot[slot].mbsize);
2445 EN_WRAPADD(0, MID_DTQ_N, idx, 1);
2447 sc->dtq_chip = MID_DTQ_REG2A(val); /* sync softc */
2452 * kick xmit channels as needed
2457 kprintf("%s: tx kick mask = 0x%x\n", sc->sc_dev.dv_xname, kick);
2459 for (mask = 1, lcv = 0 ; lcv < EN_NTX ; lcv++, mask = mask * 2) {
2460 if ((kick & mask) && sc->txslot[lcv].q.ifq_head) {
2461 en_txdma(sc, lcv); /* kick it! */
2463 } /* for each slot */
2467 /*******************
2472 * check for RX DMA complete, and pass the data "upstairs"
2475 if (reg & MID_INT_DMA_RX) {
2476 val = EN_READ(sc, MID_DMA_RDRX); /* chip's current location */
2477 idx = MID_DRQ_A2REG(sc->drq_chip);/* where we last saw chip */
2478 while (idx != val) {
2480 if ((drq = sc->drq[idx]) != 0) {
2481 sc->drq[idx] = 0; /* don't forget to zero it out when done */
2482 slot = EN_DQ_SLOT(drq);
/* zero-length DRQ = "JK" trash DMA with no mbuf attached */
2483 if (EN_DQ_LEN(drq) == 0) { /* "JK" trash DMA? */
2486 IF_DEQUEUE(&sc->rxslot[slot].indma, m);
2488 panic("enintr: drqsync: %s: lost mbuf in slot %d!",
2489 sc->sc_dev.dv_xname, slot);
2491 /* do something with this mbuf */
2492 if (sc->rxslot[slot].oth_flags & ENOTHER_DRAIN) { /* drain? */
/* draining: discard data; once all queues are empty and the VC
 * is out of service, release the slot */
2495 vci = sc->rxslot[slot].atm_vci;
2496 if (sc->rxslot[slot].indma.ifq_head == NULL &&
2497 sc->rxslot[slot].q.ifq_head == NULL &&
2498 (EN_READ(sc, MID_VC(vci)) & MIDV_INSERVICE) == 0 &&
2499 (sc->rxslot[slot].oth_flags & ENOTHER_SWSL) == 0) {
2500 sc->rxslot[slot].oth_flags = ENOTHER_FREE; /* done drain */
2501 sc->rxslot[slot].atm_vci = RX_NONE;
2502 sc->rxvc2slot[vci] = RX_NONE;
2504 kprintf("%s: rx%d: VCI %d now free\n", sc->sc_dev.dv_xname,
2508 } else if (m != NULL) {
/* normal receive: build a pseudoheader and hand the mbuf to the
 * ATM input layer via the slot's registered rxhand */
2509 ATM_PH_FLAGS(&ah) = sc->rxslot[slot].atm_flags;
2510 ATM_PH_VPI(&ah) = 0;
2511 ATM_PH_SETVCI(&ah, sc->rxslot[slot].atm_vci);
2513 kprintf("%s: rx%d: rxvci%d: atm_input, mbuf %p, len %d, hand %p\n",
2514 sc->sc_dev.dv_xname, slot, sc->rxslot[slot].atm_vci, m,
2515 EN_DQ_LEN(drq), sc->rxslot[slot].rxhand);
2519 IFNET_STAT_INC(ifp, ipackets, 1);
2523 atm_input(ifp, &ah, m, sc->rxslot[slot].rxhand);
2527 EN_WRAPADD(0, MID_DRQ_N, idx, 1);
2529 sc->drq_chip = MID_DRQ_REG2A(val); /* sync softc */
2531 if (sc->need_drqs) { /* true if we had a DRQ shortage */
2535 kprintf("%s: cleared need DRQ condition\n", sc->sc_dev.dv_xname);
2541 * handle service interrupts
2544 if (reg & MID_INT_SERVICE) {
2545 chip = MID_SL_REG2A(EN_READ(sc, MID_SERV_WRITE));
/* drain the hardware service list up to the chip's write pointer,
 * moving each VC onto the software service list for en_service() */
2547 while (sc->hwslistp != chip) {
2549 /* fetch and remove it from hardware service list */
2550 vci = EN_READ(sc, sc->hwslistp);
2551 EN_WRAPADD(MID_SLOFF, MID_SLEND, sc->hwslistp, 4);/* advance hw ptr */
2552 slot = sc->rxvc2slot[vci];
2553 if (slot == RX_NONE) {
2555 kprintf("%s: unexpected rx interrupt on VCI %d\n",
2556 sc->sc_dev.dv_xname, vci);
2558 EN_WRITE(sc, MID_VC(vci), MIDV_TRASH); /* rx off, damn it! */
2559 continue; /* next */
2561 EN_WRITE(sc, MID_VC(vci), sc->rxslot[slot].mode); /* remove from hwsl */
2562 EN_COUNT(sc->hwpull);
2565 kprintf("%s: pulled VCI %d off hwslist\n", sc->sc_dev.dv_xname, vci);
2568 /* add it to the software service list (if needed) */
2569 if ((sc->rxslot[slot].oth_flags & ENOTHER_SWSL) == 0) {
2570 EN_COUNT(sc->swadd);
2572 sc->rxslot[slot].oth_flags |= ENOTHER_SWSL;
2573 sc->swslist[sc->swsl_tail] = slot;
2574 EN_WRAPADD(0, MID_SL_N, sc->swsl_tail, 1);
2577 kprintf("%s: added VCI %d to swslist\n", sc->sc_dev.dv_xname, vci);
2584 * now service (function too big to include here)
2594 if (reg & MID_INT_DMA_OVR) {
2595 EN_COUNT(sc->dmaovr);
2597 kprintf("%s: MID_INT_DMA_OVR\n", sc->sc_dev.dv_xname);
/* harvest the card's trash counters into the softc statistics */
2600 reg = EN_READ(sc, MID_STAT);
2602 sc->otrash += MID_OTRASH(reg);
2603 sc->vtrash += MID_VTRASH(reg);
2606 EN_INTR_RET(1); /* for us */
2611 * en_service: handle a service interrupt
2613 * Q: why do we need a software service list?
2615 * A: if we remove a VCI from the hardware list and we find that we are
2616 * out of DRQs we must defer processing until some DRQs become free.
2617 * so we must remember to look at this RX VCI/slot later, but we can't
2618 * put it back on the hardware service list (since that isn't allowed).
2619 * so we instead save it on the software service list. it would be nice
2620 * if we could peek at the VCI on top of the hwservice list without removing
2621 * it, however this leads to a race condition: if we peek at it and
2622 * decide we are done with it new data could come in before we have a
2623 * chance to remove it from the hwslist. by the time we get it out of
2624 * the list the interrupt for the new data will be lost. oops!
/*
 * en_service: service one RX VCI pulled from the software service list.
 * Moves received data out of on-card buffer RAM into an mbuf chain,
 * either by programmed-I/O copy or by posting receive-DMA requests
 * (DRQs), then loops (goto same_vci) for the next packet on this slot.
 *
 * NOTE(review): this chunk is a decimated listing -- several original
 * source lines (braces, #ifdef arms, the same_vci: label, some argument
 * continuations) are elided, so statements below may appear without
 * their full enclosing context.
 */
2629 en_service(struct en_softc *sc)
2631 struct mbuf *m, *tmp;
2632 u_int32_t cur, dstart, rbd, pdu, *sav, dma, bcode, count, *data, *datastop;
2633 u_int32_t start, stop, cnt, needalign;
2634 int slot, raw, aal5, vci, fill, mlen, tlen, drqneed, need, needfill, end;
2636 aal5 = 0; /* Silence gcc */
/* nothing left on the software service list? then we are done */
2638 if (sc->swsl_size == 0) {
2640 kprintf("%s: en_service done\n", sc->sc_dev.dv_xname);
2642 return; /* >>> exit here if swsl now empty <<< */
2646 * get slot/vci to service
2649 slot = sc->swslist[sc->swsl_head];
2650 vci = sc->rxslot[slot].atm_vci;
/* sanity: the vci-to-slot map must agree with the slot we just pulled */
2652 if (sc->rxvc2slot[vci] != slot) panic("en_service rx slot/vci sync");
2656 * determine our mode and if we've got any work to do
2659 raw = sc->rxslot[slot].oth_flags & ENOTHER_RAW;
2660 start= sc->rxslot[slot].start;
2661 stop= sc->rxslot[slot].stop;
2662 cur = sc->rxslot[slot].cur;
2665 kprintf("%s: rx%d: service vci=%d raw=%d start/stop/cur=0x%x 0x%x 0x%x\n",
2666 sc->sc_dev.dv_xname, slot, vci, raw, start, stop, cur);
/* chip's DMA start pointer, converted from a word index to a byte address */
2670 dstart = MIDV_DSTART(EN_READ(sc, MID_DST_RP(vci)));
2671 dstart = (dstart * sizeof(u_int32_t)) + start;
2673 /* check to see if there is any data at all */
2674 if (dstart == cur) {
2675 defer: /* defer processing */
2676 EN_WRAPADD(0, MID_SL_N, sc->swsl_head, 1);
2677 sc->rxslot[slot].oth_flags &= ~ENOTHER_SWSL;
2679 /* >>> remove from swslist <<< */
2681 kprintf("%s: rx%d: remove vci %d from swslist\n",
2682 sc->sc_dev.dv_xname, slot, vci);
2688 * figure out how many bytes we need
2689 * [mlen = # bytes to go in mbufs, fill = # bytes to dump (MIDDMA_JK)]
2694 /* raw mode (aka boodi mode) */
2697 mlen = dstart - cur;
/* circular-buffer wrap case (presumably dstart < cur here -- elided branch) */
2699 mlen = (dstart + (EN_RXSZ*1024)) - cur;
2701 if (mlen < sc->rxslot[slot].raw_threshold)
2702 goto defer; /* too little data to deal with */
/* normal (reassembly) mode: read the receive buffer descriptor at "cur" */
2707 aal5 = (sc->rxslot[slot].atm_flags & ATM_PH_AAL5);
2708 rbd = EN_READ(sc, cur);
2709 if (MID_RBD_ID(rbd) != MID_RBD_STDID)
2710 panic("en_service: id mismatch");
2712 if (rbd & MID_RBD_T) {
2713 mlen = 0; /* we've got trash */
2714 fill = MID_RBD_SIZE;
2715 EN_COUNT(sc->ttrash);
2717 kprintf("RX overflow lost %d cells!\n", MID_RBD_CNT(rbd));
/* non-AAL5 (aal0): exactly one cell plus descriptor and cell header */
2720 mlen = MID_RBD_SIZE + MID_CHDR_SIZE + MID_ATMDATASZ; /* 1 cell (ick!) */
/* AAL5: total length from the cell count; PDU trailer sits at the end */
2725 tlen = (MID_RBD_CNT(rbd) * MID_ATMDATASZ) + MID_RBD_SIZE;
2726 pdu = cur + tlen - MID_PDU_SIZE;
2728 pdu -= (EN_RXSZ*1024);
2729 pdu = EN_READ(sc, pdu); /* get PDU in correct byte order */
/* fill = padding between the AAL5 payload length and the cell total */
2730 fill = tlen - MID_RBD_SIZE - MID_PDU_LEN(pdu);
2731 if (fill < 0 || (rbd & MID_RBD_CRCERR) != 0) {
2732 static int first = 1;
2735 kprintf("%s: %s, dropping frame\n", sc->sc_dev.dv_xname,
2736 (rbd & MID_RBD_CRCERR) ?
2737 "CRC error" : "invalid AAL5 PDU length");
2738 kprintf("%s: got %d cells (%d bytes), AAL5 len is %d bytes (pdu=0x%x)\n",
2739 sc->sc_dev.dv_xname, MID_RBD_CNT(rbd),
2740 tlen - MID_RBD_SIZE, MID_PDU_LEN(pdu), pdu);
2742 kprintf("CRC error report disabled from now on!\n");
2749 IFNET_STAT_INC(ifp, ierrors, 1);
2758 * now allocate mbufs for mlen bytes of data, if out of mbufs, trash all
2761 * 1. it is possible that we've already allocated an mbuf for this pkt
2762 * but ran out of DRQs, in which case we saved the allocated mbuf on
2764 * 2. if we save an mbuf in "q" we store the "cur" (pointer) in the front
2765 * of the mbuf as an identity (that we can check later), and we also
2766 * store drqneed (so we don't have to recompute it).
2767 * 3. after this block of code, if m is still NULL then we ran out of mbufs
2770 m = sc->rxslot[slot].q.ifq_head;
2773 sav = mtod(m, u_int32_t *);
2774 if (sav[0] != cur) {
2776 kprintf("%s: rx%d: q'ed mbuf %p not ours\n",
2777 sc->sc_dev.dv_xname, slot, m);
2779 m = NULL; /* wasn't ours */
2780 EN_COUNT(sc->rxqnotus);
2782 EN_COUNT(sc->rxqus);
2783 IF_DEQUEUE(&sc->rxslot[slot].q, m);
2786 kprintf("%s: rx%d: recovered q'ed mbuf %p (drqneed=%d)\n",
2787 sc->sc_dev.dv_xname, slot, m, drqneed);
/* no saved mbuf and we need one: allocate a fresh chain for mlen bytes */
2792 if (mlen != 0 && m == NULL) {
2793 m = en_mget(sc, mlen, &drqneed); /* allocate! */
2797 EN_COUNT(sc->rxmbufout);
2799 kprintf("%s: rx%d: out of mbufs\n", sc->sc_dev.dv_xname, slot);
2803 kprintf("%s: rx%d: allocate mbuf %p, mlen=%d, drqneed=%d\n",
2804 sc->sc_dev.dv_xname, slot, m, mlen, drqneed);
2809 kprintf("%s: rx%d: VCI %d, mbuf_chain %p, mlen %d, fill %d\n",
2810 sc->sc_dev.dv_xname, slot, vci, m, mlen, fill);
2814 * now check to see if we've got the DRQs needed. if we are out of
2815 * DRQs we must quit (saving our mbuf, if we've got one).
2818 needfill = (fill) ? 1 : 0;
2819 if (drqneed + needfill > sc->drq_free) {
2820 sc->need_drqs = 1; /* flag condition */
2822 EN_COUNT(sc->rxoutboth);
2824 kprintf("%s: rx%d: out of DRQs *and* mbufs!\n", sc->sc_dev.dv_xname, slot);
2826 return; /* >>> exit here if out of both mbufs and DRQs <<< */
/* stash "cur" in the mbuf's first word so we can identify it later (see #2) */
2828 sav = mtod(m, u_int32_t *);
2831 IF_ENQUEUE(&sc->rxslot[slot].q, m);
2832 EN_COUNT(sc->rxdrqout);
2834 kprintf("%s: rx%d: out of DRQs\n", sc->sc_dev.dv_xname, slot);
2836 return; /* >>> exit here if out of DRQs <<< */
2840 * at this point all resources have been allocated and we are committed
2841 * to servicing this slot.
2843 * dma = last location we told chip about
2844 * cur = current location
2845 * mlen = space in the mbuf we want
2846 * need = bytes to xfer in (decrs to zero)
2847 * fill = how much fill we need
2848 * tlen = how much data to transfer to this mbuf
2849 * cnt/bcode/count = <same as xmit>
2851 * 'needfill' not used after this point
2854 dma = cur; /* dma = last location we told chip about */
2855 need = roundup(mlen, sizeof(u_int32_t));
2856 fill = fill - (need - mlen); /* note: may invalidate 'needfill' */
/* walk the mbuf chain, draining "need" bytes out of on-card buffer RAM */
2858 for (tmp = m ; tmp != NULL && need > 0 ; tmp = tmp->m_next) {
2859 tlen = roundup(tmp->m_len, sizeof(u_int32_t)); /* m_len set by en_mget */
2860 data = mtod(tmp, u_int32_t *);
2863 kprintf("%s: rx%d: load mbuf %p, m_len=%d, m_data=%p, tlen=%d\n",
2864 sc->sc_dev.dv_xname, slot, tmp, tmp->m_len, tmp->m_data, tlen);
/* small or DMA-disabled transfers: copy by programmed I/O instead of DMA */
2868 if (EN_NORXDMA || !en_dma || tlen < EN_MINDMA) {
2869 datastop = (u_int32_t *)((u_char *) data + tlen);
2870 /* copy loop: preserve byte order!!! use READDAT */
2871 while (data != datastop) {
2872 *data = EN_READDAT(sc, cur);
2874 EN_WRAPADD(start, stop, cur, 4);
2878 kprintf("%s: rx%d: vci%d: copied %d bytes (%d left)\n",
2879 sc->sc_dev.dv_xname, slot, vci, tlen, need);
2884 /* DMA data (check to see if we need to sync DRQ first) */
2886 EN_DRQADD(sc, WORD_IDX(start,cur), vci, MIDDMA_JK, 0, 0, 0, 0);
2888 kprintf("%s: rx%d: vci%d: drq_sync: advance pointer to %d\n",
2889 sc->sc_dev.dv_xname, slot, vci, cur);
2893 #if !defined(MIDWAY_ENIONLY)
2896 * the adaptec DMA engine is smart and handles everything for us.
2899 if (sc->is_adaptec) {
2901 EN_WRAPADD(start, stop, cur, tlen);
2903 kprintf("%s: rx%d: vci%d: adp_dma %d bytes (%d left)\n",
2904 sc->sc_dev.dv_xname, slot, vci, tlen, need);
2906 end = (need == 0 && !fill) ? MID_DMA_END : 0;
2907 EN_DRQADD(sc, tlen, vci, 0, vtophys(data), mlen, slot, end);
2910 dma = cur; /* update dma pointer */
2913 #endif /* !MIDWAY_ENIONLY */
2916 #if !defined(MIDWAY_ADPONLY)
2919 * the ENI DMA engine is not so smart and needs more help from us
2922 /* do we need to do a DMA op to align? */
2924 (needalign = (((uintptr_t) (void *) data) & sc->bestburstmask)) != 0) {
2925 cnt = sc->bestburstlen - needalign;
2928 count = cnt / sizeof(u_int32_t);
2929 bcode = MIDDMA_WORD;
2931 count = cnt / sizeof(u_int32_t);
2932 bcode = en_dmaplan[count].bcode;
2933 count = cnt >> en_dmaplan[count].divshift;
2936 EN_WRAPADD(start, stop, cur, cnt);
2938 kprintf("%s: rx%d: vci%d: al_dma %d bytes (%d left)\n",
2939 sc->sc_dev.dv_xname, slot, vci, cnt, need);
2942 end = (need == 0 && !fill) ? MID_DMA_END : 0;
2943 EN_DRQADD(sc, count, vci, bcode, vtophys(data), mlen, slot, end);
2946 data = (u_int32_t *)((u_char *) data + cnt);
2949 /* do we need a max-sized burst? */
2950 if (tlen >= sc->bestburstlen) {
2951 count = tlen >> sc->bestburstshift;
2952 cnt = count << sc->bestburstshift;
2953 bcode = sc->bestburstcode;
2955 EN_WRAPADD(start, stop, cur, cnt);
2957 kprintf("%s: rx%d: vci%d: best_dma %d bytes (%d left)\n",
2958 sc->sc_dev.dv_xname, slot, vci, cnt, need);
2961 end = (need == 0 && !fill) ? MID_DMA_END : 0;
2962 EN_DRQADD(sc, count, vci, bcode, vtophys(data), mlen, slot, end);
2965 data = (u_int32_t *)((u_char *) data + cnt);
2968 /* do we need to do a cleanup burst? */
2970 count = tlen / sizeof(u_int32_t);
2971 bcode = en_dmaplan[count].bcode;
2972 count = tlen >> en_dmaplan[count].divshift;
2974 EN_WRAPADD(start, stop, cur, tlen);
2976 kprintf("%s: rx%d: vci%d: cleanup_dma %d bytes (%d left)\n",
2977 sc->sc_dev.dv_xname, slot, vci, tlen, need);
2979 end = (need == 0 && !fill) ? MID_DMA_END : 0;
2980 EN_DRQADD(sc, count, vci, bcode, vtophys(data), mlen, slot, end);
2985 dma = cur; /* update dma pointer */
2987 #endif /* !MIDWAY_ADPONLY */
/* skip fill bytes and/or resync the chip's notion of "cur" via a JK DRQ */
2992 if (fill || dma != cur) {
2995 kprintf("%s: rx%d: vci%d: skipping %d bytes of fill\n",
2996 sc->sc_dev.dv_xname, slot, vci, fill);
2998 kprintf("%s: rx%d: vci%d: syncing chip from 0x%x to 0x%x [cur]\n",
2999 sc->sc_dev.dv_xname, slot, vci, dma, cur);
3001 EN_WRAPADD(start, stop, cur, fill);
3002 EN_DRQADD(sc, WORD_IDX(start,cur), vci, MIDDMA_JK, 0, mlen,
3004 /* dma = cur; */ /* not necessary since we are done */
3008 * done, remove stuff we don't want to pass up:
3009 * raw mode (boodi mode): pass everything up for later processing
3011 * aal0: remove RBD + cell header
/* chop the descriptor (and, for aal0, the cell header) off the chain front */
3018 if (!aal5) cnt += MID_CHDR_SIZE;
3019 m->m_len -= cnt; /* chop! */
3020 m->m_pkthdr.len -= cnt;
/* hand the chain to the in-DMA queue and save back our buffer pointer */
3023 IF_ENQUEUE(&sc->rxslot[slot].indma, m);
3025 sc->rxslot[slot].cur = cur; /* update master copy of 'cur' */
3028 kprintf("%s: rx%d: vci%d: DONE! cur now =0x%x\n",
3029 sc->sc_dev.dv_xname, slot, vci, cur);
3032 goto same_vci; /* get next packet in this slot */
3038 * functions we can call from ddb
3042 * en_dump: dump the state
/*
 * en_dump() "level" bitmask: each END_* bit selects one section of
 * driver state to print; END_BITS is the matching %b-format description
 * string used when echoing the requested level.
 */
3045 #define END_SWSL 0x00000040 /* swsl state */
3046 #define END_DRQ 0x00000020 /* drq state */
3047 #define END_DTQ 0x00000010 /* dtq state */
3048 #define END_RX 0x00000008 /* rx state */
3049 #define END_TX 0x00000004 /* tx state */
3050 #define END_MREGS 0x00000002 /* registers */
3051 #define END_STATS 0x00000001 /* dump stats */
3053 #define END_BITS "\20\7SWSL\6DRQ\5DTQ\4RX\3TX\2MREGS\1STATS"
3055 /* Do not staticize - meant for calling from DDB! */
/*
 * en_dump: dump driver state to the console, for calling from DDB.
 * unit  - device unit to dump, or -1 for every attached unit
 * level - bitmask of END_* flags selecting which sections to print
 *
 * NOTE(review): decimated listing -- some original lines (braces,
 * "continue" bodies, loop headers for ptr) are elided below.
 */
3057 en_dump(int unit, int level)
3059 struct en_softc *sc;
/* walk every configured device; skip holes and non-matching units */
3063 for (lcv = 0 ; lcv < en_cd.cd_ndevs ; lcv++) {
3064 sc = (struct en_softc *) en_cd.cd_devs[lcv];
3065 if (sc == NULL) continue;
3066 if (unit != -1 && unit != lcv)
3069 kprintf("dumping device %s at level 0x%b\n", sc->sc_dev.dv_xname, level,
/* dtq_us == 0 is used as the "not yet en_init'd" marker here */
3072 if (sc->dtq_us == 0) {
3073 kprintf("<hasn't been en_init'd yet>\n");
/* END_STATS: software event counters kept in the softc */
3077 if (level & END_STATS) {
3078 kprintf(" en_stats:\n");
3079 kprintf(" %d mfix (%d failed); %d/%d head/tail byte DMAs, %d flushes\n",
3080 sc->mfix, sc->mfixfail, sc->headbyte, sc->tailbyte, sc->tailflush);
3081 kprintf(" %d rx dma overflow interrupts\n", sc->dmaovr);
3082 kprintf(" %d times we ran out of TX space and stalled\n",
3084 kprintf(" %d times we ran out of DTQs\n", sc->txdtqout);
3085 kprintf(" %d times we launched a packet\n", sc->launch);
3086 kprintf(" %d times we launched without on-board header\n", sc->lheader);
3087 kprintf(" %d times we launched without on-board tail\n", sc->ltail);
3088 kprintf(" %d times we pulled the hw service list\n", sc->hwpull);
3089 kprintf(" %d times we pushed a vci on the sw service list\n",
3091 kprintf(" %d times RX pulled an mbuf from Q that wasn't ours\n",
3093 kprintf(" %d times RX pulled a good mbuf from Q\n", sc->rxqus);
3094 kprintf(" %d times we ran out of mbufs *and* DRQs\n", sc->rxoutboth);
3095 kprintf(" %d times we ran out of DRQs\n", sc->rxdrqout);
3097 kprintf(" %d trasmit packets dropped due to mbsize\n", sc->txmbovr);
3098 kprintf(" %d cells trashed due to turned off rxvc\n", sc->vtrash);
3099 kprintf(" %d cells trashed due to totally full buffer\n", sc->otrash);
3100 kprintf(" %d cells trashed due almost full buffer\n", sc->ttrash);
3101 kprintf(" %d rx mbuf allocation failures\n", sc->rxmbufout);
3102 #if defined(NATM) && defined(NATM_STAT)
3103 kprintf(" natmintr so_rcv: ok/drop cnt: %d/%d, ok/drop bytes: %d/%d\n",
3104 natm_sookcnt, natm_sodropcnt, natm_sookbytes, natm_sodropbytes);
/* END_MREGS: raw Midway chip registers */
3108 if (level & END_MREGS) {
3109 kprintf("mregs:\n");
3110 kprintf("resid = 0x%lx\n", (u_long)EN_READ(sc, MID_RESID));
3111 kprintf("interrupt status = 0x%b\n",
3112 (int)EN_READ(sc, MID_INTSTAT), MID_INTBITS);
3113 kprintf("interrupt enable = 0x%b\n",
3114 (int)EN_READ(sc, MID_INTENA), MID_INTBITS);
3115 kprintf("mcsr = 0x%b\n", (int)EN_READ(sc, MID_MAST_CSR), MID_MCSRBITS);
3116 kprintf("serv_write = [chip=%ld] [us=%d]\n",
3117 (long)EN_READ(sc, MID_SERV_WRITE),
3118 MID_SL_A2REG(sc->hwslistp));
3119 kprintf("dma addr = 0x%lx\n", (u_long)EN_READ(sc, MID_DMA_ADDR));
/* chip vs. softc views of the DMA queues should track each other */
3120 kprintf("DRQ: chip[rd=0x%lx,wr=0x%lx], sc[chip=0x%x,us=0x%x]\n",
3121 (u_long)MID_DRQ_REG2A(EN_READ(sc, MID_DMA_RDRX)),
3122 (u_long)MID_DRQ_REG2A(EN_READ(sc, MID_DMA_WRRX)),
3123 sc->drq_chip, sc->drq_us);
3124 kprintf("DTQ: chip[rd=0x%lx,wr=0x%lx], sc[chip=0x%x,us=0x%x]\n",
3125 (u_long)MID_DTQ_REG2A(EN_READ(sc, MID_DMA_RDTX)),
3126 (u_long)MID_DTQ_REG2A(EN_READ(sc, MID_DMA_WRTX)),
3127 sc->dtq_chip, sc->dtq_us);
/* only non-default per-VC txspeeds and live rxvc mappings are listed */
3129 kprintf(" unusual txspeeds: ");
3130 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3131 if (sc->txspeed[cnt])
3132 kprintf(" vci%d=0x%x", cnt, sc->txspeed[cnt]);
3135 kprintf(" rxvc slot mappings: ");
3136 for (cnt = 0 ; cnt < MID_N_VC ; cnt++)
3137 if (sc->rxvc2slot[cnt] != RX_NONE)
3138 kprintf(" %d->%d", cnt, sc->rxvc2slot[cnt]);
/* END_TX: per-slot transmit state, softc copy plus on-chip registers */
3143 if (level & END_TX) {
3145 for (slot = 0 ; slot < EN_NTX; slot++) {
3146 kprintf("tx%d: start/stop/cur=0x%x/0x%x/0x%x [%d] ", slot,
3147 sc->txslot[slot].start, sc->txslot[slot].stop, sc->txslot[slot].cur,
3148 (sc->txslot[slot].cur - sc->txslot[slot].start)/4);
3149 kprintf("mbsize=%d, bfree=%d\n", sc->txslot[slot].mbsize,
3150 sc->txslot[slot].bfree);
3151 kprintf("txhw: base_address=0x%lx, size=%ld, read=%ld, descstart=%ld\n",
3152 (u_long)MIDX_BASE(EN_READ(sc, MIDX_PLACE(slot))),
3153 (u_long)MIDX_SZ(EN_READ(sc, MIDX_PLACE(slot))),
3154 (long)EN_READ(sc, MIDX_READPTR(slot)),
3155 (long)EN_READ(sc, MIDX_DESCSTART(slot)));
/* END_RX: per-slot receive state, softc copy plus on-chip registers */
3159 if (level & END_RX) {
3160 kprintf(" recv slots:\n");
3161 for (slot = 0 ; slot < sc->en_nrx; slot++) {
3162 kprintf("rx%d: vci=%d: start/stop/cur=0x%x/0x%x/0x%x ", slot,
3163 sc->rxslot[slot].atm_vci, sc->rxslot[slot].start,
3164 sc->rxslot[slot].stop, sc->rxslot[slot].cur);
3165 kprintf("mode=0x%x, atm_flags=0x%x, oth_flags=0x%x\n",
3166 sc->rxslot[slot].mode, sc->rxslot[slot].atm_flags,
3167 sc->rxslot[slot].oth_flags);
3168 kprintf("RXHW: mode=0x%lx, DST_RP=0x%lx, WP_ST_CNT=0x%lx\n",
3169 (u_long)EN_READ(sc, MID_VC(sc->rxslot[slot].atm_vci)),
3170 (u_long)EN_READ(sc, MID_DST_RP(sc->rxslot[slot].atm_vci)),
3171 (u_long)EN_READ(sc, MID_WP_ST_CNT(sc->rxslot[slot].atm_vci)));
/* END_DTQ: walk outstanding transmit-DMA queue entries up to dtq_us */
3175 if (level & END_DTQ) {
3176 kprintf(" dtq [need_dtqs=%d,dtq_free=%d]:\n",
3177 sc->need_dtqs, sc->dtq_free);
3179 while (ptr != sc->dtq_us) {
3180 reg = EN_READ(sc, ptr);
3181 kprintf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%lx]\n",
3182 sc->dtq[MID_DTQ_A2REG(ptr)], MID_DMA_CNT(reg), MID_DMA_TXCHAN(reg),
3183 (reg & MID_DMA_END) != 0, MID_DMA_TYPE(reg),
3184 (u_long)EN_READ(sc, ptr+4));
3185 EN_WRAPADD(MID_DTQOFF, MID_DTQEND, ptr, 8);
/* END_DRQ: walk outstanding receive-DMA queue entries up to drq_us */
3189 if (level & END_DRQ) {
3190 kprintf(" drq [need_drqs=%d,drq_free=%d]:\n",
3191 sc->need_drqs, sc->drq_free);
3193 while (ptr != sc->drq_us) {
3194 reg = EN_READ(sc, ptr);
3195 kprintf("\t0x%x=[cnt=%d, chan=%d, end=%d, type=%d @ 0x%lx]\n",
3196 sc->drq[MID_DRQ_A2REG(ptr)], MID_DMA_CNT(reg), MID_DMA_RXVCI(reg),
3197 (reg & MID_DMA_END) != 0, MID_DMA_TYPE(reg),
3198 (u_long)EN_READ(sc, ptr+4));
3199 EN_WRAPADD(MID_DRQOFF, MID_DRQEND, ptr, 8);
/* END_SWSL: contents of the software service list, head to tail */
3203 if (level & END_SWSL) {
3204 kprintf(" swslist [size=%d]: ", sc->swsl_size);
3205 for (cnt = sc->swsl_head ; cnt != sc->swsl_tail ;
3206 cnt = (cnt + 1) % MID_SL_N)
3207 kprintf("0x%x ", sc->swslist[cnt]);
3215 * en_dumpmem: dump the memory
3218 /* Do not staticize - meant for calling from DDB! */
3220 en_dumpmem(int unit, int addr, int len)
3222 struct en_softc *sc;
3225 if (unit < 0 || unit > en_cd.cd_ndevs ||
3226 (sc = (struct en_softc *) en_cd.cd_devs[unit]) == NULL) {
3227 kprintf("invalid unit number: %d\n", unit);
3231 if (addr < MID_RAMOFF || addr + len*4 > MID_MAXOFF || len <= 0) {
3232 kprintf("invalid addr/len number: %d, %d\n", addr, len);
3235 kprintf("dumping %d words starting at offset 0x%x\n", len, addr);
3237 reg = EN_READ(sc, addr);
3238 kprintf("mem[0x%x] = 0x%x\n", addr, reg);