2 * Copyright (c) 1994-2000
3 * Paul Richards. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer,
10 * verbatim and that no modifications are made prior to this
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. The name Paul Richards may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY PAUL RICHARDS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL PAUL RICHARDS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * $FreeBSD: src/sys/dev/lnc/if_lnc.c,v 1.89 2001/07/04 13:00:19 nyan Exp $
31 * $DragonFly: src/sys/dev/netif/lnc/Attic/if_lnc.c,v 1.6 2003/12/07 19:23:39 dillon Exp $
40 * Check all the XXX comments -- some of them are just things I've left
41 * unfinished rather than "difficult" problems that were hacked around.
45 * Check how all the arpcom flags get set and used.
47 * Re-inline and re-static all routines after debugging.
49 * Remember to assign iobase in SHMEM probe routines.
51 * Replace all occurrences of LANCE-controller-card etc in prints by the name
52 * strings of the appropriate type -- nifty window dressing
54 * Add DEPCA support -- mostly done.
60 /* Some defines that should really be in generic locations */
62 #define MULTICAST_FILTER_LEN 8
64 #include <sys/param.h>
65 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/malloc.h>
70 #include <sys/module.h>
71 #include <sys/socket.h>
72 #include <sys/sockio.h>
73 #include <sys/syslog.h>
75 #include <machine/bus.h>
76 #include <machine/resource.h>
79 #include <net/ethernet.h>
81 #include <net/if_dl.h>
82 #include <net/if_types.h>
84 #include <netinet/in.h>
85 #include <netinet/if_ether.h>
89 #include <machine/md_var.h>
91 #include <dev/netif/lnc/if_lncvar.h>
92 #include <dev/netif/lnc/if_lncreg.h>
94 DECLARE_DUMMY_MODULE(if_lnc);
96 devclass_t lnc_devclass;
98 static char const * const nic_ident[] = {
103 "CNET98S", /* PC-98 */
106 static char const * const ic_ident[] = {
121 static void lnc_setladrf(struct lnc_softc *sc);
122 static void lnc_reset(struct lnc_softc *sc);
123 static void lnc_free_mbufs(struct lnc_softc *sc);
124 static __inline int alloc_mbuf_cluster(struct lnc_softc *sc,
125 struct host_ring_entry *desc);
126 static __inline struct mbuf *chain_mbufs(struct lnc_softc *sc,
129 static __inline struct mbuf *mbuf_packet(struct lnc_softc *sc,
132 static __inline void lnc_rint(struct lnc_softc *sc);
133 static __inline void lnc_tint(struct lnc_softc *sc);
135 static void lnc_init(void *);
136 static __inline int mbuf_to_buffer(struct mbuf *m, char *buffer);
137 static __inline struct mbuf *chain_to_cluster(struct mbuf *m);
138 static void lnc_start(struct ifnet *ifp);
139 static int lnc_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
140 static void lnc_watchdog(struct ifnet *ifp);
142 void lnc_dump_state(struct lnc_softc *sc);
143 void mbuf_dump_chain(struct mbuf *m);
/*
 * read_csr -- read a Control/Status Register: select CSR `port' by writing
 * it to the Register Address Port (RAP), then read the value back through
 * the Register Data Port (RDP).
 * NOTE(review): this excerpt is line-sampled (leading integers are original
 * file line numbers); the return-type line and braces fell outside the sample.
 */
147 read_csr(struct lnc_softc *sc, u_short port)
149 lnc_outw(sc->rap, port);
150 return (lnc_inw(sc->rdp));
/*
 * write_csr -- write `val' into CSR `port': select the register via RAP,
 * then write through RDP.
 * NOTE(review): sampled excerpt; return type and braces are missing here.
 */
154 write_csr(struct lnc_softc *sc, u_short port, u_short val)
156 lnc_outw(sc->rap, port);
157 lnc_outw(sc->rdp, val);
/*
 * write_bcr -- write `val' into Bus Configuration Register `port': select
 * via RAP, write through the BCR data port (BDP).  BCRs exist only on the
 * PCnet family, not the original Am7990 LANCE.
 * NOTE(review): sampled excerpt; return type and braces are missing here.
 */
161 write_bcr(struct lnc_softc *sc, u_short port, u_short val)
163 lnc_outw(sc->rap, port);
164 lnc_outw(sc->bdp, val);
/*
 * read_bcr -- read Bus Configuration Register `port' via RAP/BDP.
 * NOTE(review): sampled excerpt; the function braces fell outside the sample.
 */
167 static __inline u_short
168 read_bcr(struct lnc_softc *sc, u_short port)
170 lnc_outw(sc->rap, port);
171 return (lnc_inw(sc->bdp));
/*
 * lance_probe -- detect a LANCE-family chip and distinguish LANCE from
 * C-LANCE.  First STOP the chip; a real chip will then report STOP in CSR0
 * and CSR3 reads back zero.  The LANCE/C-LANCE distinction relies on the
 * documented quirk that the original LANCE cannot set INEA while STOP is
 * set, whereas the C-LANCE can.
 * NOTE(review): sampled excerpt -- the return statements (which presumably
 * yield a chip-type code; confirm against the full source) are missing.
 */
175 lance_probe(struct lnc_softc *sc)
177 write_csr(sc, CSR0, STOP);
179 if ((lnc_inw(sc->rdp) & STOP) && ! (read_csr(sc, CSR3))) {
181 * Check to see if it's a C-LANCE. For the LANCE the INEA bit
182 * cannot be set while the STOP bit is. This restriction is
183 * removed for the C-LANCE.
185 write_csr(sc, CSR0, INEA);
186 if (read_csr(sc, CSR0) & INEA)
/*
 * ether_crc -- compute the CRC-32 (reflected polynomial 0xEDB88320, LSB
 * first) of a 6-byte Ethernet address, as used for the LANCE logical
 * address filter hash.
 * NOTE(review): the inner loop bound reuses MULTICAST_FILTER_LEN (8) where
 * "bits per byte" is meant -- numerically correct but semantically a
 * different constant.  Sampled excerpt: declarations of i/j/addr, the
 * addr >>= 1 shift and the return statement are missing from this view.
 */
194 static __inline u_long
195 ether_crc(const u_char *ether_addr)
197 #define POLYNOMIAL 0xEDB88320UL
199 u_int crc = 0xFFFFFFFFUL;
201 for (i = 0; i < ETHER_ADDR_LEN; i++) {
202 addr = *ether_addr++;
203 for (j = 0; j < MULTICAST_FILTER_LEN; j++) {
204 crc = (crc >> 1) ^ (((crc ^ addr) & 1) ? POLYNOMIAL : 0);
/*
 * lnc_release_resources -- tear down everything allocated at attach time:
 * the interrupt handler and IRQ, the I/O port range, the ISA DRQ, and the
 * busdma map/memory/tag used for the descriptor rings.
 * NOTE(review): sampled excerpt -- the NULL guards that presumably protect
 * each release (confirm against full source) are missing from this view.
 */
213 lnc_release_resources(device_t dev)
215 lnc_softc_t *sc = device_get_softc(dev);
218 bus_teardown_intr(dev, sc->irqres, sc->intrhand);
219 bus_release_resource(dev, SYS_RES_IRQ, sc->irqrid, sc->irqres);
223 bus_release_resource(dev, SYS_RES_IOPORT,
224 sc->portrid, sc->portres);
226 bus_release_resource(dev, SYS_RES_DRQ, sc->drqrid, sc->drqres);
230 bus_dmamap_unload(sc->dmat, sc->dmamap);
231 bus_dmamem_free(sc->dmat, sc->recv_ring, sc->dmamap);
233 bus_dma_tag_destroy(sc->dmat);
238 * Set up the logical address filter for multicast packets
241 lnc_setladrf(struct lnc_softc *sc)
243 struct ifnet *ifp = &sc->arpcom.ac_if;
244 struct ifmultiaddr *ifma;
/*
 * NOTE(review): testing the driver-private sc->flags against the interface
 * flag IFF_ALLMULTI looks suspect -- elsewhere in this file sc->flags is
 * managed with the driver's own LNC_ALLMULTI bit (see lnc_ioctl).  Confirm
 * the two constants do not merely collide by value.
 */
248 if (sc->flags & IFF_ALLMULTI) {
249 for (i=0; i < MULTICAST_FILTER_LEN; i++)
250 sc->init_block->ladrf[i] = 0xFF;
255 * For each multicast address, calculate a crc for that address and
256 * then use the high order 6 bits of the crc as a hash code where
257 * bits 3-5 select the byte of the address filter and bits 0-2 select
258 * the bit within that byte.
261 bzero(sc->init_block->ladrf, MULTICAST_FILTER_LEN);
262 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
263 if (ifma->ifma_addr->sa_family != AF_LINK)
/* Sampled excerpt: the continuation extracting the 6 hash bits is missing. */
266 index = ether_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr))
268 sc->init_block->ladrf[index >> 3] |= 1 << (index & 7);
/*
 * lnc_stop -- halt the controller by setting the STOP bit in CSR0; this
 * aborts all DMA and disables the chip until it is re-initialised.
 */
273 lnc_stop(struct lnc_softc *sc)
275 write_csr(sc, CSR0, STOP);
279 lnc_reset(struct lnc_softc *sc)
/*
 * lnc_free_mbufs -- walk both descriptor rings and free every mbuf still
 * attached to a ring entry.  Sampled excerpt: the loop freeing the local
 * mbuf cache (sc->mbufs) is not visible here.
 */
285 lnc_free_mbufs(struct lnc_softc *sc)
290 * We rely on other routines to keep the buff.mbuf field valid. If
291 * it's not NULL then we assume it points to an allocated mbuf.
294 for (i = 0; i < NDESC(sc->nrdre); i++)
295 if ((sc->recv_ring + i)->buff.mbuf)
296 m_free((sc->recv_ring + i)->buff.mbuf);
298 for (i = 0; i < NDESC(sc->ntdre); i++)
299 if ((sc->trans_ring + i)->buff.mbuf)
300 m_free((sc->trans_ring + i)->buff.mbuf);
/*
 * alloc_mbuf_cluster -- attach a fresh cluster mbuf to receive descriptor
 * `desc'.  Tries the driver's local cluster cache first, otherwise
 * allocates a new mbuf + cluster (non-blocking).  On success it programs
 * the descriptor with the buffer's physical address and hands ownership
 * to the chip (OWN).
 * NOTE(review): md2 holds the buffer length as a negative two's-complement
 * value, per the LANCE descriptor format.  Sampled excerpt: the failure
 * paths and the md0/return statements are missing from this view.
 */
307 alloc_mbuf_cluster(struct lnc_softc *sc, struct host_ring_entry *desc)
309 register struct mds *md = desc->md;
313 /* Try and get cluster off local cache */
314 if (sc->mbuf_count) {
317 sc->mbufs = m->m_next;
318 /* XXX m->m_data = m->m_ext.ext_buf;*/
320 MGET(m, M_DONTWAIT, MT_DATA);
323 MCLGET(m, M_DONTWAIT);
324 if (!m->m_ext.ext_buf) {
331 addr = kvtop(m->m_data);
333 md->md1= ((addr >> 16) & 0xff) | OWN;
334 md->md2 = -(short)(MCLBYTES - sizeof(struct pkthdr));
/*
 * chain_mbufs -- DMA_MBUF receive path: link the cluster mbufs already
 * sitting in the ring descriptors into a packet chain starting at
 * `start_of_packet', replacing each consumed descriptor's buffer with a
 * freshly allocated cluster.  Returns the head mbuf, or NULL if a
 * replacement cluster could not be allocated.
 * NOTE(review): sampled excerpt -- the loop header around the do/while and
 * the final return are missing from this view.
 */
339 static __inline struct mbuf *
340 chain_mbufs(struct lnc_softc *sc, int start_of_packet, int pkt_len)
342 struct mbuf *head, *m;
343 struct host_ring_entry *desc;
346 * Turn head into a pkthdr mbuf --
347 * assumes a pkthdr type mbuf was
348 * allocated to the descriptor
352 desc = sc->recv_ring + start_of_packet;
354 head = desc->buff.mbuf;
355 head->m_flags |= M_PKTHDR;
356 bzero(&head->m_pkthdr, sizeof(head->m_pkthdr));
361 m->m_len = min((MCLBYTES - sizeof(struct pkthdr)), pkt_len);
363 if (alloc_mbuf_cluster(sc, desc))
364 return((struct mbuf *)NULL);
365 INC_MD_PTR(start_of_packet, sc->nrdre)
366 desc = sc->recv_ring + start_of_packet;
367 m->m_next = desc->buff.mbuf;
368 } while (start_of_packet != sc->recv_next);
/*
 * mbuf_packet -- copying receive path (used when the ring buffers live in
 * shared/DMA memory rather than in mbufs): allocate a fresh mbuf chain and
 * bcopy `pkt_len' bytes out of the ring buffers starting at
 * `start_of_packet', then return each consumed descriptor to the chip
 * (OWN).  Returns the head mbuf; on allocation failure the packet is
 * dropped (LNCSTATS drop_packet).
 * NOTE(review): sampled excerpt -- several allocation-failure branches,
 * the pkt_len bookkeeping and the final return are missing from this view.
 */
374 static __inline struct mbuf *
375 mbuf_packet(struct lnc_softc *sc, int start_of_packet, int pkt_len)
378 struct host_ring_entry *start;
379 struct mbuf *head,*m,*m_prev;
380 char *data,*mbuf_data;
384 /* Get a pkthdr mbuf for the start of packet */
385 MGETHDR(head, M_DONTWAIT, MT_DATA);
387 LNCSTATS(drop_packet)
393 start = sc->recv_ring + start_of_packet;
394 /*blen = -(start->md->md2);*/
395 blen = RECVBUFSIZE; /* XXX More PCnet-32 crap */
396 data = start->buff.data;
397 mbuf_data = m->m_data;
399 while (start_of_packet != sc->recv_next) {
401 * If the data left fits in a single buffer then set
402 * blen to the size of the data left.
408 * amount is least of data in current ring buffer and
409 * amount of space left in current mbuf.
411 amount = min(blen, M_TRAILINGSPACE(m));
413 /* mbuf must be empty */
415 MGET(m, M_DONTWAIT, MT_DATA);
420 if (pkt_len >= MINCLSIZE)
421 MCLGET(m, M_DONTWAIT);
424 amount = min(blen, M_TRAILINGSPACE(m));
425 mbuf_data = m->m_data;
427 bcopy(data, mbuf_data, amount);
/* Return the drained descriptor to the controller and advance. */
435 start->md->md1 &= HADR;
436 start->md->md1 |= OWN;
437 start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */
438 INC_MD_PTR(start_of_packet, sc->nrdre)
439 start = sc->recv_ring + start_of_packet;
440 data = start->buff.data;
441 /*blen = -(start->md->md2);*/
442 blen = RECVBUFSIZE; /* XXX More PCnet-32 crap */
/*
 * lnc_rint -- receive-interrupt handler.  Walks the receive ring from
 * sc->recv_next, assembling each completed packet (descriptors from STP to
 * ENP/error), handing good packets to ether_input() and recycling the
 * descriptors of bad ones.  Any ring-consistency violation (OWN or STP in
 * an impossible place) is treated as a driver bug and triggers a reset.
 * Finally acknowledges RINT in CSR0.
 * NOTE(review): sampled excerpt -- the lnc_reset()/return statements on the
 * error paths, several closing braces and some declarations are missing
 * from this view; the control flow must be read against the full source.
 */
450 lnc_rint(struct lnc_softc *sc)
452 struct host_ring_entry *next, *start;
455 struct ether_header *eh;
461 * The LANCE will issue a RINT interrupt when the ownership of the
462 * last buffer of a receive packet has been relinquished by the LANCE.
463 * Therefore, it can be assumed that a complete packet can be found
464 * before hitting buffers that are still owned by the LANCE, if not
465 * then there is a bug in the driver that is causing the descriptors
466 * to get out of sync.
470 if ((sc->recv_ring + sc->recv_next)->md->md1 & OWN) {
471 int unit = sc->arpcom.ac_if.if_unit;
472 log(LOG_ERR, "lnc%d: Receive interrupt with buffer still owned by controller -- Resetting\n", unit);
476 if (!((sc->recv_ring + sc->recv_next)->md->md1 & STP)) {
477 int unit = sc->arpcom.ac_if.if_unit;
478 log(LOG_ERR, "lnc%d: Receive interrupt but not start of packet -- Resetting\n", unit);
485 next = sc->recv_ring + sc->recv_next;
486 while ((flags = next->md->md1) & STP) {
488 /* Make a note of the start of the packet */
489 start_of_packet = sc->recv_next;
492 * Find the end of the packet. Even if not data chaining,
493 * jabber packets can overrun into a second descriptor.
494 * If there is no error, then the ENP flag is set in the last
495 * descriptor of the packet. If there is an error then the ERR
496 * flag will be set in the descriptor where the error occured.
497 * Therefore, to find the last buffer of a packet we search for
501 if (!(flags & (ENP | MDERR))) {
503 INC_MD_PTR(sc->recv_next, sc->nrdre)
504 next = sc->recv_ring + sc->recv_next;
505 flags = next->md->md1;
506 } while (!(flags & (STP | OWN | ENP | MDERR)));
509 int unit = sc->arpcom.ac_if.if_unit;
510 log(LOG_ERR, "lnc%d: Start of packet found before end of previous in receive ring -- Resetting\n", unit);
517 * Looked ahead into a packet still
520 sc->recv_next = start_of_packet;
523 int unit = sc->arpcom.ac_if.if_unit;
524 log(LOG_ERR, "lnc%d: End of received packet not found-- Resetting\n", unit);
/* MCNT includes the FCS; strip it to get the payload length. */
531 pkt_len = (next->md->md3 & MCNT) - FCS_LEN;
533 /* Move pointer onto start of next packet */
534 INC_MD_PTR(sc->recv_next, sc->nrdre)
535 next = sc->recv_ring + sc->recv_next;
538 int unit = sc->arpcom.ac_if.if_unit;
541 log(LOG_ERR, "lnc%d: Receive buffer error\n", unit);
544 /* OFLO only valid if ENP is not set */
545 if (!(flags & ENP)) {
547 log(LOG_ERR, "lnc%d: Receive overflow error \n", unit);
549 } else if (flags & ENP) {
550 if ((sc->arpcom.ac_if.if_flags & IFF_PROMISC)==0) {
552 * FRAM and CRC are valid only if ENP
553 * is set and OFLO is not.
557 log(LOG_ERR, "lnc%d: Framing error\n", unit);
559 * FRAM is only set if there's a CRC
560 * error so avoid multiple messages
562 } else if (flags & CRC) {
564 log(LOG_ERR, "lnc%d: Receive CRC error\n", unit);
/* Errored packet: count it and give all its descriptors back to the chip. */
571 sc->arpcom.ac_if.if_ierrors++;
572 while (start_of_packet != sc->recv_next) {
573 start = sc->recv_ring + start_of_packet;
574 start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */
575 start->md->md1 &= HADR;
576 start->md->md1 |= OWN;
577 INC_MD_PTR(start_of_packet, sc->nrdre)
579 } else { /* Valid packet */
581 sc->arpcom.ac_if.if_ipackets++;
584 if (sc->nic.mem_mode == DMA_MBUF)
585 head = chain_mbufs(sc, start_of_packet, pkt_len);
587 head = mbuf_packet(sc, start_of_packet, pkt_len);
591 * First mbuf in packet holds the
592 * ethernet and packet headers
594 head->m_pkthdr.rcvif = &sc->arpcom.ac_if;
595 head->m_pkthdr.len = pkt_len ;
596 eh = (struct ether_header *) head->m_data;
599 * vmware ethernet hardware emulation loops
600 * packets back to itself, violates IFF_SIMPLEX.
601 * drop it if it is from myself.
603 if (bcmp(eh->ether_shost,
604 sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == 0) {
607 /* Skip over the ether header */
608 head->m_data += sizeof *eh;
609 head->m_len -= sizeof *eh;
610 head->m_pkthdr.len -= sizeof *eh;
612 ether_input(&sc->arpcom.ac_if, eh, head);
615 int unit = sc->arpcom.ac_if.if_unit;
616 log(LOG_ERR,"lnc%d: Packet dropped, no mbufs\n",unit);
617 LNCSTATS(drop_packet)
625 * At this point all completely received packets have been processed
626 * so clear RINT since any packets that have arrived while we were in
627 * here have been dealt with.
630 lnc_outw(sc->rdp, RINT | INEA);
/*
 * lnc_tint -- transmit-interrupt handler.  Walks the transmit ring from
 * sc->trans_next, locating each packet the chip has finished with (STP
 * through ENP/error), updating error and collision statistics, reclaiming
 * descriptors and caching or freeing their mbufs, then clears IFF_OACTIVE
 * so lnc_start can queue more work and acknowledges TINT in CSR0.
 * Ring-consistency violations trigger a reset, as in lnc_rint.
 * NOTE(review): sampled excerpt -- lnc_reset()/return statements, some
 * braces and declarations are missing from this view.
 */
634 lnc_tint(struct lnc_softc *sc)
636 struct host_ring_entry *next, *start;
641 * If the driver is reset in this routine then we return immediately to
642 * the interrupt driver routine. Any interrupts that have occured
643 * since the reset will be dealt with there. sc->trans_next
644 * should point to the start of the first packet that was awaiting
645 * transmission after the last transmit interrupt was dealt with. The
646 * LANCE should have relinquished ownership of that descriptor before
647 * the interrupt. Therefore, sc->trans_next should point to a
648 * descriptor with STP set and OWN cleared. If not then the driver's
649 * pointers are out of sync with the LANCE, which signifies a bug in
650 * the driver. Therefore, the following two checks are really
651 * diagnostic, since if the driver is working correctly they should
656 if ((sc->trans_ring + sc->trans_next)->md->md1 & OWN) {
657 int unit = sc->arpcom.ac_if.if_unit;
658 log(LOG_ERR, "lnc%d: Transmit interrupt with buffer still owned by controller -- Resetting\n", unit);
666 * The LANCE will write the status information for the packet it just
667 * tried to transmit in one of two places. If the packet was
668 * transmitted successfully then the status will be written into the
669 * last descriptor of the packet. If the transmit failed then the
670 * status will be written into the descriptor that was being accessed
671 * when the error occured and all subsequent descriptors in that
672 * packet will have been relinquished by the LANCE.
674 * At this point we know that sc->trans_next points to the start
675 * of a packet that the LANCE has just finished trying to transmit.
676 * We now search for a buffer with either ENP or ERR set.
682 start_of_packet = sc->trans_next;
683 next = sc->trans_ring + sc->trans_next;
686 if (!(next->md->md1 & STP)) {
687 int unit = sc->arpcom.ac_if.if_unit;
688 log(LOG_ERR, "lnc%d: Transmit interrupt but not start of packet -- Resetting\n", unit);
695 * Find end of packet.
698 if (!(next->md->md1 & (ENP | MDERR))) {
700 INC_MD_PTR(sc->trans_next, sc->ntdre)
701 next = sc->trans_ring + sc->trans_next;
702 } while (!(next->md->md1 & (STP | OWN | ENP | MDERR)));
704 if (next->md->md1 & STP) {
705 int unit = sc->arpcom.ac_if.if_unit;
706 log(LOG_ERR, "lnc%d: Start of packet found before end of previous in transmit ring -- Resetting\n", unit);
710 if (next->md->md1 & OWN) {
713 * Looked ahead into a packet still
716 sc->trans_next = start_of_packet;
719 int unit = sc->arpcom.ac_if.if_unit;
720 log(LOG_ERR, "lnc%d: End of transmitted packet not found -- Resetting\n", unit);
727 * Check for ERR first since other flags are irrelevant if an
730 if (next->md->md1 & MDERR) {
732 int unit = sc->arpcom.ac_if.if_unit;
735 sc->arpcom.ac_if.if_oerrors++;
737 if (next->md->md3 & LCOL) {
739 log(LOG_ERR, "lnc%d: Transmit late collision -- Net error?\n", unit);
740 sc->arpcom.ac_if.if_collisions++;
742 * Clear TBUFF since it's not valid when LCOL
745 next->md->md3 &= ~TBUFF;
747 if (next->md->md3 & LCAR) {
749 log(LOG_ERR, "lnc%d: Loss of carrier during transmit -- Net error?\n", unit);
751 if (next->md->md3 & RTRY) {
753 log(LOG_ERR, "lnc%d: Transmit of packet failed after 16 attempts -- TDR = %d\n", unit, ((sc->trans_ring + sc->trans_next)->md->md3 & TDR));
754 sc->arpcom.ac_if.if_collisions += 16;
756 * Clear TBUFF since it's not valid when RTRY
759 next->md->md3 &= ~TBUFF;
762 * TBUFF is only valid if neither LCOL nor RTRY are set.
763 * We need to check UFLO after LCOL and RTRY so that we
764 * know whether or not TBUFF is valid. If either are
765 * set then TBUFF will have been cleared above. A
766 * UFLO error will turn off the transmitter so we
771 if (next->md->md3 & UFLO) {
774 * If an UFLO has occured it's possibly due
777 if (next->md->md3 & TBUFF) {
779 log(LOG_ERR, "lnc%d: Transmit buffer error -- Resetting\n", unit);
781 log(LOG_ERR, "lnc%d: Transmit underflow error -- Resetting\n", unit);
/* Skip the remaining descriptors of the failed packet. */
786 INC_MD_PTR(sc->trans_next, sc->ntdre)
787 next = sc->trans_ring + sc->trans_next;
788 } while (!(next->md->md1 & STP) && (sc->trans_next != sc->next_to_send));
792 * Since we check for ERR first then if we get here
793 * the packet was transmitted correctly. There may
794 * still have been non-fatal errors though.
795 * Don't bother checking for DEF, waste of time.
798 sc->arpcom.ac_if.if_opackets++;
800 if (next->md->md1 & MORE) {
802 sc->arpcom.ac_if.if_collisions += 2;
806 * ONE is invalid if LCOL is set. If LCOL was set then
807 * ERR would have also been set and we would have
808 * returned from lnc_tint above. Therefore we can
809 * assume if we arrive here that ONE is valid.
813 if (next->md->md1 & ONE) {
815 sc->arpcom.ac_if.if_collisions++;
817 INC_MD_PTR(sc->trans_next, sc->ntdre)
818 next = sc->trans_ring + sc->trans_next;
822 * Clear descriptors and free any mbufs.
826 start = sc->trans_ring + start_of_packet;
827 start->md->md1 &= HADR;
828 if (sc->nic.mem_mode == DMA_MBUF) {
829 /* Cache clusters on a local queue */
830 if ((start->buff.mbuf->m_flags & M_EXT) && (sc->mbuf_count < MBUF_CACHE_LIMIT)) {
831 if (sc->mbuf_count) {
832 start->buff.mbuf->m_next = sc->mbufs;
833 sc->mbufs = start->buff.mbuf;
835 sc->mbufs = start->buff.mbuf;
837 start->buff.mbuf = 0;
839 m_free(start->buff.mbuf);
840 start->buff.mbuf = NULL;
843 sc->pending_transmits--;
844 INC_MD_PTR(start_of_packet, sc->ntdre)
845 }while (start_of_packet != sc->trans_next);
848 * There's now at least one free descriptor
849 * in the ring so indicate that we can accept
850 * more packets again.
853 sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE;
857 } while (sc->pending_transmits && !(next->md->md1 & OWN));
860 * Clear TINT since we've dealt with all
861 * the completed transmissions.
864 lnc_outw(sc->rdp, TINT | INEA);
/*
 * lnc_attach_common -- shared attach path: set the chip's default mode,
 * fill in the ifnet/arpcom entries (name, flags, handler vectors), read
 * the station address out of the PROM, attach the Ethernet interface and
 * print an identification banner.
 * NOTE(review): sampled excerpt -- the switch body setting `skip' per chip
 * variant and the return statement are missing; the use of `skip' in the
 * PROM loop below depends on that missing code.
 */
868 lnc_attach_common(device_t dev)
870 int unit = device_get_unit(dev);
871 lnc_softc_t *sc = device_get_softc(dev);
875 switch (sc->nic.ident) {
885 /* Set default mode */
886 sc->nic.mode = NORMAL;
888 /* Fill in arpcom structure entries */
890 sc->arpcom.ac_if.if_softc = sc;
891 sc->arpcom.ac_if.if_name = "lnc";
892 sc->arpcom.ac_if.if_unit = unit;
893 sc->arpcom.ac_if.if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
894 sc->arpcom.ac_if.if_timer = 0;
895 sc->arpcom.ac_if.if_output = ether_output;
896 sc->arpcom.ac_if.if_start = lnc_start;
897 sc->arpcom.ac_if.if_ioctl = lnc_ioctl;
898 sc->arpcom.ac_if.if_watchdog = lnc_watchdog;
899 sc->arpcom.ac_if.if_init = lnc_init;
900 sc->arpcom.ac_if.if_snd.ifq_maxlen = IFQ_MAXLEN;
902 /* Extract MAC address from PROM */
903 for (i = 0; i < ETHER_ADDR_LEN; i++)
904 sc->arpcom.ac_enaddr[i] = lnc_inb(i * skip);
907 * XXX -- should check return status of if_attach
910 ether_ifattach(&sc->arpcom.ac_if, ETHER_BPF_SUPPORTED);
912 printf("lnc%d: ", unit);
913 if (sc->nic.ic == LANCE || sc->nic.ic == C_LANCE)
915 nic_ident[sc->nic.ident], ic_ident[sc->nic.ic]);
917 printf("%s", ic_ident[sc->nic.ic]);
918 printf(" address %6D\n", sc->arpcom.ac_enaddr, ":");
/*
 * lnc_init -- (re)initialise the controller: stop the chip, lay out the
 * init block, descriptor rings and (for non-mbuf modes) the data buffers
 * in the driver memory area, fill in the initialisation block (mode,
 * station address, ring addresses/lengths), hand its physical address to
 * the chip via CSR1/CSR2, clear CSR3, then poll for IDON and start the
 * transmitter/receiver (STRT | INEA).
 * NOTE(review): sampled excerpt -- the lnc_init signature line, splimp()
 * bracketing, several braces and the end of the function are missing from
 * this view.  The (int) pointer casts in the alignment code assume a
 * 32-bit kernel; they are not 64-bit clean.
 */
927 struct lnc_softc *sc = xsc;
931 /* Check that interface has valid address */
933 if (TAILQ_EMPTY(&sc->arpcom.ac_if.if_addrhead)) { /* XXX unlikely */
934 printf("XXX no address?\n");
938 /* Shut down interface */
942 sc->arpcom.ac_if.if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; /* XXX??? */
945 * This sets up the memory area for the controller. Memory is set up for
946 * the initialisation block (12 words of contiguous memory starting
947 * on a word boundary),the transmit and receive ring structures (each
948 * entry is 4 words long and must start on a quadword boundary) and
951 * The alignment tests are particularly paranoid.
955 sc->trans_ring = sc->recv_ring + NDESC(sc->nrdre);
958 if (sc->nic.mem_mode == SHMEM)
959 lnc_mem = (char *) sc->nic.iobase;
961 lnc_mem = (char *) (sc->trans_ring + NDESC(sc->ntdre));
963 lnc_mem = (char *)(((int)lnc_mem + 1) & ~1);
964 sc->init_block = (struct init_block *) ((int) lnc_mem & ~1);
965 lnc_mem = (char *) (sc->init_block + 1);
966 lnc_mem = (char *)(((int)lnc_mem + 7) & ~7);
968 /* Initialise pointers to descriptor entries */
969 for (i = 0; i < NDESC(sc->nrdre); i++) {
970 (sc->recv_ring + i)->md = (struct mds *) lnc_mem;
971 lnc_mem += sizeof(struct mds);
973 for (i = 0; i < NDESC(sc->ntdre); i++) {
974 (sc->trans_ring + i)->md = (struct mds *) lnc_mem;
975 lnc_mem += sizeof(struct mds);
978 /* Initialise the remaining ring entries */
980 if (sc->nic.mem_mode == DMA_MBUF) {
985 /* Free previously allocated mbufs */
986 if (sc->flags & LNC_INITIALISED)
990 for (i = 0; i < NDESC(sc->nrdre); i++) {
991 if (alloc_mbuf_cluster(sc, sc->recv_ring+i)) {
992 log(LOG_ERR, "Initialisation failed -- no mbufs\n");
998 for (i = 0; i < NDESC(sc->ntdre); i++) {
999 (sc->trans_ring + i)->buff.mbuf = 0;
1000 (sc->trans_ring + i)->md->md0 = 0;
1001 (sc->trans_ring + i)->md->md1 = 0;
1002 (sc->trans_ring + i)->md->md2 = 0;
1003 (sc->trans_ring + i)->md->md3 = 0;
1006 for (i = 0; i < NDESC(sc->nrdre); i++) {
1007 (sc->recv_ring + i)->md->md0 = kvtop(lnc_mem);
1008 (sc->recv_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff) | OWN;
1009 (sc->recv_ring + i)->md->md2 = -RECVBUFSIZE;
1010 (sc->recv_ring + i)->md->md3 = 0;
1011 (sc->recv_ring + i)->buff.data = lnc_mem;
1012 lnc_mem += RECVBUFSIZE;
1014 for (i = 0; i < NDESC(sc->ntdre); i++) {
1015 (sc->trans_ring + i)->md->md0 = kvtop(lnc_mem);
1016 (sc->trans_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff);
1017 (sc->trans_ring + i)->md->md2 = 0;
1018 (sc->trans_ring + i)->md->md3 = 0;
1019 (sc->trans_ring + i)->buff.data = lnc_mem;
1020 lnc_mem += TRANSBUFSIZE;
1024 sc->next_to_send = 0;
1026 /* Set up initialisation block */
1028 sc->init_block->mode = sc->nic.mode;
1030 for (i = 0; i < ETHER_ADDR_LEN; i++)
1031 sc->init_block->padr[i] = sc->arpcom.ac_enaddr[i];
1035 sc->init_block->rdra = kvtop(sc->recv_ring->md);
1036 sc->init_block->rlen = ((kvtop(sc->recv_ring->md) >> 16) & 0xff) | (sc->nrdre << 13);
1037 sc->init_block->tdra = kvtop(sc->trans_ring->md);
1038 sc->init_block->tlen = ((kvtop(sc->trans_ring->md) >> 16) & 0xff) | (sc->ntdre << 13);
1041 /* Set flags to show that the memory area is valid */
1042 sc->flags |= LNC_INITIALISED;
1044 sc->pending_transmits = 0;
1046 /* Give the LANCE the physical address of the initialisation block */
1048 if (sc->nic.ic == PCnet_Home) {
1050 /* Set PHY_SEL to HomeRun */
1051 media = read_bcr(sc, BCR49);
1054 write_bcr(sc, BCR49, media);
1057 write_csr(sc, CSR1, kvtop(sc->init_block));
1058 write_csr(sc, CSR2, (kvtop(sc->init_block) >> 16) & 0xff);
1061 * Depending on which controller this is, CSR3 has different meanings.
1062 * For the Am7990 it controls DMA operations, for the Am79C960 it
1063 * controls interrupt masks and transmitter algorithms. In either
1064 * case, none of the flags are set.
1068 write_csr(sc, CSR3, 0);
1070 /* Let's see if it starts */
1072 printf("Enabling lnc interrupts\n");
1073 sc->arpcom.ac_if.if_timer = 10;
1074 write_csr(sc, CSR0, INIT|INEA);
1078 * Now that the initialisation is complete there's no reason to
1079 * access anything except CSR0, so we leave RAP pointing there
1080 * so we can just access RDP from now on, saving an outw each
1084 write_csr(sc, CSR0, INIT);
1085 for(i=0; i < 1000; i++)
1086 if (read_csr(sc, CSR0) & IDON)
1089 if (read_csr(sc, CSR0) & IDON) {
1091 * Enable interrupts, start the LANCE, mark the interface as
1092 * running and transmit any pending packets.
1094 write_csr(sc, CSR0, STRT | INEA);
1095 sc->arpcom.ac_if.if_flags |= IFF_RUNNING;
1096 sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE;
1097 lnc_start(&sc->arpcom.ac_if);
1099 log(LOG_ERR, "lnc%d: Initialisation failed\n",
1100 sc->arpcom.ac_if.if_unit);
1106 * The interrupt flag (INTR) will be set and provided that the interrupt enable
1107 * flag (INEA) is also set, the interrupt pin will be driven low when any of
1108 * the following occur:
1110 * 1) Completion of the initialisation routine (IDON). 2) The reception of a
1111 * packet (RINT). 3) The transmission of a packet (TINT). 4) A transmitter
1112 * timeout error (BABL). 5) A missed packet (MISS). 6) A memory error (MERR).
1114 * The interrupt flag is cleared when all of the above conditions are cleared.
1116 * If the driver is reset from this routine then it first checks to see if any
1117 * interrupts have ocurred since the reset and handles them before returning.
1118 * This is because the NIC may signify a pending interrupt in CSR0 using the
1119 * INTR flag even if a hardware interrupt is currently inhibited (at least I
1120 * think it does from reading the data sheets). We may as well deal with
1121 * these pending interrupts now rather than get the overhead of another
1122 * hardware interrupt immediately upon returning from the interrupt handler.
/*
 * lncintr -- main interrupt handler: loop while CSR0 shows INTR, ack the
 * flags early, then dispatch IDON/BABL/MISS/MERR/RINT/TINT handling.
 * NOTE(review): sampled excerpt -- the function signature line and the
 * dispatch branches between the numbered lines below are missing from
 * this view.
 */
1129 lnc_softc_t *sc = arg;
1130 int unit = sc->arpcom.ac_if.if_unit;
1134 * INEA is the only bit that can be cleared by writing a 0 to it so
1135 * we have to include it in any writes that clear other flags.
1138 while ((csr0 = lnc_inw(sc->rdp)) & INTR) {
1141 * Clear interrupt flags early to avoid race conditions. The
1142 * controller can still set these flags even while we're in
1143 * this interrupt routine. If the flag is still set from the
1144 * event that caused this interrupt any new events will
1148 lnc_outw(sc->rdp, csr0);
1149 /*lnc_outw(sc->rdp, IDON | CERR | BABL | MISS | MERR | RINT | TINT | INEA);*/
1154 sc->arpcom.ac_if.if_timer = 0;
1155 write_csr(sc, CSR0, STRT | INEA);
1156 sc->arpcom.ac_if.if_flags |= IFF_RUNNING;
1157 sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE;
1158 lnc_start(&sc->arpcom.ac_if);
1165 log(LOG_ERR, "lnc%d: Heartbeat error -- SQE test failed\n", unit);
1169 log(LOG_ERR, "lnc%d: Babble error - more than 1519 bytes transmitted\n", unit);
1171 sc->arpcom.ac_if.if_oerrors++;
1174 log(LOG_ERR, "lnc%d: Missed packet -- no receive buffer\n", unit);
1176 sc->arpcom.ac_if.if_ierrors++;
1179 log(LOG_ERR, "lnc%d: Memory error -- Resetting\n", unit);
1191 sc->arpcom.ac_if.if_timer = 0;
1196 * If there's room in the transmit descriptor ring then queue
1197 * some more transmit packets.
1200 if (!(sc->arpcom.ac_if.if_flags & IFF_OACTIVE))
1201 lnc_start(&sc->arpcom.ac_if);
/*
 * mbuf_to_buffer -- flatten an mbuf chain into the contiguous `buffer'.
 * NOTE(review): sampled excerpt -- the buffer/len advance statements and
 * the return (presumably the total byte count; confirm against callers,
 * which use the result as a length) are missing from this view.
 */
1206 mbuf_to_buffer(struct mbuf *m, char *buffer)
1211 for( ; m; m = m->m_next) {
1212 bcopy(mtod(m, caddr_t), buffer, m->m_len);
/*
 * chain_to_cluster -- copy an entire mbuf chain into a single new cluster
 * mbuf (non-blocking allocation) and return it; used when a transmit
 * chain needs more ring descriptors than are free.
 * NOTE(review): sampled excerpt -- the failure paths, m_freem of the old
 * chain and the return statements are missing from this view.
 */
1220 static __inline struct mbuf *
1221 chain_to_cluster(struct mbuf *m)
1225 MGET(new, M_DONTWAIT, MT_DATA);
1227 MCLGET(new, M_DONTWAIT);
1228 if (new->m_ext.ext_buf) {
1229 new->m_len = mbuf_to_buffer(m, new->m_data);
1239 * IFF_OACTIVE and IFF_RUNNING are checked in ether_output so it's redundant
1240 * to check them again since we wouldn't have got here if they were not
1241 * appropriately set. This is also called from lnc_init and lncintr but the
1242 * flags should be ok at those points too.
/*
 * lnc_start -- dequeue packets from the send queue and map them onto
 * transmit descriptors.  In DMA_MBUF mode each mbuf gets its own
 * descriptor (compacting the chain to one cluster when the ring is short,
 * and padding the first buffer to >= 100 bytes for LANCE/C-LANCE); in
 * other modes the chain is bcopy'd into the ring buffer.  OWN bits are set
 * in reverse order so the chip cannot start mid-packet, then TDMD forces
 * an immediate transmit poll.  Sets IFF_OACTIVE when the ring fills.
 * NOTE(review): sampled excerpt -- loop/brace structure, the head==NULL
 * exit and the watchdog-timer assignment are missing from this view.
 */
1246 lnc_start(struct ifnet *ifp)
1249 struct lnc_softc *sc = ifp->if_softc;
1250 struct host_ring_entry *desc;
1253 struct mbuf *head, *m;
1256 int no_entries_needed;
1260 IF_DEQUEUE(&sc->arpcom.ac_if.if_snd, head);
1264 if (sc->nic.mem_mode == DMA_MBUF) {
1266 no_entries_needed = 0;
1267 for (m=head; m; m = m->m_next)
1268 no_entries_needed++;
1271 * We try and avoid bcopy as much as possible
1272 * but there are two cases when we use it.
1274 * 1) If there are not enough free entries in the ring
1275 * to hold each mbuf in the chain then compact the
1276 * chain into a single cluster.
1278 * 2) The Am7990 and Am79C90 must not have less than
1279 * 100 bytes in the first descriptor of a chained
1280 * packet so it's necessary to shuffle the mbuf
1281 * contents to ensure this.
1285 if (no_entries_needed > (NDESC(sc->ntdre) - sc->pending_transmits)) {
1286 if (!(head = chain_to_cluster(head))) {
1287 log(LOG_ERR, "lnc%d: Couldn't get mbuf for transmit packet -- Resetting \n ",ifp->if_unit);
1291 } else if ((sc->nic.ic == LANCE) || (sc->nic.ic == C_LANCE)) {
1292 if ((head->m_len < 100) && (head->m_next)) {
1293 len = 100 - head->m_len;
1294 if (M_TRAILINGSPACE(head) < len) {
1296 * Move data to start of data
1297 * area. We assume the first
1298 * mbuf has a packet header
1299 * and is not a cluster.
1301 bcopy((caddr_t)head->m_data, (caddr_t)head->m_pktdat, head->m_len);
1302 head->m_data = head->m_pktdat;
1305 while (m && (len > 0)) {
1306 chunk = min(len, m->m_len);
1307 bcopy(mtod(m, caddr_t), mtod(head, caddr_t) + head->m_len, chunk);
1309 head->m_len += chunk;
1312 if (m->m_len <= 0) {
1320 tmp = sc->next_to_send;
1323 * On entering this loop we know that tmp points to a
1324 * descriptor with a clear OWN bit.
1327 desc = sc->trans_ring + tmp;
1328 len = ETHER_MIN_LEN;
1329 for (m = head; m; m = m->m_next) {
1330 desc->buff.mbuf = m;
1331 addr = kvtop(m->m_data);
1332 desc->md->md0 = addr;
1333 desc->md->md1 = ((addr >> 16) & 0xff);
1335 desc->md->md2 = -m->m_len;
1336 sc->pending_transmits++;
1339 INC_MD_PTR(tmp, sc->ntdre)
1340 desc = sc->trans_ring + tmp;
1343 end_of_packet = tmp;
1344 DEC_MD_PTR(tmp, sc->ntdre)
1345 desc = sc->trans_ring + tmp;
1346 desc->md->md1 |= ENP;
1349 desc->md->md2 -= len;
1352 * Set OWN bits in reverse order, otherwise the Lance
1353 * could start sending the packet before all the
1354 * buffers have been relinquished by the host.
1357 while (tmp != sc->next_to_send) {
1358 desc->md->md1 |= OWN;
1359 DEC_MD_PTR(tmp, sc->ntdre)
1360 desc = sc->trans_ring + tmp;
1362 sc->next_to_send = end_of_packet;
1363 desc->md->md1 |= STP | OWN;
1365 sc->pending_transmits++;
1366 desc = sc->trans_ring + sc->next_to_send;
1367 len = mbuf_to_buffer(head, desc->buff.data);
1369 desc->md->md2 = -max(len, ETHER_MIN_LEN - ETHER_CRC_LEN);
1370 desc->md->md1 |= OWN | STP | ENP;
1371 INC_MD_PTR(sc->next_to_send, sc->ntdre)
1374 /* Force an immediate poll of the transmit ring */
1375 lnc_outw(sc->rdp, TDMD | INEA);
1378 * Set a timer so if the buggy Am7990.h shuts
1379 * down we can wake it up.
1384 if (sc->arpcom.ac_if.if_bpf)
1385 bpf_mtap(&sc->arpcom.ac_if, head);
1387 if (sc->nic.mem_mode != DMA_MBUF)
1390 } while (sc->pending_transmits < NDESC(sc->ntdre));
1393 * Transmit ring is full so set IFF_OACTIVE
1394 * since we can't buffer any more packets.
1397 sc->arpcom.ac_if.if_flags |= IFF_OACTIVE;
1398 LNCSTATS(trans_ring_full)
/*
 * lnc_ioctl -- interface ioctl handler.  Delegates address/MTU handling to
 * ether_ioctl, and for SIOCSIFFLAGS reconciles promiscuous mode (PROM bit
 * in the chip's mode word), the all-multicast state (LNC_ALLMULTI in the
 * driver flags) and the up/down transitions (start vs. stop).
 * NOTE(review): sampled excerpt -- the switch/case skeleton, the splimp
 * bracketing, multicast-list cases and the return are missing from this
 * view; the visible lines are the individual case bodies.
 */
1402 lnc_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
1405 struct lnc_softc *sc = ifp->if_softc;
1414 error = ether_ioctl(ifp, command, data);
1419 if (ifp->if_flags & IFF_DEBUG)
1424 if (ifp->if_flags & IFF_PROMISC) {
1425 if (!(sc->nic.mode & PROM)) {
1426 sc->nic.mode |= PROM;
1429 } else if (sc->nic.mode & PROM) {
1430 sc->nic.mode &= ~PROM;
1434 if ((ifp->if_flags & IFF_ALLMULTI) &&
1435 !(sc->flags & LNC_ALLMULTI)) {
1436 sc->flags |= LNC_ALLMULTI;
1438 } else if (!(ifp->if_flags & IFF_ALLMULTI) &&
1439 (sc->flags & LNC_ALLMULTI)) {
1440 sc->flags &= ~LNC_ALLMULTI;
1444 if ((ifp->if_flags & IFF_UP) == 0 &&
1445 (ifp->if_flags & IFF_RUNNING) != 0) {
1447 * If interface is marked down and it is running,
1451 ifp->if_flags &= ~IFF_RUNNING;
1452 } else if ((ifp->if_flags & IFF_UP) != 0 &&
1453 (ifp->if_flags & IFF_RUNNING) == 0) {
1455 * If interface is marked up and it is stopped, then
/*
 * lnc_watchdog -- transmit watchdog handler.  Invoked when a pending
 * transmit has not completed in time; logs the timeout and resets the
 * controller to recover.  (Opening/closing braces are on elided lines.)
 */
1474 lnc_watchdog(struct ifnet *ifp)
1476 log(LOG_ERR, "lnc%d: Device timeout -- Resetting\n", ifp->if_unit);
1478 lnc_reset(ifp->if_softc);
/*
 * lnc_dump_state -- debugging aid: print the full driver and NIC
 * state (host-side ring bookkeeping, both descriptor rings as the
 * LANCE sees them, the init block, and CSR0-CSR3).  Reading the CSRs
 * moves the Register Address Port, so RAP is restored to CSR0 at the
 * end.  (Some surrounding lines are elided from this view.)
 */
1483 lnc_dump_state(struct lnc_softc *sc)
1487 printf("\nDriver/NIC [%d] state dump\n", sc->arpcom.ac_if.if_unit);
1488 printf("Memory access mode: %b\n", sc->nic.mem_mode, MEM_MODES);
1489 printf("Host memory\n");
1490 printf("-----------\n");
/* Host-side view of the receive descriptor ring. */
1492 printf("Receive ring: base = %p, next = %p\n",
1493 (void *)sc->recv_ring, (void *)(sc->recv_ring + sc->recv_next));
1494 for (i = 0; i < NDESC(sc->nrdre); i++)
1495 printf("\t%d:%p md = %p buff = %p\n",
1496 i, (void *)(sc->recv_ring + i),
1497 (void *)(sc->recv_ring + i)->md,
1498 (void *)(sc->recv_ring + i)->buff.data);
/* Host-side view of the transmit descriptor ring. */
1500 printf("Transmit ring: base = %p, next = %p\n",
1501 (void *)sc->trans_ring, (void *)(sc->trans_ring + sc->trans_next));
1502 for (i = 0; i < NDESC(sc->ntdre); i++)
1503 printf("\t%d:%p md = %p buff = %p\n",
1504 i, (void *)(sc->trans_ring + i),
1505 (void *)(sc->trans_ring + i)->md,
1506 (void *)(sc->trans_ring + i)->buff.data);
1507 printf("Lance memory (may be on host(DMA) or card(SHMEM))\n");
1508 printf("Init block = %p\n", (void *)sc->init_block);
1509 printf("\tmode = %b rlen:rdra = %x:%x tlen:tdra = %x:%x\n",
1510 sc->init_block->mode, INIT_MODE, sc->init_block->rlen,
1511 sc->init_block->rdra, sc->init_block->tlen, sc->init_block->tdra);
/* Raw receive descriptors: md1's low byte + md0 form the buffer
 * address, md2 is the negated buffer length (BCNT), md3 the message
 * byte count (MCNT); md1's high byte holds the status flags. */
1512 printf("Receive descriptor ring\n");
1513 for (i = 0; i < NDESC(sc->nrdre); i++)
1514 printf("\t%d buffer = 0x%x%x, BCNT = %d,\tMCNT = %u,\tflags = %b\n",
1515 i, ((sc->recv_ring + i)->md->md1 & HADR),
1516 (sc->recv_ring + i)->md->md0,
1517 -(short) (sc->recv_ring + i)->md->md2,
1518 (sc->recv_ring + i)->md->md3,
1519 (((sc->recv_ring + i)->md->md1 & ~HADR) >> 8), RECV_MD1);
/* Raw transmit descriptors, decoding both status words (md1, md3). */
1520 printf("Transmit descriptor ring\n")
1521 for (i = 0; i < NDESC(sc->ntdre); i++)
1522 printf("\t%d buffer = 0x%x%x, BCNT = %d,\tflags = %b %b\n",
1523 i, ((sc->trans_ring + i)->md->md1 & HADR),
1524 (sc->trans_ring + i)->md->md0,
1525 -(short) (sc->trans_ring + i)->md->md2,
1526 ((sc->trans_ring + i)->md->md1 >> 8), TRANS_MD1,
1527 ((sc->trans_ring + i)->md->md3 >> 10), TRANS_MD3);
1528 printf("\nnext_to_send = %x\n", sc->next_to_send);
/* %b consumes two arguments (value + decode string), so the five
 * arguments match the four conversions here. */
1529 printf("\n CSR0 = %b CSR1 = %x CSR2 = %x CSR3 = %x\n\n",
1530 read_csr(sc, CSR0), CSR0_FLAGS, read_csr(sc, CSR1),
1531 read_csr(sc, CSR2), read_csr(sc, CSR3));
1533 /* Set RAP back to CSR0 */
1534 lnc_outw(sc->rap, CSR0);
/*
 * mbuf_dump_chain -- debugging aid: walk an mbuf chain and log every
 * header field of each mbuf, plus the packet-header and external-
 * storage details when the corresponding flags are set.
 * NOTE(review): the do/while opener, some log() call heads and the
 * function's closing brace are on lines elided from this view.
 */
1538 mbuf_dump_chain(struct mbuf * m)
/* printf-%b decode string for the mh_flags bitfield. */
1541 #define MBUF_FLAGS \
1542 "\20\1M_EXT\2M_PKTHDR\3M_EOR\4UNKNOWN\5M_BCAST\6M_MCAST"
1545 log(LOG_DEBUG, "m == NULL\n");
1547 log(LOG_DEBUG, "m = %p\n", (void *)m);
1548 log(LOG_DEBUG, "m_hdr.mh_next = %p\n",
1549 (void *)m->m_hdr.mh_next);
1550 log(LOG_DEBUG, "m_hdr.mh_nextpkt = %p\n",
1551 (void *)m->m_hdr.mh_nextpkt);
1552 log(LOG_DEBUG, "m_hdr.mh_len = %d\n", m->m_hdr.mh_len);
1553 log(LOG_DEBUG, "m_hdr.mh_data = %p\n",
1554 (void *)m->m_hdr.mh_data);
1555 log(LOG_DEBUG, "m_hdr.mh_type = %d\n", m->m_hdr.mh_type);
1556 log(LOG_DEBUG, "m_hdr.mh_flags = %b\n", m->m_hdr.mh_flags,
/* Plain mbuf: the data area is the internal databuf. */
1558 if (!(m->m_hdr.mh_flags & (M_PKTHDR | M_EXT)))
1559 log(LOG_DEBUG, "M_dat.M_databuf = %p\n",
1560 (void *)m->M_dat.M_databuf);
/* First mbuf of a packet: also dump the packet header fields. */
1562 if (m->m_hdr.mh_flags & M_PKTHDR) {
1563 log(LOG_DEBUG, "M_dat.MH.MH_pkthdr.len = %d\n",
1564 m->M_dat.MH.MH_pkthdr.len);
1566 "M_dat.MH.MH_pkthdr.rcvif = %p\n",
1567 (void *)m->M_dat.MH.MH_pkthdr.rcvif);
1568 if (!(m->m_hdr.mh_flags & M_EXT))
1570 "M_dat.MH.MH_dat.MH_databuf = %p\n",
1571 (void *)m->M_dat.MH.MH_dat.MH_databuf);
/* External storage: dump the ext buffer, free routine and size. */
1573 if (m->m_hdr.mh_flags & M_EXT) {
1575 "M_dat.MH.MH_dat.MH_ext.ext_buff %p\n",
1576 (void *)m->M_dat.MH.MH_dat.MH_ext.ext_buf);
1578 "M_dat.MH.MH_dat.MH_ext.ext_free %p\n",
1579 (void *)m->M_dat.MH.MH_dat.MH_ext.ext_free);
1581 "M_dat.MH.MH_dat.MH_ext.ext_size %d\n",
1582 m->M_dat.MH.MH_dat.MH_ext.ext_size);
/* Advance to the next mbuf in the chain. */
1585 } while ((m = m->m_next) != NULL);