2 * Copyright (c) 1997, 1998, 1999, 2000
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
32 * $OpenBSD: if_sk.c,v 1.129 2006/10/16 12:30:08 tom Exp $
33 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
34 * $DragonFly: src/sys/dev/netif/sk/if_sk.c,v 1.50 2006/12/20 18:14:39 dillon Exp $
38 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
40 * Permission to use, copy, modify, and distribute this software for any
41 * purpose with or without fee is hereby granted, provided that the above
42 * copyright notice and this permission notice appear in all copies.
44 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
45 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
46 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
47 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
48 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
49 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
50 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55 * the SK-984x series adapters, both single port and dual port.
57 * The XaQti XMAC II datasheet,
58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59 * The SysKonnect GEnesis manual, http://www.syskonnect.com
61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63 * convenience to others until Vitesse corrects this problem:
65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
67 * Written by Bill Paul <wpaul@ee.columbia.edu>
68 * Department of Electrical Engineering
69 * Columbia University, New York City
73 * The SysKonnect gigabit ethernet adapters consist of two main
74 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
75 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
76 * components and a PHY while the GEnesis controller provides a PCI
77 * interface with DMA support. Each card may have between 512K and
78 * 2MB of SRAM on board depending on the configuration.
80 * The SysKonnect GEnesis controller can have either one or two XMAC
81 * chips connected to it, allowing single or dual port NIC configurations.
82 * SysKonnect has the distinction of being the only vendor on the market
83 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
84 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
85 * XMAC registers. This driver takes advantage of these features to allow
86 * both XMACs to operate as independent interfaces.
89 #include <sys/param.h>
91 #include <sys/endian.h>
92 #include <sys/in_cksum.h>
93 #include <sys/kernel.h>
95 #include <sys/malloc.h>
96 #include <sys/queue.h>
98 #include <sys/serialize.h>
99 #include <sys/socket.h>
100 #include <sys/sockio.h>
103 #include <net/ethernet.h>
105 #include <net/if_arp.h>
106 #include <net/if_dl.h>
107 #include <net/if_media.h>
108 #include <net/ifq_var.h>
109 #include <net/vlan/if_vlan_var.h>
111 #include <netinet/ip.h>
112 #include <netinet/udp.h>
114 #include <dev/netif/mii_layer/mii.h>
115 #include <dev/netif/mii_layer/miivar.h>
116 #include <dev/netif/mii_layer/brgphyreg.h>
118 #include <bus/pci/pcireg.h>
119 #include <bus/pci/pcivar.h>
120 #include <bus/pci/pcidevs.h>
122 #include <dev/netif/sk/if_skreg.h>
123 #include <dev/netif/sk/yukonreg.h>
124 #include <dev/netif/sk/xmaciireg.h>
125 #include <dev/netif/sk/if_skvar.h>
127 #include "miibus_if.h"
137 /* supported device vendors */
/*
 * PCI vendor/device ID table consulted by skc_probe().  Each entry pairs
 * an ID tuple with the human-readable description passed to
 * device_set_desc().
 * NOTE(review): the skc_vid/skc_did field declarations, several name
 * strings, and the table terminator are missing from this view of the
 * source -- verify against the complete file.
 */
138 static const struct skc_type {
141 const char *skc_name;
143 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C940,
145 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C940B,
148 { PCI_VENDOR_CNET, PCI_PRODUCT_CNET_GIGACARD,
151 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE530T_A1,
152 "D-Link DGE-530T A1" },
153 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE530T_B1,
154 "D-Link DGE-530T B1" },
156 { PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1032,
157 "Linksys EG1032 v2" },
158 { PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1064,
161 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON,
162 "Marvell Yukon 88E8001/8003/8010" },
163 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_BELKIN,
166 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SKNET_GE,
167 "SysKonnect SK-NET" },
168 { PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9821v2,
169 "SysKonnect SK9821 v2" },
/*
 * Forward declarations for the file-scope driver entry points:
 * newbus probe/attach/detach, interrupt and ring handlers, MII
 * accessors (generic dispatchers plus XMAC/Marvell backends),
 * filter programming, and DMA setup/teardown helpers.
 */
174 static int skc_probe(device_t);
175 static int skc_attach(device_t);
176 static int skc_detach(device_t);
177 static void skc_shutdown(device_t);
178 static int sk_probe(device_t);
179 static int sk_attach(device_t);
180 static int sk_detach(device_t);
181 static void sk_tick(void *);
182 static void sk_yukon_tick(void *);
183 static void sk_intr(void *);
184 static void sk_intr_bcom(struct sk_if_softc *);
185 static void sk_intr_xmac(struct sk_if_softc *);
186 static void sk_intr_yukon(struct sk_if_softc *);
187 static void sk_rxeof(struct sk_if_softc *);
188 static void sk_txeof(struct sk_if_softc *);
189 static int sk_encap(struct sk_if_softc *, struct mbuf *, uint32_t *);
190 static void sk_start(struct ifnet *);
191 static int sk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
192 static void sk_init(void *);
193 static void sk_init_xmac(struct sk_if_softc *);
194 static void sk_init_yukon(struct sk_if_softc *);
195 static void sk_stop(struct sk_if_softc *);
196 static void sk_watchdog(struct ifnet *);
197 static int sk_ifmedia_upd(struct ifnet *);
198 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
199 static void sk_reset(struct sk_softc *);
200 static int sk_newbuf(struct sk_if_softc *, struct sk_chain *,
202 static int sk_jpool_alloc(device_t);
203 static void sk_jpool_free(struct sk_if_softc *);
204 static struct sk_jpool_entry
205 *sk_jalloc(struct sk_if_softc *);
206 static void sk_jfree(void *);
207 static void sk_jref(void *);
208 static int sk_init_rx_ring(struct sk_if_softc *);
209 static int sk_init_tx_ring(struct sk_if_softc *);
211 static int sk_miibus_readreg(device_t, int, int);
212 static int sk_miibus_writereg(device_t, int, int, int);
213 static void sk_miibus_statchg(device_t);
215 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
216 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, int);
217 static void sk_xmac_miibus_statchg(struct sk_if_softc *);
219 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
220 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int, int);
221 static void sk_marv_miibus_statchg(struct sk_if_softc *);
223 static void sk_setfilt(struct sk_if_softc *, caddr_t, int);
224 static void sk_setmulti(struct sk_if_softc *);
225 static void sk_setpromisc(struct sk_if_softc *);
228 static void sk_rxcsum(struct ifnet *, struct mbuf *, const uint16_t,
231 static int sk_dma_alloc(device_t);
232 static void sk_dma_free(device_t);
234 static void sk_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
236 static void sk_dmamem_addr(void *, bus_dma_segment_t *, int, int);
/*
 * Debug plumbing: when debugging is compiled in, DPRINTF/DPRINTFN print
 * conditionally on the skdebug level; otherwise DPRINTFN expands to
 * nothing.  NOTE(review): the surrounding #ifdef SK_DEBUG / #else /
 * #endif lines appear to be missing from this view.
 */
239 #define DPRINTF(x) if (skdebug) printf x
240 #define DPRINTFN(n,x) if (skdebug >= (n)) printf x
241 static int skdebug = 2;
243 static void sk_dump_txdesc(struct sk_tx_desc *, int);
244 static void sk_dump_mbuf(struct mbuf *);
245 static void sk_dump_bytes(const char *, int);
248 #define DPRINTFN(n,x)
/*
 * Newbus method table and devclass for the GEnesis/Yukon controller
 * ("skc") parent device.
 */
252  * Note that we have newbus methods for both the GEnesis controller
253  * itself and the XMAC(s). The XMACs are children of the GEnesis, and
254  * the miibus code is a child of the XMACs. We need to do it this way
255  * so that the miibus drivers can access the PHY registers on the
256  * right PHY. It's not quite what I had in mind, but it's the only
257  * design that achieves the desired effect.
259 static device_method_t skc_methods[] = {
260 /* Device interface */
261 DEVMETHOD(device_probe, skc_probe),
262 DEVMETHOD(device_attach, skc_attach),
263 DEVMETHOD(device_detach, skc_detach),
264 DEVMETHOD(device_shutdown, skc_shutdown),
/* Bus interface: pass child printing/driver notification to the generic code */
267 DEVMETHOD(bus_print_child, bus_generic_print_child),
268 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
273 static DEFINE_CLASS_0(skc, skc_driver, skc_methods, sizeof(struct sk_softc));
274 static devclass_t skc_devclass;
/*
 * Newbus method table, devclass and module glue for the per-port "sk"
 * child device.  The MII accessors route through the dispatchers below.
 */
276 static device_method_t sk_methods[] = {
277 /* Device interface */
278 DEVMETHOD(device_probe, sk_probe),
279 DEVMETHOD(device_attach, sk_attach),
280 DEVMETHOD(device_detach, sk_detach),
281 DEVMETHOD(device_shutdown, bus_generic_shutdown),
/* Bus interface */
284 DEVMETHOD(bus_print_child, bus_generic_print_child),
285 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* MII interface */
288 DEVMETHOD(miibus_readreg, sk_miibus_readreg),
289 DEVMETHOD(miibus_writereg, sk_miibus_writereg),
290 DEVMETHOD(miibus_statchg, sk_miibus_statchg),
295 static DEFINE_CLASS_0(sk, sk_driver, sk_methods, sizeof(struct sk_if_softc));
296 static devclass_t sk_devclass;
/* Attach skc to pci, sk ports under skc, and miibus under each sk port. */
298 DECLARE_DUMMY_MODULE(if_sk);
299 DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
300 DRIVER_MODULE(if_sk, skc, sk_driver, sk_devclass, 0, 0);
301 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
303 static __inline uint32_t
304 sk_win_read_4(struct sk_softc *sc, uint32_t reg)
306 return CSR_READ_4(sc, reg);
309 static __inline uint16_t
310 sk_win_read_2(struct sk_softc *sc, uint32_t reg)
312 return CSR_READ_2(sc, reg);
315 static __inline uint8_t
316 sk_win_read_1(struct sk_softc *sc, uint32_t reg)
318 return CSR_READ_1(sc, reg);
322 sk_win_write_4(struct sk_softc *sc, uint32_t reg, uint32_t x)
324 CSR_WRITE_4(sc, reg, x);
328 sk_win_write_2(struct sk_softc *sc, uint32_t reg, uint16_t x)
330 CSR_WRITE_2(sc, reg, x);
334 sk_win_write_1(struct sk_softc *sc, uint32_t reg, uint8_t x)
336 CSR_WRITE_1(sc, reg, x);
340 sk_miibus_readreg(device_t dev, int phy, int reg)
342 struct sk_if_softc *sc_if = device_get_softc(dev);
344 if (SK_IS_GENESIS(sc_if->sk_softc))
345 return sk_xmac_miibus_readreg(sc_if, phy, reg);
347 return sk_marv_miibus_readreg(sc_if, phy, reg);
351 sk_miibus_writereg(device_t dev, int phy, int reg, int val)
353 struct sk_if_softc *sc_if = device_get_softc(dev);
355 if (SK_IS_GENESIS(sc_if->sk_softc))
356 return sk_xmac_miibus_writereg(sc_if, phy, reg, val);
358 return sk_marv_miibus_writereg(sc_if, phy, reg, val);
362 sk_miibus_statchg(device_t dev)
364 struct sk_if_softc *sc_if = device_get_softc(dev);
366 if (SK_IS_GENESIS(sc_if->sk_softc))
367 sk_xmac_miibus_statchg(sc_if);
369 sk_marv_miibus_statchg(sc_if);
/*
 * Read a PHY register through the XMAC's indirect access window
 * (GEnesis).  Select the register via XM_PHY_ADDR, issue a priming
 * read of XM_PHY_DATA, and -- for external (non-XMAC-internal) PHYs --
 * poll XM_MMUCMD for XM_MMUCMD_PHYDATARDY before returning the data.
 * The internal XMAC PHY only answers at address 0, so other addresses
 * are rejected up front.
 * NOTE(review): the declaration of i, the early-return bodies and the
 * loop break/delay statements are missing from this view -- verify
 * control flow against the complete source.
 */
373 sk_xmac_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg)
377 DPRINTFN(9, ("sk_xmac_miibus_readreg\n"));
379 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
382 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
383 SK_XM_READ_2(sc_if, XM_PHY_DATA);
384 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
385 for (i = 0; i < SK_TIMEOUT; i++) {
387 if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
388 XM_MMUCMD_PHYDATARDY)
392 if (i == SK_TIMEOUT) {
393 if_printf(&sc_if->arpcom.ac_if,
394 "phy failed to come ready\n");
399 return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
/*
 * Write a PHY register through the XMAC's indirect access window
 * (GEnesis).  Wait for XM_MMUCMD_PHYBUSY to clear, write the data,
 * then wait for the busy bit to clear again; log a timeout otherwise.
 * NOTE(review): the declaration of i, return statements and loop
 * break/delay lines are missing from this view -- verify against the
 * complete source.
 */
403 sk_xmac_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val)
407 DPRINTFN(9, ("sk_xmac_miibus_writereg\n"));
409 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
410 for (i = 0; i < SK_TIMEOUT; i++) {
411 if ((SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY) == 0)
415 if (i == SK_TIMEOUT) {
416 if_printf(&sc_if->arpcom.ac_if, "phy failed to come ready\n");
420 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
421 for (i = 0; i < SK_TIMEOUT; i++) {
423 if ((SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY) == 0)
428 if_printf(&sc_if->arpcom.ac_if, "phy write timed out\n");
433 sk_xmac_miibus_statchg(struct sk_if_softc *sc_if)
435 struct mii_data *mii;
437 mii = device_get_softc(sc_if->sk_miibus);
438 DPRINTFN(9, ("sk_xmac_miibus_statchg\n"));
441 * If this is a GMII PHY, manually set the XMAC's
442 * duplex mode accordingly.
444 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
445 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
446 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
448 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
/*
 * Read a PHY register via the Yukon's SMI interface: program
 * YUKON_SMICR with the PHY/register address and the read opcode, poll
 * until YU_SMICR_READ_VALID is set, then fetch the result from
 * YUKON_SMIDR.  Accesses to PHY types other than the Marvell
 * copper/fiber PHY are skipped.
 * NOTE(review): local declarations, the phy-skip condition's opening
 * if, return statements and loop delay/break lines are missing from
 * this view -- verify against the complete source.
 */
453 sk_marv_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg)
459 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
460 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
461 DPRINTFN(9, ("sk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n",
466 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
467 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);
469 for (i = 0; i < SK_TIMEOUT; i++) {
471 val = SK_YU_READ_2(sc_if, YUKON_SMICR);
472 if (val & YU_SMICR_READ_VALID)
476 if (i == SK_TIMEOUT) {
477 if_printf(&sc_if->arpcom.ac_if, "phy failed to come ready\n");
481 DPRINTFN(9, ("sk_marv_miibus_readreg: i=%d, timeout=%d\n", i,
484 val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
486 DPRINTFN(9, ("sk_marv_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
/*
 * Write a PHY register via the Yukon's SMI interface: load the data
 * into YUKON_SMIDR, program YUKON_SMICR with the PHY/register address
 * and the write opcode, then poll YUKON_SMICR's busy bit, logging a
 * timeout if the operation does not complete.
 * NOTE(review): the declaration of i, the loop's break/delay lines and
 * the return statement are missing from this view -- in particular the
 * sense of the YU_SMICR_BUSY test at the poll cannot be confirmed from
 * what is shown; verify against the complete source.
 */
493 sk_marv_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val)
497 DPRINTFN(9, ("sk_marv_miibus_writereg phy=%d reg=%#x val=%#x\n",
500 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
501 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
502 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
504 for (i = 0; i < SK_TIMEOUT; i++) {
506 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
511 if_printf(&sc_if->arpcom.ac_if, "phy write timed out\n");
517 sk_marv_miibus_statchg(struct sk_if_softc *sc_if)
519 DPRINTFN(9, ("sk_marv_miibus_statchg: gpcr=%x\n",
520 SK_YU_READ_2(sc_if, YUKON_GPCR)));
526 sk_xmac_hash(caddr_t addr)
530 crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
531 return (~crc & ((1 << HASH_BITS) - 1));
535 sk_yukon_hash(caddr_t addr)
539 crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
540 return (crc & ((1 << HASH_BITS) - 1));
544 sk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot)
548 base = XM_RXFILT_ENTRY(slot);
550 SK_XM_WRITE_2(sc_if, base, *(uint16_t *)(&addr[0]));
551 SK_XM_WRITE_2(sc_if, base + 2, *(uint16_t *)(&addr[2]));
552 SK_XM_WRITE_2(sc_if, base + 4, *(uint16_t *)(&addr[4]));
/*
 * Reprogram the multicast filter from ifp->if_multiaddrs.  All existing
 * filters are first cleared (perfect-match slots + XM_MAR0/XM_MAR2 on
 * GEnesis, YUKON_MCAH1..4 on Yukon).  IFF_ALLMULTI/IFF_PROMISC opens
 * the hash completely; otherwise the first XM_RXFILT_MAX groups go into
 * GEnesis perfect-match slots and the rest into the 64-bit hash, using
 * the chip-family specific hash function.  The list is walked tail to
 * head via the le_prev links.
 * NOTE(review): switch case labels, loop increments, break statements
 * and several closing braces are missing from this view -- verify
 * control flow against the complete source.
 */
556 sk_setmulti(struct sk_if_softc *sc_if)
558 struct sk_softc *sc = sc_if->sk_softc;
559 struct ifnet *ifp = &sc_if->arpcom.ac_if;
560 uint32_t hashes[2] = { 0, 0 };
562 struct ifmultiaddr *ifma;
563 uint8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };
565 /* First, zot all the existing filters. */
566 switch(sc->sk_type) {
568 for (i = 1; i < XM_RXFILT_MAX; i++)
569 sk_setfilt(sc_if, (caddr_t)&dummy, i);
571 SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
572 SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
577 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
578 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
579 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
580 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
584 /* Now program new ones. */
585 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
586 hashes[0] = 0xFFFFFFFF;
587 hashes[1] = 0xFFFFFFFF;
590 /* First find the tail of the list. */
591 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
592 if (ifma->ifma_link.le_next == NULL)
595 /* Now traverse the list backwards. */
596 for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
597 ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
600 if (ifma->ifma_addr->sa_family != AF_LINK)
603 maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
606 * Program the first XM_RXFILT_MAX multicast groups
607 * into the perfect filter. For all others,
608 * use the hash table.
610 if (SK_IS_GENESIS(sc) && i < XM_RXFILT_MAX) {
611 sk_setfilt(sc_if, maddr, i);
616 switch(sc->sk_type) {
618 h = sk_xmac_hash(maddr);
624 h = sk_yukon_hash(maddr);
628 hashes[0] |= (1 << h);
630 hashes[1] |= (1 << (h - 32));
634 switch(sc->sk_type) {
636 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
637 XM_MODE_RX_USE_PERFECT);
638 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
639 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
644 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
645 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
646 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
647 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
/*
 * Propagate IFF_PROMISC into the hardware: GEnesis toggles
 * XM_MODE_RX_PROMISC; Yukon clears/sets the unicast and multicast
 * filter-enable bits (YU_RCR_UFLEN|YU_RCR_MUFLEN) in YUKON_RCR --
 * note the Yukon sense is inverted (clear filters = promiscuous).
 * NOTE(review): the switch case labels and break statements are
 * missing from this view -- verify against the complete source.
 */
653 sk_setpromisc(struct sk_if_softc *sc_if)
655 struct sk_softc *sc = sc_if->sk_softc;
656 struct ifnet *ifp = &sc_if->arpcom.ac_if;
658 switch(sc->sk_type) {
660 if (ifp->if_flags & IFF_PROMISC)
661 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
663 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
668 if (ifp->if_flags & IFF_PROMISC) {
669 SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
670 YU_RCR_UFLEN | YU_RCR_MUFLEN);
672 SK_YU_SETBIT_2(sc_if, YUKON_RCR,
673 YU_RCR_UFLEN | YU_RCR_MUFLEN);
/*
 * Initialize the receive descriptor ring: link each software chain
 * entry and each hardware descriptor to its successor (wrapping at
 * SK_RX_RING_CNT), preset the checksum start offsets, attach a fresh
 * jumbo mbuf to every slot via sk_newbuf(), then sync the ring for
 * device access.
 * NOTE(review): the nexti computation, error-return and final return
 * lines are missing from this view -- verify against the complete
 * source.
 */
680 sk_init_rx_ring(struct sk_if_softc *sc_if)
682 struct sk_chain_data *cd = &sc_if->sk_cdata;
683 struct sk_ring_data *rd = sc_if->sk_rdata;
686 bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
688 for (i = 0; i < SK_RX_RING_CNT; i++) {
689 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
690 if (i == (SK_RX_RING_CNT - 1))
694 cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
695 rd->sk_rx_ring[i].sk_next =
696 htole32(SK_RX_RING_ADDR(sc_if, nexti));
697 rd->sk_rx_ring[i].sk_csum1_start = htole16(ETHER_HDR_LEN);
698 rd->sk_rx_ring[i].sk_csum2_start =
699 htole16(ETHER_HDR_LEN + sizeof(struct ip));
701 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL, 1) == ENOBUFS) {
702 if_printf(&sc_if->arpcom.ac_if,
703 "failed alloc of %dth mbuf\n", i);
711 bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
712 BUS_DMASYNC_PREWRITE);
/*
 * Initialize the transmit descriptor ring: chain the software entries
 * and hardware descriptors circularly (wrapping at SK_TX_RING_CNT),
 * reset the producer/consumer indices and in-flight count, and sync
 * the ring for device access.
 * NOTE(review): the nexti computation and return statement are missing
 * from this view -- verify against the complete source.
 */
718 sk_init_tx_ring(struct sk_if_softc *sc_if)
720 struct sk_chain_data *cd = &sc_if->sk_cdata;
721 struct sk_ring_data *rd = sc_if->sk_rdata;
724 bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
726 for (i = 0; i < SK_TX_RING_CNT; i++) {
727 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
728 if (i == (SK_TX_RING_CNT - 1))
732 cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];
733 rd->sk_tx_ring[i].sk_next = htole32(SK_TX_RING_ADDR(sc_if, nexti));
736 sc_if->sk_cdata.sk_tx_prod = 0;
737 sc_if->sk_cdata.sk_tx_cons = 0;
738 sc_if->sk_cdata.sk_tx_cnt = 0;
740 bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
741 BUS_DMASYNC_PREWRITE);
/*
 * Attach an mbuf with a jumbo buffer to the RX chain entry c.  If m is
 * NULL a new mbuf header is allocated (waiting per the wait flag) and a
 * buffer is taken from the driver's jumbo pool, wired up as external
 * storage with sk_jfree/sk_jref; otherwise the passed mbuf (which must
 * already carry M_EXT storage) is re-initialized for reuse.  The data
 * pointer is bumped by ETHER_ALIGN so the IP header lands on a longword
 * boundary, and the slot's descriptor gets the buffer's physical
 * address and SK_JLEN | SK_RXSTAT control word.
 * NOTE(review): the function signature's trailing parameter line,
 * failure-path returns, the assignment of r, and the final return are
 * missing from this view -- verify against the complete source.
 */
747 sk_newbuf(struct sk_if_softc *sc_if, struct sk_chain *c, struct mbuf *m,
750 struct sk_jpool_entry *entry;
751 struct mbuf *m_new = NULL;
752 struct sk_rx_desc *r;
755 MGETHDR(m_new, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
759 /* Allocate the jumbo buffer */
760 entry = sk_jalloc(sc_if);
763 DPRINTFN(1, ("%s jumbo allocation failed -- packet "
764 "dropped!\n", sc_if->arpcom.ac_if.if_xname));
768 m_new->m_ext.ext_arg = entry;
769 m_new->m_ext.ext_buf = entry->buf;
770 m_new->m_ext.ext_free = sk_jfree;
771 m_new->m_ext.ext_ref = sk_jref;
772 m_new->m_ext.ext_size = SK_JLEN;
774 m_new->m_flags |= M_EXT;
777 * We're re-using a previously allocated mbuf;
778 * be sure to re-init pointers and lengths to
781 KKASSERT(m->m_flags & M_EXT);
782 entry = m->m_ext.ext_arg;
785 m_new->m_data = m_new->m_ext.ext_buf;
786 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;
789 * Adjust alignment so packet payload begins on a
790 * longword boundary. Mandatory for Alpha, useful on
793 m_adj(m_new, ETHER_ALIGN);
798 r->sk_data_lo = htole32(entry->paddr + ETHER_ALIGN);
799 r->sk_ctl = htole32(SK_JLEN | SK_RXSTAT);
805  * Allocate a jumbo buffer.
/*
 * Pop a free entry off the jumbo-buffer free list, under the jpool
 * serializer.  Logs and (per the visible structure) reports failure
 * when the list is empty.
 * NOTE(review): the inuse marking, the NULL-entry assignment and the
 * return statement are missing from this view -- verify against the
 * complete source.
 */
807 struct sk_jpool_entry *
808 sk_jalloc(struct sk_if_softc *sc_if)
810 struct sk_chain_data *cd = &sc_if->sk_cdata;
811 struct sk_jpool_entry *entry;
813 lwkt_serialize_enter(&cd->sk_jpool_serializer);
815 entry = SLIST_FIRST(&cd->sk_jpool_free_ent);
817 SLIST_REMOVE_HEAD(&cd->sk_jpool_free_ent, entry_next);
820 if_printf(&sc_if->arpcom.ac_if,
821 "no free jumbo buffer\n");
824 lwkt_serialize_exit(&cd->sk_jpool_serializer);
829  * Release a jumbo buffer.
/*
 * mbuf external-storage free callback: drop one reference on the
 * jumbo-pool entry and, when the count reaches zero, return it to the
 * free list under the jpool serializer.  Sanity-panics on a foreign
 * pointer or a double free.
 * NOTE(review): the function signature line (static void sk_jfree(void
 * *arg) per the prototype at the top of the file) is missing from this
 * view.
 */
834 struct sk_jpool_entry *entry = arg;
835 struct sk_chain_data *cd = &entry->sc_if->sk_cdata;
837 if (&cd->sk_jpool_ent[entry->slot] != entry)
838 panic("%s: free wrong jumbo buffer\n", __func__);
839 else if (entry->inuse == 0)
840 panic("%s: jumbo buffer already freed\n", __func__);
842 lwkt_serialize_enter(&cd->sk_jpool_serializer);
844 atomic_subtract_int(&entry->inuse, 1);
845 if (entry->inuse == 0)
846 SLIST_INSERT_HEAD(&cd->sk_jpool_free_ent, entry, entry_next);
848 lwkt_serialize_exit(&cd->sk_jpool_serializer);
/*
 * mbuf external-storage reference callback: bump the jumbo-pool
 * entry's reference count after the same sanity checks as sk_jfree().
 * NOTE(review): the function signature line (static void sk_jref(void
 * *arg) per the prototype at the top of the file) is missing from this
 * view.
 */
854 struct sk_jpool_entry *entry = arg;
855 struct sk_chain_data *cd = &entry->sc_if->sk_cdata;
857 if (&cd->sk_jpool_ent[entry->slot] != entry)
858 panic("%s: free wrong jumbo buffer\n", __func__);
859 else if (entry->inuse == 0)
860 panic("%s: jumbo buffer already freed\n", __func__);
862 atomic_add_int(&entry->inuse, 1);
/*
 * ifmedia "set media" handler: look up the MII softc for this port.
 * NOTE(review): the body after the lookup (presumably mii_mediachg()
 * and the return) is missing from this view -- verify against the
 * complete source.
 */
869 sk_ifmedia_upd(struct ifnet *ifp)
871 struct sk_if_softc *sc_if = ifp->if_softc;
872 struct mii_data *mii;
874 mii = device_get_softc(sc_if->sk_miibus);
882  * Report current media status.
/*
 * ifmedia "get status" handler: copy the MII layer's current active
 * media and status into the caller's ifmediareq.
 * NOTE(review): a poll of the PHY state (mii_pollstat) between the
 * lookup and the copies appears to be missing from this view --
 * verify against the complete source.
 */
885 sk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
887 struct sk_if_softc *sc_if;
888 struct mii_data *mii;
890 sc_if = ifp->if_softc;
891 mii = device_get_softc(sc_if->sk_miibus);
894 ifmr->ifm_active = mii->mii_media_active;
895 ifmr->ifm_status = mii->mii_media_status;
/*
 * Interface ioctl handler (runs with the interface serializer held).
 * Handles MTU changes (bounded by SK_JUMBO_MTU, forcing re-init),
 * IFF_UP/IFF_PROMISC transitions (re-applying promiscuous state or
 * starting/stopping the interface), media ioctls (forwarded to
 * ifmedia_ioctl), and everything else via ether_ioctl.
 * NOTE(review): the switch statement, case labels, sk_init/sk_stop
 * calls, multicast handling and the return are missing from this
 * view -- verify control flow against the complete source.
 */
899 sk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
901 struct sk_if_softc *sc_if = ifp->if_softc;
902 struct ifreq *ifr = (struct ifreq *)data;
903 struct mii_data *mii;
906 ASSERT_SERIALIZED(ifp->if_serializer);
910 if (ifr->ifr_mtu > SK_JUMBO_MTU)
913 ifp->if_mtu = ifr->ifr_mtu;
914 ifp->if_flags &= ~IFF_RUNNING;
919 if (ifp->if_flags & IFF_UP) {
920 if (ifp->if_flags & IFF_RUNNING) {
921 if ((ifp->if_flags ^ sc_if->sk_if_flags)
923 sk_setpromisc(sc_if);
929 if (ifp->if_flags & IFF_RUNNING)
932 sc_if->sk_if_flags = ifp->if_flags;
940 mii = device_get_softc(sc_if->sk_miibus);
941 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
944 error = ether_ioctl(ifp, command, data);
952  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
953  * IDs against our list and return a device name if we find a match.
/*
 * Rev.3 of the Linksys EG1032 shares IDs with rev.2 but is a different
 * (re(4)-supported) chip, so it is filtered by subdevice ID before the
 * table walk.
 * NOTE(review): the return statements (match/no-match codes) are
 * missing from this view -- verify against the complete source.
 */
956 skc_probe(device_t dev)
958 const struct skc_type *t;
961 vid = pci_get_vendor(dev);
962 did = pci_get_device(dev);
965 * Only attach to rev.2 of the Linksys EG1032 adapter.
966 * Rev.3 is supported by re(4).
968 if (vid == PCI_VENDOR_LINKSYS &&
969 did == PCI_PRODUCT_LINKSYS_EG1032 &&
970 pci_get_subdevice(dev) != SUBDEVICEID_LINKSYS_EG1032_REV2)
973 for (t = skc_devs; t->skc_name != NULL; t++) {
974 if (vid == t->skc_vid && did == t->skc_did) {
975 device_set_desc(dev, t->skc_name);
983  * Force the GEnesis into reset, then bring it out of reset.
/*
 * Soft/master reset sequence followed by basic bring-up: packet
 * arbiter timeouts (GEnesis only), RAM interface enable, and interrupt
 * moderation.  The moderation timer counts chip-family-specific ticks
 * per microsecond, hence the imtimer_ticks selection before SK_IM_USECS.
 * NOTE(review): DELAY calls, the Yukon reset-set branch condition,
 * switch case labels and break statements are missing from this view --
 * verify against the complete source.
 */
986 sk_reset(struct sk_softc *sc)
988 uint32_t imtimer_ticks;
990 DPRINTFN(2, ("sk_reset\n"));
992 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
993 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
995 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
998 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
1000 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
1001 if (SK_IS_YUKON(sc))
1002 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
1004 DPRINTFN(2, ("sk_reset: sk_csr=%x\n", CSR_READ_2(sc, SK_CSR)));
1005 DPRINTFN(2, ("sk_reset: sk_link_ctrl=%x\n",
1006 CSR_READ_2(sc, SK_LINK_CTRL)));
1008 if (SK_IS_GENESIS(sc)) {
1009 /* Configure packet arbiter */
1010 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
1011 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
1012 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
1013 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
1014 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
1017 /* Enable RAM interface */
1018 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
1021 * Configure interrupt moderation. The moderation timer
1022 * defers interrupts specified in the interrupt moderation
1023 * timer mask based on the timeout specified in the interrupt
1024 * moderation timer init register. Each bit in the timer
1025 * register represents one tick, so to specify a timeout in
1026 * microseconds, we have to multiply by the correct number of
1027 * ticks-per-microsecond.
1029 switch (sc->sk_type) {
1031 imtimer_ticks = SK_IMTIMER_TICKS_GENESIS;
1034 imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
1036 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(100));
1037 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
1038 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
1039 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
/*
 * Probe for a per-port "sk" child: build a descriptive name from the
 * parent controller's chip type (and, for Yukon Lite, the silicon
 * revision) and the raw revision code.
 * NOTE(review): switch case labels, revstr assignments, break
 * statements, the devname declaration and the return are missing from
 * this view -- verify against the complete source.
 */
1043 sk_probe(device_t dev)
1045 struct sk_softc *sc = device_get_softc(device_get_parent(dev));
1046 const char *revstr = "", *name = NULL;
1049 switch (sc->sk_type) {
1051 name = "SysKonnect GEnesis";
1054 name = "Marvell Yukon";
1057 name = "Marvell Yukon Lite";
1058 switch (sc->sk_rev) {
1059 case SK_YUKON_LITE_REV_A0:
1062 case SK_YUKON_LITE_REV_A1:
1065 case SK_YUKON_LITE_REV_A3:
1071 name = "Marvell Yukon LP";
1077 ksnprintf(devname, sizeof(devname), "%s%s (0x%x)",
1078 name, revstr, sc->sk_rev);
1079 device_set_desc_copy(dev, devname);
1084  * Each XMAC chip is attached as a separate logical IP interface.
1085  * Single port cards will have only one logical interface of course.
/*
 * Attach one port: record the port number passed via ivars, read the
 * station address from the EEPROM window, carve the on-board SRAM into
 * per-port RX/TX RAM-buffer regions (in 8-byte units), determine the
 * PHY type/address, allocate DMA resources, initialize the ifnet, run
 * the chip-family MAC init, probe the PHY via miibus, and finally
 * ether_ifattach under the controller's serializer.
 * NOTE(review): error-handling branches, "fail:" label, break
 * statements, switch case labels and several closing braces are
 * missing from this view -- verify control flow against the complete
 * source.
 */
1088 sk_attach(device_t dev)
1090 struct sk_softc *sc = device_get_softc(device_get_parent(dev));
1091 struct sk_if_softc *sc_if = device_get_softc(dev);
1092 struct ifnet *ifp = &sc_if->arpcom.ac_if;
1095 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1097 sc_if->sk_port = *(int *)device_get_ivars(dev);
1098 KKASSERT(sc_if->sk_port == SK_PORT_A || sc_if->sk_port == SK_PORT_B);
1100 sc_if->sk_softc = sc;
1101 sc->sk_if[sc_if->sk_port] = sc_if;
/* The ivars were a heap-allocated port number; consume and release them. */
1103 kfree(device_get_ivars(dev), M_DEVBUF);
1104 device_set_ivars(dev, NULL);
1106 if (sc_if->sk_port == SK_PORT_A)
1107 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
1108 if (sc_if->sk_port == SK_PORT_B)
1109 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
1111 DPRINTFN(2, ("begin sk_attach: port=%d\n", sc_if->sk_port));
1114 * Get station address for this interface. Note that
1115 * dual port cards actually come with three station
1116 * addresses: one for each port, plus an extra. The
1117 * extra one is used by the SysKonnect driver software
1118 * as a 'virtual' station address for when both ports
1119 * are operating in failover mode. Currently we don't
1120 * use this extra address.
1122 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1124 sc_if->arpcom.ac_enaddr[i] =
1125 sk_win_read_1(sc, SK_MAC0_0 + (sc_if->sk_port * 8) + i);
1129 * Set up RAM buffer addresses. The NIC will have a certain
1130 * amount of SRAM on it, somewhere between 512K and 2MB. We
1131 * need to divide this up a) between the transmitter and
1132 * receiver and b) between the two XMACs, if this is a
1133 * dual port NIC. Our algorithm is to divide up the memory
1134 * evenly so that everyone gets a fair share.
1136 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
1137 uint32_t chunk, val;
1139 chunk = sc->sk_ramsize / 2;
1140 val = sc->sk_rboff / sizeof(uint64_t);
1141 sc_if->sk_rx_ramstart = val;
1142 val += (chunk / sizeof(uint64_t));
1143 sc_if->sk_rx_ramend = val - 1;
1144 sc_if->sk_tx_ramstart = val;
1145 val += (chunk / sizeof(uint64_t));
1146 sc_if->sk_tx_ramend = val - 1;
1148 uint32_t chunk, val;
1150 chunk = sc->sk_ramsize / 4;
1151 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
1153 sc_if->sk_rx_ramstart = val;
1154 val += (chunk / sizeof(uint64_t));
1155 sc_if->sk_rx_ramend = val - 1;
1156 sc_if->sk_tx_ramstart = val;
1157 val += (chunk / sizeof(uint64_t));
1158 sc_if->sk_tx_ramend = val - 1;
1161 DPRINTFN(2, ("sk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
1162 "           tx_ramstart=%#x tx_ramend=%#x\n",
1163 sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
1164 sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
1166 /* Read and save PHY type */
1167 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
1169 /* Set PHY address */
1170 if (SK_IS_GENESIS(sc)) {
1171 switch (sc_if->sk_phytype) {
1172 case SK_PHYTYPE_XMAC:
1173 sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
1175 case SK_PHYTYPE_BCOM:
1176 sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
1179 device_printf(dev, "unsupported PHY type: %d\n",
1186 if (SK_IS_YUKON(sc)) {
1187 if ((sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
1188 sc->sk_pmd != 'L' && sc->sk_pmd != 'S')) {
1189 /* not initialized, punt */
1190 sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
1191 sc->sk_coppertype = 1;
1194 sc_if->sk_phyaddr = SK_PHYADDR_MARV;
1196 if (!(sc->sk_coppertype))
1197 sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
1200 error = sk_dma_alloc(dev);
1204 ifp->if_softc = sc_if;
1205 ifp->if_mtu = ETHERMTU;
1206 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1207 ifp->if_ioctl = sk_ioctl;
1208 ifp->if_start = sk_start;
1209 ifp->if_watchdog = sk_watchdog;
1210 ifp->if_init = sk_init;
1211 ifp->if_baudrate = 1000000000;
1212 ifq_set_maxlen(&ifp->if_snd, SK_TX_RING_CNT - 1);
1213 ifq_set_ready(&ifp->if_snd);
1215 ifp->if_capabilities = IFCAP_VLAN_MTU;
1220 switch (sc->sk_type) {
1222 sk_init_xmac(sc_if);
1227 sk_init_yukon(sc_if);
1230 device_printf(dev, "unknown device type %d\n", sc->sk_type);
1235 DPRINTFN(2, ("sk_attach: 1\n"));
1237 error = mii_phy_probe(dev, &sc_if->sk_miibus,
1238 sk_ifmedia_upd, sk_ifmedia_sts);
1240 device_printf(dev, "no PHY found!\n");
1244 callout_init(&sc_if->sk_tick_timer);
1247 * Call MI attach routines.
1249 ether_ifattach(ifp, sc_if->arpcom.ac_enaddr, &sc->sk_serializer);
1251 DPRINTFN(2, ("sk_attach: end\n"));
/* Failure path: clear the controller's back-pointer for this port. */
1255 sc->sk_if[sc_if->sk_port] = NULL;
1260 * Attach the interface. Allocate softc structures, do ifmedia
1261 * setup and ethernet/BPF attach.
/*
 * skc_attach: attach the parent (controller-level) device.
 * Powers the chip up to D0 if needed, maps the PCI memory BAR,
 * identifies the chip (GENESIS vs. Yukon family), sizes the on-board
 * RAM buffer, and adds one "sk" child device per MAC port before
 * hooking up the shared interrupt handler.
 * NOTE(review): several original lines (error-path returns, switch/case
 * labels, closing braces) are not visible in this excerpt.
 */
1264 skc_attach(device_t dev)
1266 	struct sk_softc *sc = device_get_softc(dev);
1271 	DPRINTFN(2, ("begin skc_attach\n"));
/* Serializer guards the whole controller; passed to bus_setup_intr below. */
1273 	lwkt_serialize_init(&sc->sk_serializer);
1275 #ifndef BURN_BRIDGES
1277 	 * Handle power management nonsense.
1279 	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1280 		uint32_t		iobase, membase, irq;
1282 		/* Save important PCI config data. */
1283 		iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
1284 		membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
1285 		irq = pci_read_config(dev, SK_PCI_INTLINE, 4);
1287 		/* Reset the power state. */
1288 		device_printf(dev, "chip is in D%d power mode "
1289 			      "-- setting to D0\n", pci_get_powerstate(dev));
1291 		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
/* BARs/INTLINE may be cleared by the D-state transition; put them back. */
1293 		/* Restore PCI config data. */
1294 		pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
1295 		pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
1296 		pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
1298 #endif	/* BURN_BRIDGES */
1301 	 * Map control/status registers.
1303 	pci_enable_busmaster(dev);
1305 	sc->sk_res_rid = SK_PCI_LOMEM;
1306 	sc->sk_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
1307 					    &sc->sk_res_rid, RF_ACTIVE);
1308 	if (sc->sk_res == NULL) {
1309 		device_printf(dev, "couldn't map memory\n");
1313 	sc->sk_btag = rman_get_bustag(sc->sk_res);
1314 	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);
/* Chip type/revision come from the window registers just mapped. */
1316 	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
1317 	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);
1319 	/* Bail out here if chip is not recognized */
1320 	if (!SK_IS_GENESIS(sc) && !SK_IS_YUKON(sc)) {
1321 		device_printf(dev, "unknown chip type: %d\n", sc->sk_type);
1326 	DPRINTFN(2, ("skc_attach: allocate interrupt\n"));
1328 	/* Allocate interrupt */
1330 	sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sk_irq_rid,
1331 					    RF_SHAREABLE | RF_ACTIVE);
1332 	if (sc->sk_irq == NULL) {
1333 		device_printf(dev, "couldn't map interrupt\n");
1338 	/* Reset the adapter. */
/* EPROM0 encodes the RAM buffer size; decoding differs per chip family. */
1341 	skrs = sk_win_read_1(sc, SK_EPROM0);
1342 	if (SK_IS_GENESIS(sc)) {
1343 		/* Read and save RAM size and RAMbuffer offset */
1345 		case SK_RAMSIZE_512K_64:
1346 			sc->sk_ramsize = 0x80000;
1347 			sc->sk_rboff = SK_RBOFF_0;
1349 		case SK_RAMSIZE_1024K_64:
1350 			sc->sk_ramsize = 0x100000;
1351 			sc->sk_rboff = SK_RBOFF_80000;
1353 		case SK_RAMSIZE_1024K_128:
1354 			sc->sk_ramsize = 0x100000;
1355 			sc->sk_rboff = SK_RBOFF_0;
1357 		case SK_RAMSIZE_2048K_128:
1358 			sc->sk_ramsize = 0x200000;
1359 			sc->sk_rboff = SK_RBOFF_0;
1362 			device_printf(dev, "unknown ram size: %d\n", skrs);
/* Yukon path: 0 means the default 128K, otherwise skrs * 4K. */
1368 			sc->sk_ramsize = 0x20000;
1370 			sc->sk_ramsize = skrs * (1<<12);
1371 		sc->sk_rboff = SK_RBOFF_0;
1374 	DPRINTFN(2, ("skc_attach: ramsize=%d (%dk), rboff=%d\n",
1375 		     sc->sk_ramsize, sc->sk_ramsize / 1024,
1378 	/* Read and save physical media type */
1379 	sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);
/* 'T' / '1' PMD codes indicate copper media. */
1381 	if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
1382 		sc->sk_coppertype = 1;
1384 		sc->sk_coppertype = 0;
1386 	/* Yukon Lite Rev A0 needs special test, from sk98lin driver */
1387 	if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
1391 		flashaddr = sk_win_read_4(sc, SK_EP_ADDR);
1393 		/* Test Flash-Address Register */
1394 		sk_win_write_1(sc, SK_EP_ADDR+3, 0xff);
1395 		testbyte = sk_win_read_1(sc, SK_EP_ADDR+3);
1397 		if (testbyte != 0) {
1398 			/* This is a Yukon Lite Rev A0 */
1399 			sc->sk_type = SK_YUKON_LITE;
1400 			sc->sk_rev = SK_YUKON_LITE_REV_A0;
1401 			/* Restore Flash-Address Register */
1402 			sk_win_write_4(sc, SK_EP_ADDR, flashaddr);
/* Create the per-port child devices; ivars carry the port number. */
1406 	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
1407 	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
1409 	device_set_ivars(sc->sk_devs[SK_PORT_A], port);
1411 	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
1412 		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
1413 		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
1415 		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
1418 	/* Turn on the 'driver is loaded' LED. */
1419 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
1421 	bus_generic_attach(dev);
/* Interrupt handler runs under sk_serializer (INTR_NETSAFE). */
1423 	error = bus_setup_intr(dev, sc->sk_irq, INTR_NETSAFE, sk_intr, sc,
1424 			       &sc->sk_intrhand, &sc->sk_serializer);
1426 		device_printf(dev, "couldn't set up irq\n");
/*
 * sk_detach: detach one MAC-port child device.
 * Tears down the ethernet layer if attached, then removes the
 * miibus child created during attach.
 * NOTE(review): trailing lines of this function (DMA teardown/return)
 * are not visible in this excerpt.
 */
1436 sk_detach(device_t dev)
1438 	struct sk_if_softc *sc_if = device_get_softc(dev);
1439 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
1441 	if (device_is_attached(dev))
1442 		ether_ifdetach(ifp);
1444 	bus_generic_detach(dev);
1445 	if (sc_if->sk_miibus != NULL)
1446 		device_delete_child(dev, sc_if->sk_miibus);
/*
 * skc_detach: detach the parent controller device.
 * Stops both ports under the serializer, tears down the interrupt,
 * frees the per-port ivars and child devices, and releases the IRQ
 * and memory resources allocated in skc_attach.
 */
1453 skc_detach(device_t dev)
1455 	struct sk_softc *sc = device_get_softc(dev);
1458 	if (device_is_attached(dev)) {
/* sk_stop/teardown must happen with the controller serialized. */
1459 		lwkt_serialize_enter(&sc->sk_serializer);
1461 		if (sc->sk_if[SK_PORT_A] != NULL)
1462 			sk_stop(sc->sk_if[SK_PORT_A]);
1463 		if (sc->sk_if[SK_PORT_B] != NULL)
1464 			sk_stop(sc->sk_if[SK_PORT_B]);
1466 		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1468 		lwkt_serialize_exit(&sc->sk_serializer);
1471 	bus_generic_detach(dev);
/* Free the port-number ivars kmalloc'ed in skc_attach before
 * deleting each child. */
1472 	if (sc->sk_devs[SK_PORT_A] != NULL) {
1473 		port = device_get_ivars(sc->sk_devs[SK_PORT_A]);
1475 			kfree(port, M_DEVBUF);
1476 			device_set_ivars(sc->sk_devs[SK_PORT_A], NULL);
1478 		device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1480 	if (sc->sk_devs[SK_PORT_B] != NULL) {
1481 		port = device_get_ivars(sc->sk_devs[SK_PORT_B]);
1483 			kfree(port, M_DEVBUF);
1484 			device_set_ivars(sc->sk_devs[SK_PORT_B], NULL);
1486 		device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1489 	if (sc->sk_irq != NULL) {
1490 		bus_release_resource(dev, SYS_RES_IRQ, sc->sk_irq_rid,
1493 	if (sc->sk_res != NULL) {
1494 		bus_release_resource(dev, SYS_RES_MEMORY, sc->sk_res_rid,
/*
 * sk_encap: map an mbuf chain into TX descriptors.
 * Loads m_head with bus_dmamap_load_mbuf, fills one descriptor per DMA
 * segment starting at *txidx, then sets LASTFRAG/EOF_INTR on the last
 * descriptor and hands the first one to the chip by setting OWN last,
 * so the hardware never sees a partially-built chain.
 * Returns non-zero on mapping failure or when the ring is too full
 * (visible paths; some error-return lines are outside this excerpt).
 */
1502 sk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, uint32_t *txidx)
1504 	struct sk_chain_data *cd = &sc_if->sk_cdata;
1505 	struct sk_ring_data *rd = sc_if->sk_rdata;
1506 	struct sk_tx_desc *f = NULL;
1507 	uint32_t frag, cur, sk_ctl;
1508 	struct sk_dma_ctx ctx;
1509 	bus_dma_segment_t segs[SK_NTXSEG];
1513 	DPRINTFN(2, ("sk_encap\n"));
1515 	cur = frag = *txidx;
1519 		sk_dump_mbuf(m_head);
1522 	map = cd->sk_tx_dmap[*txidx];
1525 	 * Start packing the mbufs in this chain into
1526 	 * the fragment pointers. Stop when we run out
1527 	 * of fragments or hit the end of the mbuf chain.
1529 	ctx.nsegs = SK_NTXSEG;
1531 	error = bus_dmamap_load_mbuf(cd->sk_tx_dtag, map, m_head,
1532 				     sk_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
1534 		if_printf(&sc_if->arpcom.ac_if, "could not map TX mbuf\n");
/* Keep >= 2 descriptors spare; otherwise back out the DMA load. */
1538 	if ((SK_TX_RING_CNT - (cd->sk_tx_cnt + ctx.nsegs)) < 2) {
1539 		bus_dmamap_unload(cd->sk_tx_dtag, map);
1540 		DPRINTFN(2, ("sk_encap: too few descriptors free\n"));
1544 	DPRINTFN(2, ("sk_encap: nsegs=%d\n", ctx.nsegs));
1546 	/* Sync the DMA map. */
1547 	bus_dmamap_sync(cd->sk_tx_dtag, map, BUS_DMASYNC_PREWRITE);
1549 	for (i = 0; i < ctx.nsegs; i++) {
1550 		f = &rd->sk_tx_ring[frag];
1551 		f->sk_data_lo = htole32(segs[i].ds_addr);
1552 		sk_ctl = segs[i].ds_len | SK_OPCODE_DEFAULT;
1554 			sk_ctl |= SK_TXCTL_FIRSTFRAG;
/* OWN is set here for every descriptor except the first (*txidx);
 * the first gets OWN below, after the rest of the chain is ready. */
1556 			sk_ctl |= SK_TXCTL_OWN;
1557 		f->sk_ctl = htole32(sk_ctl);
1559 		SK_INC(frag, SK_TX_RING_CNT);
1562 	cd->sk_tx_chain[cur].sk_mbuf = m_head;
1563 	/* Switch DMA map */
1564 	cd->sk_tx_dmap[*txidx] = cd->sk_tx_dmap[cur];
1565 	cd->sk_tx_dmap[cur] = map;
1567 	rd->sk_tx_ring[cur].sk_ctl |=
1568 		htole32(SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR);
1569 	rd->sk_tx_ring[*txidx].sk_ctl |= htole32(SK_TXCTL_OWN);
1571 	/* Sync first descriptor to hand it off */
1572 	bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
1573 			BUS_DMASYNC_PREWRITE);
1575 	sc_if->sk_cdata.sk_tx_cnt += ctx.nsegs;
1579 		struct sk_tx_desc *desc;
1582 		for (idx = *txidx; idx != frag; SK_INC(idx, SK_TX_RING_CNT)) {
1583 			desc = &sc_if->sk_rdata->sk_tx_ring[idx];
1584 			sk_dump_txdesc(desc, idx);
1591 	DPRINTFN(2, ("sk_encap: completed successfully\n"));
/*
 * sk_start: ifnet if_start method — drain the send queue into the
 * TX ring.  Polls (does not dequeue) each packet first so that a full
 * ring leaves the packet on the queue; only after sk_encap succeeds is
 * the packet dequeued and committed.  Kicks the TX BMU once at the end
 * if anything was queued.
 */
1597 sk_start(struct ifnet *ifp)
1599 	struct sk_if_softc *sc_if = ifp->if_softc;
1600 	struct sk_softc *sc = sc_if->sk_softc;
1601 	struct mbuf *m_head = NULL;
1602 	uint32_t idx = sc_if->sk_cdata.sk_tx_prod;
1605 	DPRINTFN(2, ("sk_start\n"));
/* Loop while the producer slot is free (no mbuf parked on it). */
1607 	while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
1608 		m_head = ifq_poll(&ifp->if_snd);
1613 		 * Pack the data into the transmit ring. If we
1614 		 * don't have room, set the OACTIVE flag and wait
1615 		 * for the NIC to drain the ring.
1617 		if (sk_encap(sc_if, m_head, &idx)) {
1618 			ifp->if_flags |= IFF_OACTIVE;
1622 		/* now we are committed to transmit the packet */
1623 		ifq_dequeue(&ifp->if_snd, m_head);
1626 		BPF_MTAP(ifp, m_head);
/* Only touch hardware if sk_encap advanced the producer index. */
1632 	if (idx != sc_if->sk_cdata.sk_tx_prod) {
1633 		sc_if->sk_cdata.sk_tx_prod = idx;
1634 		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
1636 		/* Set a timeout in case the chip goes out to lunch. */
/*
 * sk_watchdog: ifnet if_watchdog method, fired when the TX timeout
 * set in sk_start expires.  Reclaims completed descriptors first (a
 * TX-complete interrupt may have been lost); only if descriptors are
 * still outstanding is it treated as a real hang and the interface
 * reinitialized (restart lines outside this excerpt).
 */
1642 sk_watchdog(struct ifnet *ifp)
1644 	struct sk_if_softc *sc_if = ifp->if_softc;
1646 	ASSERT_SERIALIZED(ifp->if_serializer);
1648 	 * Reclaim first as there is a possibility of losing Tx completion
1652 	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
1653 		if_printf(&sc_if->arpcom.ac_if, "watchdog timeout\n");
/* Clear RUNNING so the reinit path below performs a full restart. */
1655 		ifp->if_flags &= ~IFF_RUNNING;
/*
 * skc_shutdown: system-shutdown hook for the controller.
 * Turns off the "driver loaded" LED and resets the chip so the
 * attached MAC(s) are quiesced, all under the controller serializer.
 */
1661 skc_shutdown(device_t dev)
1663 	struct sk_softc *sc = device_get_softc(dev);
1665 	DPRINTFN(2, ("sk_shutdown\n"));
1667 	lwkt_serialize_enter(&sc->sk_serializer);
1669 	/* Turn off the 'driver is loaded' LED. */
1670 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1673 	 * Reset the GEnesis controller. Doing this should also
1674 	 * assert the resets on the attached XMAC(s).
1678 	lwkt_serialize_exit(&sc->sk_serializer);
/*
 * sk_rxvalid: validate a received frame's hardware status word.
 * GENESIS (XMAC) and Yukon (GMAC) encode RX status differently, so the
 * error bits and the embedded byte count are checked per chip family.
 * The byte count in the status word must also match the descriptor
 * length 'len'.  Returns 0 for a bad frame (non-zero path outside
 * this excerpt).
 */
1682 sk_rxvalid(struct sk_softc *sc, uint32_t stat, uint32_t len)
1684 	if (sc->sk_type == SK_GENESIS) {
1685 		if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
1686 		    XM_RXSTAT_BYTES(stat) != len)
/* Yukon: any error flag set, RXOK clear, or length mismatch => bad. */
1689 		if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
1690 		    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
1691 		    YU_RXSTAT_JABBER)) != 0 ||
1692 		    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
1693 		    YU_RXSTAT_BYTES(stat) != len)
/*
 * sk_rxeof: receive-completion processing.
 * Walks the RX ring from the current consumer index until a descriptor
 * still owned by the chip is found.  Valid frames are either handed up
 * in their (jumbo) buffer — after a replacement buffer is allocated —
 * or copied into a fresh mbuf chain with m_devget so the jumbo buffer
 * can be recycled.  Invalid frames recycle the existing buffer.
 */
1701 sk_rxeof(struct sk_if_softc *sc_if)
1703 	struct sk_softc *sc = sc_if->sk_softc;
1704 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
1705 	struct sk_chain_data *cd = &sc_if->sk_cdata;
1706 	struct sk_ring_data *rd = sc_if->sk_rdata;
1709 	DPRINTFN(2, ("sk_rxeof\n"));
/* Pull the chip's writes (descriptors + jumbo pool) into view
 * before reading them. */
1713 	bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
1714 			BUS_DMASYNC_POSTREAD);
1715 	bus_dmamap_sync(cd->sk_jpool_dtag, cd->sk_jpool_dmap,
1716 			BUS_DMASYNC_POSTREAD);
1720 		struct sk_chain *cur_rx;
1721 		struct sk_rx_desc *cur_desc;
1722 		uint32_t rxstat, sk_ctl;
1723 		uint16_t csum1, csum2;
1729 		sk_ctl = le32toh(rd->sk_rx_ring[i].sk_ctl);
1730 		if ((sk_ctl & SK_RXCTL_OWN) != 0) {
1731 			/* Invalidate the descriptor -- it's not ready yet */
1736 		cur_rx = &cd->sk_rx_chain[cur];
1737 		cur_desc = &rd->sk_rx_ring[cur];
1739 		rxstat = le32toh(cur_desc->sk_xmac_rxstat);
1740 		m = cur_rx->sk_mbuf;
1741 		cur_rx->sk_mbuf = NULL;
1742 		total_len = SK_RXBYTES(le32toh(cur_desc->sk_ctl));
/* Hardware checksum accumulators, used by sk_rxcsum below. */
1744 		csum1 = le16toh(rd->sk_rx_ring[i].sk_csum1);
1745 		csum2 = le16toh(rd->sk_rx_ring[i].sk_csum2);
1747 		SK_INC(i, SK_RX_RING_CNT);
/* Frame must be a complete single-descriptor packet with valid
 * status and a sane length, and pass the per-chip status check. */
1749 		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
1750 		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
1751 		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
1752 		    total_len < SK_MIN_FRAMELEN ||
1753 		    total_len > SK_JUMBO_FRAMELEN ||
1754 		    sk_rxvalid(sc, rxstat, total_len) == 0) {
1756 			sk_newbuf(sc_if, cur_rx, m, 0);
1761 		 * Try to allocate a new jumbo buffer. If that
1762 		 * fails, copy the packet to mbufs and put the
1763 		 * jumbo buffer back in the ring so it can be
1764 		 * re-used. If allocating mbufs fails, then we
1765 		 * have to drop the packet.
1767 		if (sk_newbuf(sc_if, cur_rx, NULL, 0) == ENOBUFS) {
1770 			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
1771 			    total_len + ETHER_ALIGN, 0, ifp, NULL);
1772 			sk_newbuf(sc_if, cur_rx, m, 0);
/* Strip the alignment slop copied in above. */
1777 			m_adj(m0, ETHER_ALIGN);
1780 			m->m_pkthdr.rcvif = ifp;
1781 			m->m_pkthdr.len = m->m_len = total_len;
1785 			sk_rxcsum(ifp, m, csum1, csum2);
1790 		ifp->if_input(ifp, m);
/* Give refreshed descriptors back to the chip. */
1794 	bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
1795 			BUS_DMASYNC_PREWRITE);
/*
 * sk_rxcsum: derive IP/TCP/UDP checksum-offload results from the two
 * raw 16-bit checksum accumulators the chip computes over the frame
 * (csum1/csum2).  Verifies the IP header checksum, then builds the
 * pseudo-header checksum for TCP/UDP and sets csum_flags/csum_data on
 * the mbuf when everything verifies.  Returns early (leaving the stack
 * to checksum in software) on anything it can't handle: VLAN-tagged
 * non-IP, short/truncated/padded packets, IP fragments, or protocols
 * other than TCP/UDP.
 */
1801 sk_rxcsum(struct ifnet *ifp, struct mbuf *m,
1802 	  const uint16_t csum1, const uint16_t csum2)
1804 	struct ether_header *eh;
1807 	int hlen, len, plen;
1808 	uint16_t iph_csum, ipo_csum, ipd_csum, csum;
1810 	pp = mtod(m, uint8_t *);
1811 	plen = m->m_pkthdr.len;
1812 	if (plen < sizeof(*eh))
1814 	eh = (struct ether_header *)pp;
/* Fold csum2 out of csum1: ones-complement subtraction. */
1815 	iph_csum = in_addword(csum1, (~csum2 & 0xffff));
1817 	if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1818 		uint16_t *xp = (uint16_t *)pp;
1820 		xp = (uint16_t *)pp;
1821 		if (xp[1] != htons(ETHERTYPE_IP))
/* Adjust the accumulator for the 4 VLAN-tag bytes the chip summed. */
1823 		iph_csum = in_addword(iph_csum, (~xp[0] & 0xffff));
1824 		iph_csum = in_addword(iph_csum, (~xp[1] & 0xffff));
1825 		xp = (uint16_t *)(pp + sizeof(struct ip));
1826 		iph_csum = in_addword(iph_csum, xp[0]);
1827 		iph_csum = in_addword(iph_csum, xp[1]);
1829 	} else if (eh->ether_type != htons(ETHERTYPE_IP)) {
1834 	plen -= sizeof(*eh);
1836 	ip = (struct ip *)pp;
1838 	if (ip->ip_v != IPVERSION)
1841 	hlen = ip->ip_hl << 2;
1842 	if (hlen < sizeof(struct ip))
1844 	if (hlen > ntohs(ip->ip_len))
1847 	/* Don't deal with truncated or padded packets. */
1848 	if (plen != ntohs(ip->ip_len))
/* Sum any IP options so they can be removed from the data checksum. */
1851 	len = hlen - sizeof(struct ip);
1855 		p = (uint16_t *)(ip + 1);
1857 		for (ipo_csum = 0; len > 0; len -= sizeof(*p), p++)
1858 			ipo_csum = in_addword(ipo_csum, *p);
1859 		iph_csum = in_addword(iph_csum, ipo_csum);
1860 		ipd_csum = in_addword(csum2, (~ipo_csum & 0xffff));
/* A valid ones-complement header sum folds to 0xffff. */
1865 	if (iph_csum != 0xffff)
1867 	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
1869 	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
1870 		return;	/* ip frag, we're done for now */
1874 	/* Only know checksum protocol for udp/tcp */
1875 	if (ip->ip_p == IPPROTO_UDP) {
1876 		struct udphdr *uh = (struct udphdr *)pp;
1878 		if (uh->uh_sum == 0)	/* udp with no checksum */
1880 	} else if (ip->ip_p != IPPROTO_TCP) {
/* Verify payload sum against the standard TCP/UDP pseudo-header. */
1884 	csum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1885 	    htonl(ntohs(ip->ip_len) - hlen + ip->ip_p) + ipd_csum);
1886 	if (csum == 0xffff) {
1887 		m->m_pkthdr.csum_data = csum;
1888 		m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
/*
 * sk_txeof: transmit-completion processing.
 * Walks the TX ring from the consumer to the producer index, stopping
 * at the first descriptor still owned by the chip.  Unloads the DMA
 * map and frees the mbuf on each completed LASTFRAG descriptor,
 * updates the TX count and watchdog timer, and clears OACTIVE once
 * enough descriptors are free for sk_start to make progress.
 */
1894 sk_txeof(struct sk_if_softc *sc_if)
1896 	struct sk_chain_data *cd = &sc_if->sk_cdata;
1897 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
1901 	DPRINTFN(2, ("sk_txeof\n"));
1903 	bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
1904 			BUS_DMASYNC_POSTREAD);
1907 	 * Go through our tx ring and free mbufs for those
1908 	 * frames that have been sent.
1910 	idx = cd->sk_tx_cons;
1911 	while (idx != cd->sk_tx_prod) {
1912 		struct sk_tx_desc *cur_tx;
1915 		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
1916 		sk_ctl = le32toh(cur_tx->sk_ctl);
1919 			sk_dump_txdesc(cur_tx, idx);
/* OWN still set => chip hasn't finished this one; stop here. */
1921 		if (sk_ctl & SK_TXCTL_OWN)
1923 		if (sk_ctl & SK_TXCTL_LASTFRAG)
/* The mbuf (and its DMA map) is parked on the LASTFRAG slot. */
1925 		if (cd->sk_tx_chain[idx].sk_mbuf != NULL) {
1926 			bus_dmamap_unload(cd->sk_tx_dtag, cd->sk_tx_dmap[idx]);
1927 			m_freem(cd->sk_tx_chain[idx].sk_mbuf);
1928 			cd->sk_tx_chain[idx].sk_mbuf = NULL;
1930 		sc_if->sk_cdata.sk_tx_cnt--;
1932 		SK_INC(idx, SK_TX_RING_CNT);
/* Re-arm the watchdog only while work is still pending. */
1934 	ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;
1936 	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
1937 		ifp->if_flags &= ~IFF_OACTIVE;
1939 	sc_if->sk_cdata.sk_tx_cons = idx;
1942 	bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
1943 			BUS_DMASYNC_PREWRITE);
/*
 * sk_tick: periodic callout for GENESIS/XMAC link monitoring.
 * Per SysKonnect, link-up is confirmed by sampling GPIO bit 0 three
 * times in a row; until that happens the callout re-arms itself every
 * second.  Once confirmed, the GP0 interrupt is re-enabled and the
 * callout stops.  For BCOM PHYs, PHY interrupt handling is delegated
 * to sk_intr_bcom instead.
 */
1948 sk_tick(void *xsc_if)
1950 	struct sk_if_softc *sc_if = xsc_if;
1951 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
1952 	struct mii_data *mii = device_get_softc(sc_if->sk_miibus);
1955 	DPRINTFN(2, ("sk_tick\n"));
/* Callouts run outside the serializer; take it explicitly. */
1957 	lwkt_serialize_enter(ifp->if_serializer);
1959 	if ((ifp->if_flags & IFF_UP) == 0) {
1960 		lwkt_serialize_exit(ifp->if_serializer);
1964 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
1965 		sk_intr_bcom(sc_if);
1966 		lwkt_serialize_exit(ifp->if_serializer);
1971 	 * According to SysKonnect, the correct way to verify that
1972 	 * the link has come back up is to poll bit 0 of the GPIO
1973 	 * register three times. This pin has the signal from the
1974 	 * link sync pin connected to it; if we read the same link
1975 	 * state 3 times in a row, we know the link is up.
1977 	for (i = 0; i < 3; i++) {
1978 		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
/* Not yet stable: poll again in one second. */
1983 		callout_reset(&sc_if->sk_tick_timer, hz, sk_tick, sc_if);
1984 		lwkt_serialize_exit(ifp->if_serializer);
1988 	/* Turn the GP0 interrupt back on. */
1989 	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
1990 	SK_XM_READ_2(sc_if, XM_ISR);
1992 	callout_stop(&sc_if->sk_tick_timer);
1993 	lwkt_serialize_exit(ifp->if_serializer);
/*
 * sk_yukon_tick: periodic callout for Yukon ports.
 * Runs under the interface serializer and re-arms itself every second
 * (the MII tick itself is on a line not visible in this excerpt).
 */
1997 sk_yukon_tick(void *xsc_if)
1999 	struct sk_if_softc *sc_if = xsc_if;
2000 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
2001 	struct mii_data *mii = device_get_softc(sc_if->sk_miibus);
2003 	lwkt_serialize_enter(ifp->if_serializer);
2005 	callout_reset(&sc_if->sk_tick_timer, hz, sk_yukon_tick, sc_if);
2006 	lwkt_serialize_exit(ifp->if_serializer);
/*
 * sk_intr_bcom: service a Broadcom PHY interrupt on a GENESIS port.
 * TX/RX are paused on the XMAC while the PHY ISR is read and any
 * link-change/autoneg events are processed, then re-enabled.
 */
2010 sk_intr_bcom(struct sk_if_softc *sc_if)
2012 	struct mii_data *mii = device_get_softc(sc_if->sk_miibus);
2013 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
2016 	DPRINTFN(2, ("sk_intr_bcom\n"));
/* Quiesce the MAC while poking the PHY over MII. */
2018 	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2021 	 * Read the PHY interrupt register to make sure
2022 	 * we clear any pending interrupts.
2024 	status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);
2026 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
2027 		sk_init_xmac(sc_if);
2031 	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
2034 		lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
2037 		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
/* Link went away. */
2039 			/* Turn off the link LED. */
2040 			SK_IF_WRITE_1(sc_if, 0,
2041 			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
2043 		} else if (status & BRGPHY_ISR_LNK_CHG) {
2044 			sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2045 	    		    BRGPHY_MII_IMR, 0xFF00);
2048 			/* Turn on the link LED. */
2049 			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2050 			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
2051 			    SK_LINKLED_BLINK_OFF);
/* Keep polling link state via the tick callout. */
2054 			callout_reset(&sc_if->sk_tick_timer, hz,
2059 	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
/*
 * sk_intr_xmac: service an XMAC (GENESIS) MAC interrupt.
 * On GP0/autoneg events with the internal XMAC PHY, masks GP0 and
 * defers link handling to the sk_tick callout; also flushes the TX/RX
 * FIFOs on underrun/overrun indications.
 */
2063 sk_intr_xmac(struct sk_if_softc *sc_if)
2067 	status = SK_XM_READ_2(sc_if, XM_ISR);
2068 	DPRINTFN(2, ("sk_intr_xmac\n"));
2070 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC &&
2071 	    (status & (XM_ISR_GP0_SET | XM_ISR_AUTONEG_DONE))) {
/* Mask GP0 until sk_tick confirms stable link and unmasks it. */
2072 		if (status & XM_ISR_GP0_SET)
2073 			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
2075 		callout_reset(&sc_if->sk_tick_timer, hz,
2079 	if (status & XM_IMR_TX_UNDERRUN)
2080 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);
2082 	if (status & XM_IMR_RX_OVERRUN)
2083 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
/*
 * sk_intr_yukon: service a Yukon GMAC interrupt.
 * Acknowledges RX-FIFO-overrun and TX-FIFO-underrun conditions by
 * writing the corresponding clear bits to the MAC FIFO control/test
 * registers.
 */
2087 sk_intr_yukon(struct sk_if_softc *sc_if)
2091 	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
2093 	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
2094 		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
2095 			      SK_RFCTL_RX_FIFO_OVER);
/* NOTE(review): the TX-underrun ack below targets SK_RXMF1_CTRL_TEST
 * (RX FIFO) while using a TX flag; the TX register would be
 * SK_TXMF1_CTRL_TEST — verify against the original file. */
2098 	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
2099 		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
2100 			      SK_TFCTL_TX_FIFO_UNDER);
2103 	DPRINTFN(2, ("sk_intr_yukon status=%#x\n", status));
/*
 * sk_intr: shared controller interrupt handler (body; the signature
 * line is above this excerpt).  Loops while any unmasked status bits
 * remain, dispatching RX-EOF, TX-EOF, MAC, and external-register
 * (BCOM PHY) events to the appropriate per-port handlers, then
 * rewrites the interrupt mask and restarts any stalled transmit
 * queues.
 */
2109 	struct sk_softc *sc = xsc;
2110 	struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
2111 	struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
2112 	struct ifnet *ifp0 = NULL, *ifp1 = NULL;
2115 	ASSERT_SERIALIZED(&sc->sk_serializer);
2117 	status = CSR_READ_4(sc, SK_ISSR);
/* 0xffffffff typically means the device is gone (e.g. hot-unplug). */
2118 	if (status == 0 || status == 0xffffffff)
2122 		ifp0 = &sc_if0->arpcom.ac_if;
2124 		ifp1 = &sc_if1->arpcom.ac_if;
2126 	for (; (status &= sc->sk_intrmask) != 0;) {
2127 		/* Handle receive interrupts first. */
2128 		if (sc_if0 && (status & SK_ISR_RX1_EOF)) {
2130 			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
2131 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2133 		if (sc_if1 && (status & SK_ISR_RX2_EOF)) {
2135 			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
2136 			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
2139 		/* Then transmit interrupts. */
2140 		if (sc_if0 && (status & SK_ISR_TX1_S_EOF)) {
2142 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
2143 			    SK_TXBMU_CLR_IRQ_EOF);
2145 		if (sc_if1 && (status & SK_ISR_TX2_S_EOF)) {
2147 			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
2148 			    SK_TXBMU_CLR_IRQ_EOF);
2151 		/* Then MAC interrupts. */
2152 		if (sc_if0 && (status & SK_ISR_MAC1) &&
2153 		    (ifp0->if_flags & IFF_RUNNING)) {
2154 			if (SK_IS_GENESIS(sc))
2155 				sk_intr_xmac(sc_if0);
2157 				sk_intr_yukon(sc_if0);
2160 		if (sc_if1 && (status & SK_ISR_MAC2) &&
2161 		    (ifp1->if_flags & IFF_RUNNING)) {
2162 			if (SK_IS_GENESIS(sc))
2163 				sk_intr_xmac(sc_if1);
2165 				sk_intr_yukon(sc_if1);
2168 		if (status & SK_ISR_EXTERNAL_REG) {
2169 			if (sc_if0 != NULL &&
2170 			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
2171 				sk_intr_bcom(sc_if0);
2173 			if (sc_if1 != NULL &&
2174 			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
2175 				sk_intr_bcom(sc_if1);
/* Re-read status: loop until nothing unmasked is pending. */
2177 		status = CSR_READ_4(sc, SK_ISSR);
2180 	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
/* Restart output if packets queued up while the ring was busy. */
2182 	if (ifp0 != NULL && !ifq_is_empty(&ifp0->if_snd))
2184 	if (ifp1 != NULL && !ifq_is_empty(&ifp1->if_snd))
/*
 * sk_init_xmac: bring up the XMAC II (GENESIS) MAC for one port.
 * Unresets the MAC, applies Broadcom-PHY workarounds where needed,
 * programs the station address, RX/TX modes (including acceptance of
 * error frames — see the long comment below for why), and the MAC
 * arbiter timing values appropriate for the XMAC revision.
 */
2189 sk_init_xmac(struct sk_if_softc *sc_if)
2191 	struct sk_softc *sc = sc_if->sk_softc;
2192 	struct ifnet *ifp = &sc_if->arpcom.ac_if;
/* Magic register/value pairs for early BCM5400 silicon (see below). */
2193 	static const struct sk_bcom_hack bhack[] = {
2194 	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
2195 	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
2196 	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
2199 	DPRINTFN(2, ("sk_init_xmac\n"));
2201 	/* Unreset the XMAC. */
2202 	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
2205 	/* Reset the XMAC's internal state. */
2206 	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2208 	/* Save the XMAC II revision */
2209 	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));
2212 	 * Perform additional initialization for external PHYs,
2213 	 * namely for the 1000baseTX cards that use the XMAC's
2216 	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2220 		/* Take PHY out of reset. */
2221 		val = sk_win_read_4(sc, SK_GPIO);
2222 		if (sc_if->sk_port == SK_PORT_A)
2223 			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
2225 			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
2226 		sk_win_write_4(sc, SK_GPIO, val);
2228 		/* Enable GMII mode on the XMAC. */
2229 		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);
2231 		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2232 		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
2234 		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2235 		    BRGPHY_MII_IMR, 0xFFF0);
2238 		 * Early versions of the BCM5400 apparently have
2239 		 * a bug that requires them to have their reserved
2240 		 * registers initialized to some magic values. I don't
2241 		 * know what the numbers do, I'm just the messenger.
2243 		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
2245 			while(bhack[i].reg) {
2246 				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
2247 				    bhack[i].reg, bhack[i].val);
2253 	/* Set station address */
2254 	SK_XM_WRITE_2(sc_if, XM_PAR0,
2255 	    *(uint16_t *)(&sc_if->arpcom.ac_enaddr[0]));
2256 	SK_XM_WRITE_2(sc_if, XM_PAR1,
2257 	    *(uint16_t *)(&sc_if->arpcom.ac_enaddr[2]));
2258 	SK_XM_WRITE_2(sc_if, XM_PAR2,
2259 	    *(uint16_t *)(&sc_if->arpcom.ac_enaddr[4]));
2260 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);
2262 	if (ifp->if_flags & IFF_BROADCAST)
2263 		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2265 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
2267 	/* We don't need the FCS appended to the packet. */
2268 	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);
2270 	/* We want short frames padded to 60 bytes. */
2271 	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);
2274 	 * Enable the reception of all error frames. This is
2275 	 * a necessary evil due to the design of the XMAC. The
2276 	 * XMAC's receive FIFO is only 8K in size, however jumbo
2277 	 * frames can be up to 9000 bytes in length. When bad
2278 	 * frame filtering is enabled, the XMAC's RX FIFO operates
2279 	 * in 'store and forward' mode. For this to work, the
2280 	 * entire frame has to fit into the FIFO, but that means
2281 	 * that jumbo frames larger than 8192 bytes will be
2282 	 * truncated. Disabling all bad frame filtering causes
2283 	 * the RX FIFO to operate in streaming mode, in which
2284 	 * case the XMAC will start transfering frames out of the
2285 	 * RX FIFO as soon as the FIFO threshold is reached.
2287 	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
2288 	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
2289 	    XM_MODE_RX_INRANGELEN);
2291 	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
2294 	 * Bump up the transmit threshold. This helps hold off transmit
2295 	 * underruns when we're blasting traffic from both ports at once.
2297 	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);
2299 	/* Set promiscuous mode */
2300 	sk_setpromisc(sc_if);
2302 	/* Set multicast filter */
2305 	/* Clear and enable interrupts */
2306 	SK_XM_READ_2(sc_if, XM_ISR);
2307 	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
2308 		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
/* External PHY: mask all XMAC-internal interrupt sources. */
2310 		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2312 	/* Configure MAC arbiter */
2313 	switch(sc_if->sk_xmac_rev) {
2314 	case XM_XMAC_REV_B2:
2315 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
2316 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
2317 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
2318 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
2319 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
2320 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
2321 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
2322 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
2323 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2325 	case XM_XMAC_REV_C1:
2326 		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
2327 		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
2328 		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
2329 		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
2330 		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
2331 		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
2332 		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
2333 		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
2334 		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
2339 	sk_win_write_2(sc, SK_MACARB_CTL,
2340 	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);
/*
 * sk_init_yukon: bring up the Yukon GMAC/GPHY for one port.
 * Resets GMAC and GPHY (with a COMA-mode GPIO workaround on Yukon
 * Lite rev >= A3), configures the PHY for the detected media type,
 * clears the MIB counters, programs MAC timing/serial-mode registers
 * and the station addresses, and sets up the RX/TX MAC FIFOs.
 */
2346 sk_init_yukon(struct sk_if_softc *sc_if)
2350 	struct sk_softc *sc;
2353 	sc = sc_if->sk_softc;
2355 	DPRINTFN(2, ("sk_init_yukon: start: sk_csr=%#x\n",
2356 		     CSR_READ_4(sc_if->sk_softc, SK_CSR)));
2358 	if (sc->sk_type == SK_YUKON_LITE &&
2359 	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
2361 		 * Workaround code for COMA mode, set PHY reset.
2362 		 * Otherwise it will not correctly take chip out of
2365 		v = sk_win_read_4(sc, SK_GPIO);
2366 		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
2367 		sk_win_write_4(sc, SK_GPIO, v);
2370 	DPRINTFN(6, ("sk_init_yukon: 1\n"));
2372 	/* GMAC and GPHY Reset */
2373 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
2374 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
2377 	DPRINTFN(6, ("sk_init_yukon: 2\n"));
2379 	if (sc->sk_type == SK_YUKON_LITE &&
2380 	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
2382 		 * Workaround code for COMA mode, clear PHY reset
2384 		v = sk_win_read_4(sc, SK_GPIO);
2387 		sk_win_write_4(sc, SK_GPIO, v);
/* Base PHY control: autoneg everything, no sleep, pause enabled. */
2390 	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
2391 		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;
2393 	if (sc->sk_coppertype)
2394 		phy |= SK_GPHY_COPPER;
2396 		phy |= SK_GPHY_FIBER;
2398 	DPRINTFN(3, ("sk_init_yukon: phy=%#x\n", phy));
/* Pulse reset with the media bits held, then release it. */
2400 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
2402 	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
2403 	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
2404 		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
2406 	DPRINTFN(3, ("sk_init_yukon: gmac_ctrl=%#x\n",
2407 		     SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));
2409 	DPRINTFN(6, ("sk_init_yukon: 3\n"));
2411 	/* unused read of the interrupt source register */
2412 	DPRINTFN(6, ("sk_init_yukon: 4\n"));
2413 	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);
2415 	DPRINTFN(6, ("sk_init_yukon: 4a\n"));
2416 	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
2417 	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
/* Toggle MIB-clear to zero the hardware statistics counters. */
2419 	/* MIB Counter Clear Mode set */
2420 	reg |= YU_PAR_MIB_CLR;
2421 	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
2422 	DPRINTFN(6, ("sk_init_yukon: 4b\n"));
2423 	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2425 	/* MIB Counter Clear Mode clear */
2426 	DPRINTFN(6, ("sk_init_yukon: 5\n"));
2427 	reg &= ~YU_PAR_MIB_CLR;
2428 	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);
2430 	/* receive control reg */
2431 	DPRINTFN(6, ("sk_init_yukon: 7\n"));
2432 	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);
2434 	/* transmit parameter register */
2435 	DPRINTFN(6, ("sk_init_yukon: 8\n"));
2436 	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
2437 		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );
2439 	/* serial mode register */
2440 	DPRINTFN(6, ("sk_init_yukon: 9\n"));
2441 	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
2442 		      YU_SMR_MFL_VLAN | YU_SMR_MFL_JUMBO |
2443 		      YU_SMR_IPG_DATA(0x1e));
2445 	DPRINTFN(6, ("sk_init_yukon: 10\n"));
2446 	/* Setup Yukon's address */
2447 	for (i = 0; i < 3; i++) {
2448 		/* Write Source Address 1 (unicast filter) */
2449 		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
2450 			      sc_if->arpcom.ac_enaddr[i * 2] |
2451 			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
/* Source Address 2 is loaded from the port's EEPROM MAC registers. */
2454 	for (i = 0; i < 3; i++) {
2455 		reg = sk_win_read_2(sc_if->sk_softc,
2456 				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
2457 		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
2460 	/* Set promiscuous mode */
2461 	sk_setpromisc(sc_if);
2463 	/* Set multicast filter */
2464 	DPRINTFN(6, ("sk_init_yukon: 11\n"));
2467 	/* enable interrupt mask for counter overflows */
2468 	DPRINTFN(6, ("sk_init_yukon: 12\n"));
2469 	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
2470 	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
2471 	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);
2473 	/* Configure RX MAC FIFO Flush Mask */
2474 	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
2475 	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
2477 	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);
2479 	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
2480 	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
2481 		v = SK_TFCTL_OPERATION_ON;
2483 		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
2484 	/* Configure RX MAC FIFO */
2485 	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
2486 	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);
2488 	/* Increase flush threshould to 64 bytes */
2489 	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
2490 		      SK_RFCTL_FIFO_THRESHOLD + 1);
2492 	/* Configure TX MAC FIFO */
2493 	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
2494 	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
2496 	DPRINTFN(6, ("sk_init_yukon: end\n"));
2500 * Note that to properly initialize any part of the GEnesis chip,
2501 * you first have to take it out of reset mode.
2504 sk_init(void *xsc_if)
2506 struct sk_if_softc *sc_if = xsc_if;
2507 struct sk_softc *sc = sc_if->sk_softc;
2508 struct ifnet *ifp = &sc_if->arpcom.ac_if;
2509 struct mii_data *mii = device_get_softc(sc_if->sk_miibus);
2511 DPRINTFN(2, ("sk_init\n"));
2513 ASSERT_SERIALIZED(ifp->if_serializer);
2515 if (ifp->if_flags & IFF_RUNNING)
2518 /* Cancel pending I/O and free all RX/TX buffers. */
2521 if (SK_IS_GENESIS(sc)) {
2522 /* Configure LINK_SYNC LED */
2523 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
2524 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
2525 SK_LINKLED_LINKSYNC_ON);
2527 /* Configure RX LED */
2528 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
2529 SK_RXLEDCTL_COUNTER_START);
2531 /* Configure TX LED */
2532 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
2533 SK_TXLEDCTL_COUNTER_START);
2537 * Configure descriptor poll timer
2539 * SK-NET GENESIS data sheet says that possibility of losing Start
2540 * transmit command due to CPU/cache related interim storage problems
2541 * under certain conditions. The document recommends a polling
2542 * mechanism to send a Start transmit command to initiate transfer
2543 * of ready descriptors regulary. To cope with this issue sk(4) now
2544 * enables descriptor poll timer to initiate descriptor processing
2545 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still
2546 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx
2547 * command instead of waiting for next descriptor polling time.
2548 * The same rule may apply to Rx side too but it seems that is not
2549 * needed at the moment.
2550 * Since sk(4) uses descriptor polling as a last resort there is no
2551 * need to set smaller polling time than maximum allowable one.
2553 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);
2555 /* Configure I2C registers */
2557 /* Configure XMAC(s) */
2558 switch (sc->sk_type) {
2560 sk_init_xmac(sc_if);
2565 sk_init_yukon(sc_if);
2570 if (SK_IS_GENESIS(sc)) {
2571 /* Configure MAC FIFOs */
2572 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
2573 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
2574 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);
2576 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
2577 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
2578 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
2581 /* Configure transmit arbiter(s) */
2582 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
2583 SK_TXARCTL_ON | SK_TXARCTL_FSYNC_ON);
2585 /* Configure RAMbuffers */
2586 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
2587 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
2588 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
2589 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
2590 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
2591 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);
2593 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
2594 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
2595 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
2596 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
2597 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
2598 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
2599 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);
2601 /* Configure BMUs */
2602 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
2603 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
2604 SK_RX_RING_ADDR(sc_if, 0));
2605 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);
2607 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
2608 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
2609 SK_TX_RING_ADDR(sc_if, 0));
2610 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);
2612 /* Init descriptors */
2613 if (sk_init_rx_ring(sc_if) == ENOBUFS) {
2614 if_printf(ifp, "initialization failed: "
2615 "no memory for rx buffers\n");
2620 if (sk_init_tx_ring(sc_if) == ENOBUFS) {
2621 if_printf(ifp, "initialization failed: "
2622 "no memory for tx buffers\n");
2627 /* Configure interrupt handling */
2628 CSR_READ_4(sc, SK_ISSR);
2629 if (sc_if->sk_port == SK_PORT_A)
2630 sc->sk_intrmask |= SK_INTRS1;
2632 sc->sk_intrmask |= SK_INTRS2;
2634 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;
2636 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2639 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);
2641 if (SK_IS_GENESIS(sc)) {
2642 /* Enable XMACs TX and RX state machines */
2643 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
2644 SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
2645 XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
2648 if (SK_IS_YUKON(sc)) {
2649 uint16_t reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
2650 reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
2652 /* XXX disable 100Mbps and full duplex mode? */
2653 reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
2655 SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
2658 /* Activate descriptor polling timer */
2659 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
2660 /* Start transfer of Tx descriptors */
2661 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);
2663 ifp->if_flags |= IFF_RUNNING;
2664 ifp->if_flags &= ~IFF_OACTIVE;
2666 if (SK_IS_YUKON(sc))
2667 callout_reset(&sc_if->sk_tick_timer, hz, sk_yukon_tick, sc_if);
2671 sk_stop(struct sk_if_softc *sc_if)
2673 struct sk_softc *sc = sc_if->sk_softc;
2674 struct ifnet *ifp = &sc_if->arpcom.ac_if;
2675 struct sk_chain_data *cd = &sc_if->sk_cdata;
2679 ASSERT_SERIALIZED(ifp->if_serializer);
2681 DPRINTFN(2, ("sk_stop\n"));
2683 callout_stop(&sc_if->sk_tick_timer);
2685 ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
2687 /* Stop Tx descriptor polling timer */
2688 SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
2690 /* Stop transfer of Tx descriptors */
2691 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
2692 for (i = 0; i < SK_TIMEOUT; i++) {
2693 val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
2694 if (!(val & SK_TXBMU_TX_STOP))
2698 if (i == SK_TIMEOUT)
2699 if_printf(ifp, "cannot stop transfer of Tx descriptors\n");
2701 /* Stop transfer of Rx descriptors */
2702 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
2703 for (i = 0; i < SK_TIMEOUT; i++) {
2704 val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
2705 if (!(val & SK_RXBMU_RX_STOP))
2709 if (i == SK_TIMEOUT)
2710 if_printf(ifp, "cannot stop transfer of Rx descriptors\n");
2712 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
2713 /* Put PHY back into reset. */
2714 val = sk_win_read_4(sc, SK_GPIO);
2715 if (sc_if->sk_port == SK_PORT_A) {
2716 val |= SK_GPIO_DIR0;
2717 val &= ~SK_GPIO_DAT0;
2719 val |= SK_GPIO_DIR2;
2720 val &= ~SK_GPIO_DAT2;
2722 sk_win_write_4(sc, SK_GPIO, val);
2725 /* Turn off various components of this interface. */
2726 SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
2727 switch (sc->sk_type) {
2729 SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
2730 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
2735 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
2736 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
2739 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
2740 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET | SK_RBCTL_OFF);
2741 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
2742 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST,
2743 SK_RBCTL_RESET | SK_RBCTL_OFF);
2744 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
2745 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2746 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
2747 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
2748 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);
2750 /* Disable interrupts */
2751 if (sc_if->sk_port == SK_PORT_A)
2752 sc->sk_intrmask &= ~SK_INTRS1;
2754 sc->sk_intrmask &= ~SK_INTRS2;
2755 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
2757 SK_XM_READ_2(sc_if, XM_ISR);
2758 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
2760 /* Free RX and TX mbufs still in the queues. */
2761 for (i = 0; i < SK_RX_RING_CNT; i++) {
2762 if (cd->sk_rx_chain[i].sk_mbuf != NULL) {
2763 m_freem(cd->sk_rx_chain[i].sk_mbuf);
2764 cd->sk_rx_chain[i].sk_mbuf = NULL;
2767 for (i = 0; i < SK_TX_RING_CNT; i++) {
2768 if (cd->sk_tx_chain[i].sk_mbuf != NULL) {
2769 bus_dmamap_unload(cd->sk_tx_dtag, cd->sk_tx_dmap[i]);
2770 m_freem(cd->sk_tx_chain[i].sk_mbuf);
2771 cd->sk_tx_chain[i].sk_mbuf = NULL;
/*
 * Debug helper: print every field of one Tx descriptor.  Ring fields are
 * stored little-endian, so each is converted with le32toh()/le16toh()
 * before printing.
 * NOTE(review): the DESC_PRINT macro body looks truncated in this
 * excerpt — the `idx`/value argument line and the trailing #undef are
 * missing; code tokens are preserved exactly as found.
 */
sk_dump_txdesc(struct sk_tx_desc *desc, int idx)
#define DESC_PRINT(X)					\
	printf("txdesc[%d]." #X "=%#x\n",		\

	DESC_PRINT(le32toh(desc->sk_ctl));
	DESC_PRINT(le32toh(desc->sk_next));
	DESC_PRINT(le32toh(desc->sk_data_lo));
	DESC_PRINT(le32toh(desc->sk_data_hi));
	DESC_PRINT(le32toh(desc->sk_xmac_txstat));
	DESC_PRINT(le16toh(desc->sk_rsvd0));
	DESC_PRINT(le16toh(desc->sk_csum_startval));
	DESC_PRINT(le16toh(desc->sk_csum_startpos));
	DESC_PRINT(le16toh(desc->sk_csum_writepos));
	DESC_PRINT(le16toh(desc->sk_rsvd1));
/*
 * Debug helper: hex + ASCII dump of a buffer, 16 bytes per output row.
 * Non-printable bytes are shown as spaces in the ASCII column.
 * NOTE(review): the lines computing the per-row byte count `c`
 * (presumably min(len - i, 16)), the column spacing and the newline
 * emission appear to be missing from this excerpt; code tokens are
 * preserved exactly as found.
 */
sk_dump_bytes(const char *data, int len)
	for (i = 0; i < len; i += 16) {
		/* hex column */
		for (j = 0; j < c; j++) {
			printf("%02x ", data[i + j] & 0xff);
			/* extra gap after the 8th byte of a row */
			if ((j & 0xf) == 7 && j > 0)
		/* ASCII column */
		for (j = 0; j < c; j++) {
			int ch = data[i + j] & 0xff;
			printf("%c", ' ' <= ch && ch <= '~' ? ch : ' ');
/*
 * Debug helper: walk an mbuf chain and hex-dump each fragment via
 * sk_dump_bytes(), up to the packet-header length.
 * NOTE(review): the loop-advance statements (presumably
 * `count -= m->m_len; m = m->m_next;`) appear to be missing from this
 * excerpt; code tokens are preserved exactly as found.
 */
sk_dump_mbuf(struct mbuf *m)
	int count = m->m_pkthdr.len;

	printf("m=%p, m->m_pkthdr.len=%d\n", m, m->m_pkthdr.len);

	while (count > 0 && m) {
		printf("m=%p, m->m_data=%p, m->m_len=%d\n",
		       m, m->m_data, m->m_len);
		sk_dump_bytes(mtod(m, char *), m->m_len);
2849 * Allocate jumbo buffer storage. The SysKonnect adapters support
2850 * "jumbograms" (9K frames), although SysKonnect doesn't currently
2851 * use them in their drivers. In order for us to use them, we need
2852 * large 9K receive buffers, however standard mbuf clusters are only
2853 * 2048 bytes in size. Consequently, we need to allocate and manage
2854 * our own jumbo buffer pool. Fortunately, this does not require an
2855 * excessive amount of additional code.
/*
 * Allocate the jumbo receive buffer pool: one contiguous SK_JMEM-byte,
 * page-aligned, 32-bit-addressable DMA allocation, carved into SK_JSLOTS
 * slots of SK_JLEN bytes each, all placed on the free list.  On failure
 * everything allocated so far is torn down.
 * NOTE(review): the `if (error) { ... return ...; }` wrappers, the slot
 * buffer-pointer/paddr advance arithmetic, and one bus_dmamem_free
 * continuation line appear to be missing from this excerpt; code tokens
 * are preserved exactly as found.
 */
sk_jpool_alloc(device_t dev)
	struct sk_if_softc *sc_if = device_get_softc(dev);
	struct sk_chain_data *cd = &sc_if->sk_cdata;

	/* Serializer guards the jumbo free list against concurrent use. */
	lwkt_serialize_init(&cd->sk_jpool_serializer);

	/* Single segment covering the whole pool. */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL, SK_JMEM, 1, SK_JMEM,
				   0, &cd->sk_jpool_dtag);
		device_printf(dev, "can't create jpool DMA tag\n");

	error = bus_dmamem_alloc(cd->sk_jpool_dtag, &cd->sk_jpool,
				 BUS_DMA_WAITOK, &cd->sk_jpool_dmap);
		device_printf(dev, "can't alloc jpool DMA mem\n");
		bus_dma_tag_destroy(cd->sk_jpool_dtag);
		cd->sk_jpool_dtag = NULL;

	/* sk_dmamem_addr stores the pool's physical base into paddr. */
	error = bus_dmamap_load(cd->sk_jpool_dtag, cd->sk_jpool_dmap,
				cd->sk_jpool, SK_JMEM,
				sk_dmamem_addr, &paddr, BUS_DMA_WAITOK);
		device_printf(dev, "can't load DMA mem\n");
		bus_dmamem_free(cd->sk_jpool_dtag, cd->sk_jpool,
		bus_dma_tag_destroy(cd->sk_jpool_dtag);
		cd->sk_jpool_dtag = NULL;

	SLIST_INIT(&cd->sk_jpool_free_ent);

	/*
	 * Now divide it up into SK_JLEN pieces.
	 */
	for (i = 0; i < SK_JSLOTS; i++) {
		struct sk_jpool_entry *entry = &cd->sk_jpool_ent[i];

		entry->sc_if = sc_if;
		entry->paddr = paddr;
		/* all slots start out on the free list */
		SLIST_INSERT_HEAD(&cd->sk_jpool_free_ent, entry, entry_next);
/*
 * Release the jumbo buffer pool created by sk_jpool_alloc(): unload the
 * map, free the DMA memory and destroy the tag.  A NULL tag means the
 * pool was never allocated, so this is safe to call unconditionally.
 * NOTE(review): the bus_dmamem_free continuation line (presumably
 * `cd->sk_jpool_dmap);`) appears to be missing from this excerpt; code
 * tokens are preserved exactly as found.
 */
sk_jpool_free(struct sk_if_softc *sc_if)
	struct sk_chain_data *cd = &sc_if->sk_cdata;

	if (cd->sk_jpool_dtag != NULL) {
		bus_dmamap_unload(cd->sk_jpool_dtag, cd->sk_jpool_dmap);
		bus_dmamem_free(cd->sk_jpool_dtag, cd->sk_jpool,
		bus_dma_tag_destroy(cd->sk_jpool_dtag);
		cd->sk_jpool_dtag = NULL;
2936 sk_dma_alloc(device_t dev)
2938 struct sk_if_softc *sc_if = device_get_softc(dev);
2939 struct sk_chain_data *cd = &sc_if->sk_cdata;
2943 * Allocate the descriptor queues.
2944 * TODO: split into RX/TX rings
2946 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
2947 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2949 sizeof(struct sk_ring_data), 1,
2950 sizeof(struct sk_ring_data), 0,
2951 &sc_if->sk_rdata_dtag);
2953 device_printf(dev, "can't create desc DMA tag\n");
2957 error = bus_dmamem_alloc(sc_if->sk_rdata_dtag,
2958 (void **)&sc_if->sk_rdata,
2959 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2960 &sc_if->sk_rdata_dmap);
2962 device_printf(dev, "can't alloc desc DMA mem\n");
2963 bus_dma_tag_destroy(sc_if->sk_rdata_dtag);
2964 sc_if->sk_rdata_dtag = NULL;
2968 error = bus_dmamap_load(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
2969 sc_if->sk_rdata, sizeof(struct sk_ring_data),
2970 sk_dmamem_addr, &sc_if->sk_rdata_paddr,
2973 device_printf(dev, "can't load desc DMA mem\n");
2974 bus_dmamem_free(sc_if->sk_rdata_dtag, sc_if->sk_rdata,
2975 sc_if->sk_rdata_dmap);
2976 bus_dma_tag_destroy(sc_if->sk_rdata_dtag);
2977 sc_if->sk_rdata_dtag = NULL;
2981 /* Try to allocate memory for jumbo buffers. */
2982 error = sk_jpool_alloc(dev);
2984 device_printf(dev, "jumbo buffer allocation failed\n");
2988 /* Create DMA tag for TX. */
2989 error = bus_dma_tag_create(NULL, 1, 0,
2990 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2992 SK_JLEN, SK_NTXSEG, SK_JLEN,
2993 0, &cd->sk_tx_dtag);
2995 device_printf(dev, "can't create TX DMA tag\n");
2999 /* Create DMA maps for TX. */
3000 for (i = 0; i < SK_TX_RING_CNT; i++) {
3001 error = bus_dmamap_create(cd->sk_tx_dtag, 0,
3002 &cd->sk_tx_dmap[i]);
3004 device_printf(dev, "can't create %dth TX DMA map\n", i);
3010 for (j = 0; j < i; ++j)
3011 bus_dmamap_destroy(cd->sk_tx_dtag, cd->sk_tx_dmap[i]);
3012 bus_dma_tag_destroy(cd->sk_tx_dtag);
3013 cd->sk_tx_dtag = NULL;
/*
 * Tear down all per-interface DMA resources allocated by sk_dma_alloc():
 * TX maps and tag (unloading and freeing any mbufs still attached), the
 * jumbo buffer pool, and the descriptor ring memory.  NULL-tag checks
 * make this safe to call after a partial allocation failure.
 * NOTE(review): a bus_dmamap_unload continuation line (presumably
 * `cd->sk_tx_dmap[i]);`) and several closing braces appear to be missing
 * from this excerpt; code tokens are preserved exactly as found.
 */
sk_dma_free(device_t dev)
	struct sk_if_softc *sc_if = device_get_softc(dev);
	struct sk_chain_data *cd = &sc_if->sk_cdata;

	if (cd->sk_tx_dtag != NULL) {
		for (i = 0; i < SK_TX_RING_CNT; ++i) {
			/* a leftover mbuf means the map is still loaded */
			if (cd->sk_tx_chain[i].sk_mbuf != NULL) {
				bus_dmamap_unload(cd->sk_tx_dtag,
				m_freem(cd->sk_tx_chain[i].sk_mbuf);
				cd->sk_tx_chain[i].sk_mbuf = NULL;
			bus_dmamap_destroy(cd->sk_tx_dtag, cd->sk_tx_dmap[i]);
		bus_dma_tag_destroy(cd->sk_tx_dtag);
		cd->sk_tx_dtag = NULL;

	sk_jpool_free(sc_if);

	if (sc_if->sk_rdata_dtag != NULL) {
		bus_dmamap_unload(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap);
		bus_dmamem_free(sc_if->sk_rdata_dtag, sc_if->sk_rdata,
				sc_if->sk_rdata_dmap);
		bus_dma_tag_destroy(sc_if->sk_rdata_dtag);
		sc_if->sk_rdata_dtag = NULL;
/*
 * bus_dmamap_load callback for buffer loads: copy the resulting segment
 * list into the caller-supplied struct sk_dma_ctx.
 * NOTE(review): the `int i;` declaration, an early `if (error) return;`
 * check, and a `ctx->nsegs = nsegs;` assignment appear to be missing
 * from this excerpt; code tokens are preserved exactly as found.
 */
sk_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		bus_size_t mapsz __unused, int error)
	struct sk_dma_ctx *ctx = arg;

	/* on entry ctx->nsegs holds the capacity of ctx->segs */
	KASSERT(nsegs <= ctx->nsegs,
		("too many segments(%d), should be <= %d\n",
		 nsegs, ctx->nsegs));

	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
/*
 * bus_dmamap_load callback for single-segment memory loads: store the
 * segment's physical address through the bus_addr_t pointer passed as
 * the callback argument.  The tag guarantees a single segment, enforced
 * by the KASSERT.
 */
sk_dmamem_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
	KASSERT(nseg == 1, ("too many segments %d", nseg));
	*((bus_addr_t *)arg) = seg->ds_addr;