- busdma(9)fy
[dragonfly.git] / sys / dev / netif / sk / if_sk.c
1 /*
2  * Copyright (c) 1997, 1998, 1999, 2000
3  *      Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed by Bill Paul.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  *
32  * $OpenBSD: if_sk.c,v 1.129 2006/10/16 12:30:08 tom Exp $
33  * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
34  * $DragonFly: src/sys/dev/netif/sk/if_sk.c,v 1.49 2006/11/14 12:52:31 sephe Exp $
35  */
36
37 /*
38  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
39  *
40  * Permission to use, copy, modify, and distribute this software for any
41  * purpose with or without fee is hereby granted, provided that the above
42  * copyright notice and this permission notice appear in all copies.
43  *
44  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
45  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
46  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
47  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
48  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
49  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
50  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
51  */
52
53 /*
54  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55  * the SK-984x series adapters, both single port and dual port.
56  * References:
57  *      The XaQti XMAC II datasheet,
58  * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59  *      The SysKonnect GEnesis manual, http://www.syskonnect.com
60  *
61  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63  * convenience to others until Vitesse corrects this problem:
64  *
65  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66  *
67  * Written by Bill Paul <wpaul@ee.columbia.edu>
68  * Department of Electrical Engineering
69  * Columbia University, New York City
70  */
71
72 /*
73  * The SysKonnect gigabit ethernet adapters consist of two main
74  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
75  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
76  * components and a PHY while the GEnesis controller provides a PCI
77  * interface with DMA support. Each card may have between 512K and
78  * 2MB of SRAM on board depending on the configuration.
79  *
80  * The SysKonnect GEnesis controller can have either one or two XMAC
81  * chips connected to it, allowing single or dual port NIC configurations.
82  * SysKonnect has the distinction of being the only vendor on the market
83  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
84  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
85  * XMAC registers. This driver takes advantage of these features to allow
86  * both XMACs to operate as independent interfaces.
87  */
88  
89 #include <sys/param.h>
90 #include <sys/bus.h>
91 #include <sys/endian.h>
92 #include <sys/in_cksum.h>
93 #include <sys/kernel.h>
94 #include <sys/mbuf.h>
95 #include <sys/malloc.h>
96 #include <sys/queue.h>
97 #include <sys/rman.h>
98 #include <sys/serialize.h>
99 #include <sys/socket.h>
100 #include <sys/sockio.h>
101
102 #include <net/bpf.h>
103 #include <net/ethernet.h>
104 #include <net/if.h>
105 #include <net/if_arp.h>
106 #include <net/if_dl.h>
107 #include <net/if_media.h>
108 #include <net/ifq_var.h>
109 #include <net/vlan/if_vlan_var.h>
110
111 #include <netinet/ip.h>
112 #include <netinet/udp.h>
113
114 #include <dev/netif/mii_layer/mii.h>
115 #include <dev/netif/mii_layer/miivar.h>
116 #include <dev/netif/mii_layer/brgphyreg.h>
117
118 #include <bus/pci/pcireg.h>
119 #include <bus/pci/pcivar.h>
120 #include <bus/pci/pcidevs.h>
121
122 #include <dev/netif/sk/if_skreg.h>
123 #include <dev/netif/sk/yukonreg.h>
124 #include <dev/netif/sk/xmaciireg.h>
125 #include <dev/netif/sk/if_skvar.h>
126
127 #include "miibus_if.h"
128
129 #if 0
130 #define SK_DEBUG
131 #endif
132
133 #if 0
134 #define SK_RXCSUM
135 #endif
136
/* Table of PCI vendor/device IDs supported by this driver. */
static const struct skc_type {
        uint16_t        skc_vid;        /* PCI vendor ID */
        uint16_t        skc_did;        /* PCI device ID */
        const char      *skc_name;      /* description reported at probe time */
} skc_devs[] = {
        { PCI_VENDOR_3COM,              PCI_PRODUCT_3COM_3C940,
          "3Com 3C940" },
        { PCI_VENDOR_3COM,              PCI_PRODUCT_3COM_3C940B,
          "3Com 3C940B" },

        { PCI_VENDOR_CNET,              PCI_PRODUCT_CNET_GIGACARD,
          "CNet GigaCard" },

        { PCI_VENDOR_DLINK,             PCI_PRODUCT_DLINK_DGE530T_A1,
          "D-Link DGE-530T A1" },
        { PCI_VENDOR_DLINK,             PCI_PRODUCT_DLINK_DGE530T_B1,
          "D-Link DGE-530T B1" },

        { PCI_VENDOR_LINKSYS,           PCI_PRODUCT_LINKSYS_EG1032,
          "Linksys EG1032 v2" },
        { PCI_VENDOR_LINKSYS,           PCI_PRODUCT_LINKSYS_EG1064,
          "Linksys EG1064" },

        { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON,
          "Marvell Yukon 88E8001/8003/8010" },
        { PCI_VENDOR_MARVELL,           PCI_PRODUCT_MARVELL_YUKON_BELKIN,
          "Belkin F5D5005" },

        { PCI_VENDOR_SCHNEIDERKOCH,     PCI_PRODUCT_SCHNEIDERKOCH_SKNET_GE,
          "SysKonnect SK-NET" },
        { PCI_VENDOR_SCHNEIDERKOCH,     PCI_PRODUCT_SCHNEIDERKOCH_SK9821v2,
          "SysKonnect SK9821 v2" },

        { 0, 0, NULL }  /* list terminator */
};
173
174 static int      skc_probe(device_t);
175 static int      skc_attach(device_t);
176 static int      skc_detach(device_t);
177 static void     skc_shutdown(device_t);
178 static int      sk_probe(device_t);
179 static int      sk_attach(device_t);
180 static int      sk_detach(device_t);
181 static void     sk_tick(void *);
182 static void     sk_yukon_tick(void *);
183 static void     sk_intr(void *);
184 static void     sk_intr_bcom(struct sk_if_softc *);
185 static void     sk_intr_xmac(struct sk_if_softc *);
186 static void     sk_intr_yukon(struct sk_if_softc *);
187 static void     sk_rxeof(struct sk_if_softc *);
188 static void     sk_txeof(struct sk_if_softc *);
189 static int      sk_encap(struct sk_if_softc *, struct mbuf *, uint32_t *);
190 static void     sk_start(struct ifnet *);
191 static int      sk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
192 static void     sk_init(void *);
193 static void     sk_init_xmac(struct sk_if_softc *);
194 static void     sk_init_yukon(struct sk_if_softc *);
195 static void     sk_stop(struct sk_if_softc *);
196 static void     sk_watchdog(struct ifnet *);
197 static int      sk_ifmedia_upd(struct ifnet *);
198 static void     sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
199 static void     sk_reset(struct sk_softc *);
200 static int      sk_newbuf(struct sk_if_softc *, struct sk_chain *,
201                           struct mbuf *, int);
202 static int      sk_jpool_alloc(device_t);
203 static void     sk_jpool_free(struct sk_if_softc *);
204 static struct sk_jpool_entry
205                 *sk_jalloc(struct sk_if_softc *);
206 static void     sk_jfree(void *);
207 static void     sk_jref(void *);
208 static int      sk_init_rx_ring(struct sk_if_softc *);
209 static int      sk_init_tx_ring(struct sk_if_softc *);
210
211 static int      sk_miibus_readreg(device_t, int, int);
212 static int      sk_miibus_writereg(device_t, int, int, int);
213 static void     sk_miibus_statchg(device_t);
214
215 static int      sk_xmac_miibus_readreg(struct sk_if_softc *, int, int);
216 static int      sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, int);
217 static void     sk_xmac_miibus_statchg(struct sk_if_softc *);
218
219 static int      sk_marv_miibus_readreg(struct sk_if_softc *, int, int);
220 static int      sk_marv_miibus_writereg(struct sk_if_softc *, int, int, int);
221 static void     sk_marv_miibus_statchg(struct sk_if_softc *);
222
223 static void     sk_setfilt(struct sk_if_softc *, caddr_t, int);
224 static void     sk_setmulti(struct sk_if_softc *);
225 static void     sk_setpromisc(struct sk_if_softc *);
226
227 #ifdef SK_RXCSUM
228 static void     sk_rxcsum(struct ifnet *, struct mbuf *, const uint16_t,
229                           const uint16_t);
230 #endif
231 static int      sk_dma_alloc(device_t);
232 static void     sk_dma_free(device_t);
233
234 static void     sk_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t,
235                                 int);
236 static void     sk_dmamem_addr(void *, bus_dma_segment_t *, int, int);
237
238 #ifdef SK_DEBUG
239 #define DPRINTF(x)      if (skdebug) printf x
240 #define DPRINTFN(n,x)   if (skdebug >= (n)) printf x
241 static int      skdebug = 2;
242
243 static void     sk_dump_txdesc(struct sk_tx_desc *, int);
244 static void     sk_dump_mbuf(struct mbuf *);
245 static void     sk_dump_bytes(const char *, int);
246 #else
247 #define DPRINTF(x)
248 #define DPRINTFN(n,x)
249 #endif
250
251 /*
252  * Note that we have newbus methods for both the GEnesis controller
253  * itself and the XMAC(s). The XMACs are children of the GEnesis, and
254  * the miibus code is a child of the XMACs. We need to do it this way
255  * so that the miibus drivers can access the PHY registers on the
256  * right PHY. It's not quite what I had in mind, but it's the only
257  * design that achieves the desired effect.
258  */
/*
 * Methods for the controller (skc) device: the PCI-attached parent that
 * owns the shared registers and interrupt.
 */
static device_method_t skc_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         skc_probe),
        DEVMETHOD(device_attach,        skc_attach),
        DEVMETHOD(device_detach,        skc_detach),
        DEVMETHOD(device_shutdown,      skc_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        { 0, 0 }
};

static DEFINE_CLASS_0(skc, skc_driver, skc_methods, sizeof(struct sk_softc));
static devclass_t skc_devclass;

/*
 * Methods for each network port (sk, child of skc).  Also exports the
 * MII register accessors that the miibus children call.
 */
static device_method_t sk_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         sk_probe),
        DEVMETHOD(device_attach,        sk_attach),
        DEVMETHOD(device_detach,        sk_detach),
        DEVMETHOD(device_shutdown,      bus_generic_shutdown),

        /* bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       sk_miibus_readreg),
        DEVMETHOD(miibus_writereg,      sk_miibus_writereg),
        DEVMETHOD(miibus_statchg,       sk_miibus_statchg),

        { 0, 0 }
};

static DEFINE_CLASS_0(sk, sk_driver, sk_methods, sizeof(struct sk_if_softc));
static devclass_t sk_devclass;

/* Attachment chain: skc on pci, sk on skc, miibus on sk. */
DECLARE_DUMMY_MODULE(if_sk);
DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(if_sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);
302
303 static __inline uint32_t
304 sk_win_read_4(struct sk_softc *sc, uint32_t reg)
305 {
306         return CSR_READ_4(sc, reg);
307 }
308
309 static __inline uint16_t
310 sk_win_read_2(struct sk_softc *sc, uint32_t reg)
311 {
312         return CSR_READ_2(sc, reg);
313 }
314
315 static __inline uint8_t
316 sk_win_read_1(struct sk_softc *sc, uint32_t reg)
317 {
318         return CSR_READ_1(sc, reg);
319 }
320
321 static __inline void
322 sk_win_write_4(struct sk_softc *sc, uint32_t reg, uint32_t x)
323 {
324         CSR_WRITE_4(sc, reg, x);
325 }
326
327 static __inline void
328 sk_win_write_2(struct sk_softc *sc, uint32_t reg, uint16_t x)
329 {
330         CSR_WRITE_2(sc, reg, x);
331 }
332
333 static __inline void
334 sk_win_write_1(struct sk_softc *sc, uint32_t reg, uint8_t x)
335 {
336         CSR_WRITE_1(sc, reg, x);
337 }
338
339 static int
340 sk_miibus_readreg(device_t dev, int phy, int reg)
341 {
342         struct sk_if_softc *sc_if = device_get_softc(dev);
343
344         if (SK_IS_GENESIS(sc_if->sk_softc))
345                 return sk_xmac_miibus_readreg(sc_if, phy, reg);
346         else
347                 return sk_marv_miibus_readreg(sc_if, phy, reg);
348 }
349
350 static int
351 sk_miibus_writereg(device_t dev, int phy, int reg, int val)
352 {
353         struct sk_if_softc *sc_if = device_get_softc(dev);
354
355         if (SK_IS_GENESIS(sc_if->sk_softc))
356                 return sk_xmac_miibus_writereg(sc_if, phy, reg, val);
357         else
358                 return sk_marv_miibus_writereg(sc_if, phy, reg, val);
359 }
360
361 static void
362 sk_miibus_statchg(device_t dev)
363 {
364         struct sk_if_softc *sc_if = device_get_softc(dev);
365
366         if (SK_IS_GENESIS(sc_if->sk_softc))
367                 sk_xmac_miibus_statchg(sc_if);
368         else
369                 sk_marv_miibus_statchg(sc_if);
370 }
371
/*
 * Read a PHY register through the XMAC's MII interface.
 *
 * The phy/reg pair is latched into XM_PHY_ADDR and a first (dummy) read
 * of XM_PHY_DATA starts the MII cycle.  For external (non-XMAC-internal)
 * PHYs we then poll XM_MMUCMD for the data-ready bit before fetching the
 * result.  Returns 0 on any failure (callers treat 0 as "no data").
 */
static int
sk_xmac_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg)
{
        int i;

        DPRINTFN(9, ("sk_xmac_miibus_readreg\n"));

        /* The internal XMAC PHY only answers at PHY address 0. */
        if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
                return(0);

        SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
        /* Dummy read: kicks off the MII transaction, result not yet valid. */
        SK_XM_READ_2(sc_if, XM_PHY_DATA);
        if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
                /* External PHY: wait for the read data to become ready. */
                for (i = 0; i < SK_TIMEOUT; i++) {
                        DELAY(1);
                        if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
                            XM_MMUCMD_PHYDATARDY)
                                break;
                }

                if (i == SK_TIMEOUT) {
                        if_printf(&sc_if->arpcom.ac_if,
                                  "phy failed to come ready\n");
                        return(0);
                }
        }
        DELAY(1);
        return(SK_XM_READ_2(sc_if, XM_PHY_DATA));
}
401
/*
 * Write a PHY register through the XMAC's MII interface.
 *
 * Waits for the MII interface to become idle, latches the data, then
 * waits for the write cycle to complete.  ETIMEDOUT is returned only
 * when the interface never became ready to accept the write; a timeout
 * of the write cycle itself is merely logged.
 */
static int
sk_xmac_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val)
{
        int i;

        DPRINTFN(9, ("sk_xmac_miibus_writereg\n"));

        SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
        /* Wait until any in-progress MII transaction finishes. */
        for (i = 0; i < SK_TIMEOUT; i++) {
                if ((SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY) == 0)
                        break;
        }

        if (i == SK_TIMEOUT) {
                if_printf(&sc_if->arpcom.ac_if, "phy failed to come ready\n");
                return(ETIMEDOUT);
        }

        SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
        /* Now wait for our own write cycle to complete. */
        for (i = 0; i < SK_TIMEOUT; i++) {
                DELAY(1);
                if ((SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY) == 0)
                        break;
        }

        if (i == SK_TIMEOUT)
                if_printf(&sc_if->arpcom.ac_if, "phy write timed out\n");
        return(0);
}
431
432 static void
433 sk_xmac_miibus_statchg(struct sk_if_softc *sc_if)
434 {
435         struct mii_data *mii;
436
437         mii = device_get_softc(sc_if->sk_miibus);
438         DPRINTFN(9, ("sk_xmac_miibus_statchg\n"));
439
440         /*
441          * If this is a GMII PHY, manually set the XMAC's
442          * duplex mode accordingly.
443          */
444         if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
445                 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
446                         SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
447                 else
448                         SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
449         }
450 }
451
/*
 * Read a PHY register through the Yukon's SMI interface.
 *
 * Only PHY address 0 with a Marvell copper/fiber PHY type is serviced;
 * anything else returns 0.  The read is started through SMICR and the
 * result fetched from SMIDR once READ_VALID is observed.
 */
static int
sk_marv_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg)
{
        uint16_t val;
        int i;

        if (phy != 0 ||
            (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
             sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
                DPRINTFN(9, ("sk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n",
                             phy, reg));
                return(0);
        }

        SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
                      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

        /* Poll until the read data becomes valid. */
        for (i = 0; i < SK_TIMEOUT; i++) {
                DELAY(1);
                val = SK_YU_READ_2(sc_if, YUKON_SMICR);
                if (val & YU_SMICR_READ_VALID)
                        break;
        }

        if (i == SK_TIMEOUT) {
                if_printf(&sc_if->arpcom.ac_if, "phy failed to come ready\n");
                return(0);
        }

        DPRINTFN(9, ("sk_marv_miibus_readreg: i=%d, timeout=%d\n", i,
                     SK_TIMEOUT));

        val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

        DPRINTFN(9, ("sk_marv_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
                     phy, reg, val));

        return(val);
}
491
492 static int
493 sk_marv_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val)
494 {
495         int i;
496
497         DPRINTFN(9, ("sk_marv_miibus_writereg phy=%d reg=%#x val=%#x\n",
498                      phy, reg, val));
499
500         SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
501         SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
502                       YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);
503
504         for (i = 0; i < SK_TIMEOUT; i++) {
505                 DELAY(1);
506                 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
507                         break;
508         }
509
510         if (i == SK_TIMEOUT)
511                 if_printf(&sc_if->arpcom.ac_if, "phy write timed out\n");
512
513         return(0);
514 }
515
/*
 * Link-state-change callback for Marvell (Yukon) ports.  No register
 * programming is done here; the current GPCR value is dumped when
 * debugging is enabled.
 */
static void
sk_marv_miibus_statchg(struct sk_if_softc *sc_if)
{
        DPRINTFN(9, ("sk_marv_miibus_statchg: gpcr=%x\n",
                     SK_YU_READ_2(sc_if, YUKON_GPCR)));
}
522
523 #define HASH_BITS       6
524   
525 static uint32_t
526 sk_xmac_hash(caddr_t addr)
527 {
528         uint32_t crc;
529
530         crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
531         return (~crc & ((1 << HASH_BITS) - 1));
532 }
533
534 static uint32_t
535 sk_yukon_hash(caddr_t addr)
536 {
537         uint32_t crc;
538
539         crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
540         return (crc & ((1 << HASH_BITS) - 1));
541 }
542
/*
 * Program one slot of the XMAC's perfect (exact-match) RX filter:
 * the 6-byte ethernet address is written as three 16-bit words.
 *
 * NOTE(review): addr is dereferenced through uint16_t casts, which
 * assumes 2-byte alignment of the address buffer — confirm all callers
 * satisfy that on strict-alignment platforms.
 */
static void
sk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot)
{
        int base;

        base = XM_RXFILT_ENTRY(slot);

        SK_XM_WRITE_2(sc_if, base, *(uint16_t *)(&addr[0]));
        SK_XM_WRITE_2(sc_if, base + 2, *(uint16_t *)(&addr[2]));
        SK_XM_WRITE_2(sc_if, base + 4, *(uint16_t *)(&addr[4]));
}
554
/*
 * (Re)program the multicast receive filters for one port.
 *
 * GEnesis (XMAC): the first XM_RXFILT_MAX - 1 groups go into the
 * perfect-match filter (slot 0 is left alone — presumably the station
 * address, programmed elsewhere); the remainder fall back to the 64-bit
 * hash table in XM_MAR0/XM_MAR2.
 * Yukon: all groups are hashed into the 64-bit MCAH1..MCAH4 table.
 */
static void
sk_setmulti(struct sk_if_softc *sc_if)
{
        struct sk_softc *sc = sc_if->sk_softc;
        struct ifnet *ifp = &sc_if->arpcom.ac_if;
        uint32_t hashes[2] = { 0, 0 };
        int h = 0, i;
        struct ifmultiaddr *ifma;
        /* All-zero address used to blank perfect-filter slots. */
        uint8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };

        /* First, zot all the existing filters. */
        switch(sc->sk_type) {
        case SK_GENESIS:
                for (i = 1; i < XM_RXFILT_MAX; i++)
                        sk_setfilt(sc_if, (caddr_t)&dummy, i);

                SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
                SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
                break;
        case SK_YUKON:
        case SK_YUKON_LITE:
        case SK_YUKON_LP:
                SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
                SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
                SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
                SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
                break;
        }

        /* Now program new ones. */
        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                /* Accept all multicast: saturate the hash table. */
                hashes[0] = 0xFFFFFFFF;
                hashes[1] = 0xFFFFFFFF;
        } else {
                i = 1;
                /* First find the tail of the list. */
                LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
                        if (ifma->ifma_link.le_next == NULL)
                                break;
                }
                /*
                 * Now traverse the list backwards.  le_prev points at
                 * the previous entry's le_next field, so casting it
                 * back to an ifmultiaddr walks toward the list head;
                 * the loop stops when it reaches the head pointer
                 * itself (&ifp->if_multiaddrs).
                 */
                for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs;
                        ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) {
                        caddr_t maddr;

                        if (ifma->ifma_addr->sa_family != AF_LINK)
                                continue;

                        maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);

                        /*
                         * Program the first XM_RXFILT_MAX multicast groups
                         * into the perfect filter. For all others,
                         * use the hash table.
                         */
                        if (SK_IS_GENESIS(sc) && i < XM_RXFILT_MAX) {
                                sk_setfilt(sc_if, maddr, i);
                                i++;
                                continue;
                        }

                        switch(sc->sk_type) {
                        case SK_GENESIS:
                                h = sk_xmac_hash(maddr);
                                break;
                                
                        case SK_YUKON:
                        case SK_YUKON_LITE:
                        case SK_YUKON_LP:
                                h = sk_yukon_hash(maddr);
                                break;
                        }
                        /* Set the bit in the low or high hash word. */
                        if (h < 32)
                                hashes[0] |= (1 << h);
                        else
                                hashes[1] |= (1 << (h - 32));
                }
        }

        /* Finally, push the hash table and enable the filters. */
        switch(sc->sk_type) {
        case SK_GENESIS:
                SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
                               XM_MODE_RX_USE_PERFECT);
                SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
                SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
                break;
        case SK_YUKON:
        case SK_YUKON_LITE:
        case SK_YUKON_LP:
                SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
                SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
                SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
                SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
                break;
        }
}
651
652 static void
653 sk_setpromisc(struct sk_if_softc *sc_if)
654 {
655         struct sk_softc *sc = sc_if->sk_softc;
656         struct ifnet *ifp = &sc_if->arpcom.ac_if;
657
658         switch(sc->sk_type) {
659         case SK_GENESIS:
660                 if (ifp->if_flags & IFF_PROMISC)
661                         SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
662                 else
663                         SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
664                 break;
665         case SK_YUKON:
666         case SK_YUKON_LITE:
667         case SK_YUKON_LP:
668                 if (ifp->if_flags & IFF_PROMISC) {
669                         SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
670                             YU_RCR_UFLEN | YU_RCR_MUFLEN);
671                 } else {
672                         SK_YU_SETBIT_2(sc_if, YUKON_RCR,
673                             YU_RCR_UFLEN | YU_RCR_MUFLEN);
674                 }
675                 break;
676         }
677 }
678
679 static int
680 sk_init_rx_ring(struct sk_if_softc *sc_if)
681 {
682         struct sk_chain_data *cd = &sc_if->sk_cdata;
683         struct sk_ring_data *rd = sc_if->sk_rdata;
684         int i, nexti;
685
686         bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
687
688         for (i = 0; i < SK_RX_RING_CNT; i++) {
689                 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
690                 if (i == (SK_RX_RING_CNT - 1))
691                         nexti = 0;
692                 else
693                         nexti = i + 1;
694                 cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
695                 rd->sk_rx_ring[i].sk_next =
696                         htole32(SK_RX_RING_ADDR(sc_if, nexti));
697                 rd->sk_rx_ring[i].sk_csum1_start = htole16(ETHER_HDR_LEN);
698                 rd->sk_rx_ring[i].sk_csum2_start =
699                         htole16(ETHER_HDR_LEN + sizeof(struct ip));
700
701                 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL, 1) == ENOBUFS) {
702                         if_printf(&sc_if->arpcom.ac_if,
703                                   "failed alloc of %dth mbuf\n", i);
704                         return (ENOBUFS);
705                 }
706         }
707
708         cd->sk_rx_prod = 0;
709         cd->sk_rx_cons = 0;
710
711         bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
712                         BUS_DMASYNC_PREWRITE);
713
714         return (0);
715 }
716
717 static int
718 sk_init_tx_ring(struct sk_if_softc *sc_if)
719 {
720         struct sk_chain_data *cd = &sc_if->sk_cdata;
721         struct sk_ring_data *rd = sc_if->sk_rdata;
722         int i, nexti;
723
724         bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
725
726         for (i = 0; i < SK_TX_RING_CNT; i++) {
727                 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
728                 if (i == (SK_TX_RING_CNT - 1))
729                         nexti = 0;
730                 else
731                         nexti = i + 1;
732                 cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];
733                 rd->sk_tx_ring[i].sk_next = htole32(SK_TX_RING_ADDR(sc_if, nexti));
734         }
735
736         sc_if->sk_cdata.sk_tx_prod = 0;
737         sc_if->sk_cdata.sk_tx_cons = 0;
738         sc_if->sk_cdata.sk_tx_cnt = 0;
739
740         bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
741                         BUS_DMASYNC_PREWRITE);
742
743         return (0);
744 }
745
/*
 * Attach a jumbo receive buffer to an RX chain slot.
 *
 * If 'm' is NULL a new mbuf header is allocated and backed by a buffer
 * from the per-interface jumbo pool (attached as external storage with
 * sk_jfree/sk_jref as the free/ref callbacks).  Otherwise the given
 * mbuf — which must already carry pool-backed external storage — is
 * reset and reused.  In both cases the RX descriptor is pointed at the
 * buffer's DMA address.
 *
 * Returns 0 on success, ENOBUFS if the mbuf or pool allocation fails.
 */
static int
sk_newbuf(struct sk_if_softc *sc_if, struct sk_chain *c, struct mbuf *m,
          int wait)
{
        struct sk_jpool_entry *entry;
        struct mbuf *m_new = NULL;
        struct sk_rx_desc *r;

        if (m == NULL) {
                MGETHDR(m_new, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
                if (m_new == NULL)
                        return (ENOBUFS);
                
                /* Allocate the jumbo buffer */
                entry = sk_jalloc(sc_if);
                if (entry == NULL) {
                        m_freem(m_new);
                        DPRINTFN(1, ("%s jumbo allocation failed -- packet "
                            "dropped!\n", sc_if->arpcom.ac_if.if_xname));
                        return (ENOBUFS);
                }

                /* Hook the pool buffer up as external mbuf storage. */
                m_new->m_ext.ext_arg = entry;
                m_new->m_ext.ext_buf = entry->buf;
                m_new->m_ext.ext_free = sk_jfree;
                m_new->m_ext.ext_ref = sk_jref;
                m_new->m_ext.ext_size = SK_JLEN;

                m_new->m_flags |= M_EXT;
        } else {
                /*
                 * We're re-using a previously allocated mbuf;
                 * be sure to re-init pointers and lengths to
                 * default values.
                 */
                KKASSERT(m->m_flags & M_EXT);
                entry = m->m_ext.ext_arg;
                m_new = m;
        }
        m_new->m_data = m_new->m_ext.ext_buf;
        m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size;

        /*
         * Adjust alignment so packet payload begins on a
         * longword boundary. Mandatory for Alpha, useful on
         * x86 too.
         */
        m_adj(m_new, ETHER_ALIGN);

        c->sk_mbuf = m_new;

        /* Point the hardware descriptor at the (aligned) buffer. */
        r = c->sk_desc;
        r->sk_data_lo = htole32(entry->paddr + ETHER_ALIGN);
        r->sk_ctl = htole32(SK_JLEN | SK_RXSTAT);

        return (0);
}
803
804 /*
805  * Allocate a jumbo buffer.
806  */
807 struct sk_jpool_entry *
808 sk_jalloc(struct sk_if_softc *sc_if)
809 {
810         struct sk_chain_data *cd = &sc_if->sk_cdata;
811         struct sk_jpool_entry *entry;
812
813         lwkt_serialize_enter(&cd->sk_jpool_serializer);
814
815         entry = SLIST_FIRST(&cd->sk_jpool_free_ent);
816         if (entry != NULL) {
817                 SLIST_REMOVE_HEAD(&cd->sk_jpool_free_ent, entry_next);
818                 entry->inuse = 1;
819         } else {
820                 if_printf(&sc_if->arpcom.ac_if,
821                           "no free jumbo buffer\n");
822         }
823
824         lwkt_serialize_exit(&cd->sk_jpool_serializer);
825         return entry;
826 }
827
828 /*
829  * Release a jumbo buffer.
830  */
831 void
832 sk_jfree(void *arg)
833 {
834         struct sk_jpool_entry *entry = arg;
835         struct sk_chain_data *cd = &entry->sc_if->sk_cdata;
836
837         if (&cd->sk_jpool_ent[entry->slot] != entry)
838                 panic("%s: free wrong jumbo buffer\n", __func__);
839         else if (entry->inuse == 0)
840                 panic("%s: jumbo buffer already freed\n", __func__);
841
842         lwkt_serialize_enter(&cd->sk_jpool_serializer);
843
844         atomic_subtract_int(&entry->inuse, 1);
845         if (entry->inuse == 0)
846                 SLIST_INSERT_HEAD(&cd->sk_jpool_free_ent, entry, entry_next);
847
848         lwkt_serialize_exit(&cd->sk_jpool_serializer);
849 }
850
851 static void
852 sk_jref(void *arg)
853 {
854         struct sk_jpool_entry *entry = arg;
855         struct sk_chain_data *cd = &entry->sc_if->sk_cdata;
856
857         if (&cd->sk_jpool_ent[entry->slot] != entry)
858                 panic("%s: free wrong jumbo buffer\n", __func__);
859         else if (entry->inuse == 0)
860                 panic("%s: jumbo buffer already freed\n", __func__);
861
862         atomic_add_int(&entry->inuse, 1);
863 }
864
865 /*
866  * Set media options.
867  */
868 static int
869 sk_ifmedia_upd(struct ifnet *ifp)
870 {
871         struct sk_if_softc *sc_if = ifp->if_softc;
872         struct mii_data *mii;
873
874         mii = device_get_softc(sc_if->sk_miibus);
875         sk_init(sc_if);
876         mii_mediachg(mii);
877
878         return(0);
879 }
880
881 /*
882  * Report current media status.
883  */
884 static void
885 sk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
886 {
887         struct sk_if_softc *sc_if;
888         struct mii_data *mii;
889
890         sc_if = ifp->if_softc;
891         mii = device_get_softc(sc_if->sk_miibus);
892
893         mii_pollstat(mii);
894         ifmr->ifm_active = mii->mii_media_active;
895         ifmr->ifm_status = mii->mii_media_status;
896 }
897
898 static int
899 sk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
900 {
901         struct sk_if_softc *sc_if = ifp->if_softc;
902         struct ifreq *ifr = (struct ifreq *)data;
903         struct mii_data *mii;
904         int error = 0;
905
906         ASSERT_SERIALIZED(ifp->if_serializer);
907
908         switch(command) {
909         case SIOCSIFMTU:
910                 if (ifr->ifr_mtu > SK_JUMBO_MTU)
911                         error = EINVAL;
912                 else {
913                         ifp->if_mtu = ifr->ifr_mtu;
914                         ifp->if_flags &= ~IFF_RUNNING;
915                         sk_init(sc_if);
916                 }
917                 break;
918         case SIOCSIFFLAGS:
919                 if (ifp->if_flags & IFF_UP) {
920                         if (ifp->if_flags & IFF_RUNNING) {
921                                 if ((ifp->if_flags ^ sc_if->sk_if_flags)
922                                     & IFF_PROMISC) {
923                                         sk_setpromisc(sc_if);
924                                         sk_setmulti(sc_if);
925                                 }
926                         } else
927                                 sk_init(sc_if);
928                 } else {
929                         if (ifp->if_flags & IFF_RUNNING)
930                                 sk_stop(sc_if);
931                 }
932                 sc_if->sk_if_flags = ifp->if_flags;
933                 break;
934         case SIOCADDMULTI:
935         case SIOCDELMULTI:
936                 sk_setmulti(sc_if);
937                 break;
938         case SIOCGIFMEDIA:
939         case SIOCSIFMEDIA:
940                 mii = device_get_softc(sc_if->sk_miibus);
941                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
942                 break;
943         default:
944                 error = ether_ioctl(ifp, command, data);
945                 break;
946         }
947
948         return(error);
949 }
950
951 /*
952  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
953  * IDs against our list and return a device name if we find a match.
954  */
955 static int
956 skc_probe(device_t dev)
957 {
958         const struct skc_type *t;
959         uint16_t vid, did;
960
961         vid = pci_get_vendor(dev);
962         did = pci_get_device(dev);
963
964         /*
965          * Only attach to rev.2 of the Linksys EG1032 adapter.
966          * Rev.3 is supported by re(4).
967          */
968         if (vid == PCI_VENDOR_LINKSYS &&
969             did == PCI_PRODUCT_LINKSYS_EG1032 &&
970             pci_get_subdevice(dev) != SUBDEVICEID_LINKSYS_EG1032_REV2)
971                 return ENXIO;
972
973         for (t = skc_devs; t->skc_name != NULL; t++) {
974                 if (vid == t->skc_vid && did == t->skc_did) {
975                         device_set_desc(dev, t->skc_name);
976                         return 0;
977                 }
978         }
979         return ENXIO;
980 }
981
982 /*
983  * Force the GEnesis into reset, then bring it out of reset.
984  */
static void
sk_reset(struct sk_softc *sc)
{
        uint32_t imtimer_ticks;

        DPRINTFN(2, ("sk_reset\n"));

        /* Assert software and master reset; Yukon also needs its link reset. */
        CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
        CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
        if (SK_IS_YUKON(sc))
                CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

        /* Deassert in the opposite order, with settle delays in between. */
        DELAY(1000);
        CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
        DELAY(2);
        CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
        if (SK_IS_YUKON(sc))
                CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

        DPRINTFN(2, ("sk_reset: sk_csr=%x\n", CSR_READ_2(sc, SK_CSR)));
        DPRINTFN(2, ("sk_reset: sk_link_ctrl=%x\n",
                     CSR_READ_2(sc, SK_LINK_CTRL)));

        if (SK_IS_GENESIS(sc)) {
                /* Configure packet arbiter */
                sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
                sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
                sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
                sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
                sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
        }

        /* Enable RAM interface */
        sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

        /*
         * Configure interrupt moderation. The moderation timer
         * defers interrupts specified in the interrupt moderation
         * timer mask based on the timeout specified in the interrupt
         * moderation timer init register. Each bit in the timer
         * register represents one tick, so to specify a timeout in
         * microseconds, we have to multiply by the correct number of
         * ticks-per-microsecond.
         */
        /*
         * NOTE(review): imtimer_ticks looks unused below, but
         * SK_IM_USECS() presumably expands to reference it for the
         * usec-to-ticks conversion — confirm against the macro
         * definition before removing it.
         */
        switch (sc->sk_type) {
        case SK_GENESIS:
                imtimer_ticks = SK_IMTIMER_TICKS_GENESIS;
                break;
        default:
                imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
        }
        /* 100us moderation on TX-EOF and RX-EOF interrupts of both ports. */
        sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(100));
        sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
            SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
        sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
}
1041
1042 static int
1043 sk_probe(device_t dev)
1044 {
1045         struct sk_softc *sc = device_get_softc(device_get_parent(dev));
1046         const char *revstr = "", *name = NULL;
1047         char devname[80];
1048
1049         switch (sc->sk_type) {
1050         case SK_GENESIS:
1051                 name = "SysKonnect GEnesis";
1052                 break;
1053         case SK_YUKON:
1054                 name = "Marvell Yukon";
1055                 break;
1056         case SK_YUKON_LITE:
1057                 name = "Marvell Yukon Lite";
1058                 switch (sc->sk_rev) {
1059                 case SK_YUKON_LITE_REV_A0:
1060                         revstr = " rev.A0";
1061                         break;
1062                 case SK_YUKON_LITE_REV_A1:
1063                         revstr = " rev.A1";
1064                         break;
1065                 case SK_YUKON_LITE_REV_A3:
1066                         revstr = " rev.A3";
1067                         break;
1068                 }
1069                 break;
1070         case SK_YUKON_LP:
1071                 name = "Marvell Yukon LP";
1072                 break;
1073         default:
1074                 return ENXIO;
1075         }
1076
1077         snprintf(devname, sizeof(devname), "%s%s (0x%x)",
1078                  name, revstr, sc->sk_rev);
1079         device_set_desc_copy(dev, devname);
1080         return 0;
1081 }
1082
1083 /*
1084  * Each XMAC chip is attached as a separate logical IP interface.
1085  * Single port cards will have only one logical interface of course.
1086  */
static int
sk_attach(device_t dev)
{
        struct sk_softc *sc = device_get_softc(device_get_parent(dev));
        struct sk_if_softc *sc_if = device_get_softc(dev);
        struct ifnet *ifp = &sc_if->arpcom.ac_if;
        int i, error;

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));

        /*
         * skc_attach() passed the port number (A or B) down via the
         * child's ivars; it is only needed here, so free it right away.
         */
        sc_if->sk_port = *(int *)device_get_ivars(dev);
        KKASSERT(sc_if->sk_port == SK_PORT_A || sc_if->sk_port == SK_PORT_B);

        sc_if->sk_softc = sc;
        sc->sk_if[sc_if->sk_port] = sc_if;

        kfree(device_get_ivars(dev), M_DEVBUF);
        device_set_ivars(dev, NULL);

        /* Each port has its own TX BMU control/status register. */
        if (sc_if->sk_port == SK_PORT_A)
                sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
        if (sc_if->sk_port == SK_PORT_B)
                sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

        DPRINTFN(2, ("begin sk_attach: port=%d\n", sc_if->sk_port));

        /*
         * Get station address for this interface. Note that
         * dual port cards actually come with three station
         * addresses: one for each port, plus an extra. The
         * extra one is used by the SysKonnect driver software
         * as a 'virtual' station address for when both ports
         * are operating in failover mode. Currently we don't
         * use this extra address.
         */
        for (i = 0; i < ETHER_ADDR_LEN; i++) {
                /* XXX */
                sc_if->arpcom.ac_enaddr[i] =
                    sk_win_read_1(sc, SK_MAC0_0 + (sc_if->sk_port * 8) + i);
        }

        /*
         * Set up RAM buffer addresses. The NIC will have a certain
         * amount of SRAM on it, somewhere between 512K and 2MB. We
         * need to divide this up a) between the transmitter and
         * receiver and b) between the two XMACs, if this is a
         * dual port NIC. Our algorithm is to divide up the memory
         * evenly so that everyone gets a fair share.
         */
        if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
                uint32_t chunk, val;

                /* Single MAC: split the whole RAM buffer RX/TX 50:50. */
                chunk = sc->sk_ramsize / 2;
                val = sc->sk_rboff / sizeof(uint64_t);
                sc_if->sk_rx_ramstart = val;
                val += (chunk / sizeof(uint64_t));
                sc_if->sk_rx_ramend = val - 1;
                sc_if->sk_tx_ramstart = val;
                val += (chunk / sizeof(uint64_t));
                sc_if->sk_tx_ramend = val - 1;
        } else {
                uint32_t chunk, val;

                /* Dual MAC: each port gets half, again split RX/TX. */
                chunk = sc->sk_ramsize / 4;
                val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
                    sizeof(uint64_t);
                sc_if->sk_rx_ramstart = val;
                val += (chunk / sizeof(uint64_t));
                sc_if->sk_rx_ramend = val - 1;
                sc_if->sk_tx_ramstart = val;
                val += (chunk / sizeof(uint64_t));
                sc_if->sk_tx_ramend = val - 1;
        }

        DPRINTFN(2, ("sk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
                     "           tx_ramstart=%#x tx_ramend=%#x\n",
                     sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
                     sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));

        /* Read and save PHY type */
        sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;

        /* Set PHY address */
        if (SK_IS_GENESIS(sc)) {
                switch (sc_if->sk_phytype) {
                case SK_PHYTYPE_XMAC:
                        sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
                        break;
                case SK_PHYTYPE_BCOM:
                        sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
                        break;
                default:
                        device_printf(dev, "unsupported PHY type: %d\n",
                            sc_if->sk_phytype);
                        error = ENXIO;
                        goto fail;
                }
        }

        if (SK_IS_YUKON(sc)) {
                if ((sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
                    sc->sk_pmd != 'L' && sc->sk_pmd != 'S')) {
                        /* not initialized, punt */
                        sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
                        sc->sk_coppertype = 1;
                }

                sc_if->sk_phyaddr = SK_PHYADDR_MARV;

                if (!(sc->sk_coppertype))
                        sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
        }

        /* Allocate the DMA descriptor rings and maps before use. */
        error = sk_dma_alloc(dev);
        if (error)
                goto fail;

        /* Fill in the ifnet and hook up our handlers. */
        ifp->if_softc = sc_if;
        ifp->if_mtu = ETHERMTU;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = sk_ioctl;
        ifp->if_start = sk_start;
        ifp->if_watchdog = sk_watchdog;
        ifp->if_init = sk_init;
        ifp->if_baudrate = 1000000000;
        ifq_set_maxlen(&ifp->if_snd, SK_TX_RING_CNT - 1);
        ifq_set_ready(&ifp->if_snd);

        ifp->if_capabilities = IFCAP_VLAN_MTU;

        /*
         * Do miibus setup.
         */
        switch (sc->sk_type) {
        case SK_GENESIS:
                sk_init_xmac(sc_if);
                break;
        case SK_YUKON:
        case SK_YUKON_LITE:
        case SK_YUKON_LP:
                sk_init_yukon(sc_if);
                break;
        default:
                device_printf(dev, "unknown device type %d\n", sc->sk_type);
                error = ENXIO;
                goto fail;
        }

        DPRINTFN(2, ("sk_attach: 1\n"));

        error = mii_phy_probe(dev, &sc_if->sk_miibus,
                              sk_ifmedia_upd, sk_ifmedia_sts);
        if (error) {
                device_printf(dev, "no PHY found!\n");
                goto fail;
        }

        callout_init(&sc_if->sk_tick_timer);

        /*
         * Call MI attach routines.
         */
        ether_ifattach(ifp, sc_if->arpcom.ac_enaddr, &sc->sk_serializer);

        DPRINTFN(2, ("sk_attach: end\n"));
        return 0;
fail:
        /* sk_detach() handles partial teardown; clear the port slot too. */
        sk_detach(dev);
        sc->sk_if[sc_if->sk_port] = NULL;
        return error;
}
1258
1259 /*
1260  * Attach the interface. Allocate softc structures, do ifmedia
1261  * setup and ethernet/BPF attach.
1262  */
static int
skc_attach(device_t dev)
{
        struct sk_softc *sc = device_get_softc(dev);
        uint8_t skrs;
        int *port;
        int error;

        DPRINTFN(2, ("begin skc_attach\n"));

        lwkt_serialize_init(&sc->sk_serializer);

#ifndef BURN_BRIDGES
        /*
         * Handle power management nonsense.
         */
        if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
                uint32_t iobase, membase, irq;

                /* Save important PCI config data. */
                iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
                membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
                irq = pci_read_config(dev, SK_PCI_INTLINE, 4);

                /* Reset the power state. */
                device_printf(dev, "chip is in D%d power mode "
                              "-- setting to D0\n", pci_get_powerstate(dev));

                pci_set_powerstate(dev, PCI_POWERSTATE_D0);

                /* Restore PCI config data. */
                pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
                pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
                pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
        }
#endif  /* BURN_BRIDGES */

        /*
         * Map control/status registers.
         */
        pci_enable_busmaster(dev);

        sc->sk_res_rid = SK_PCI_LOMEM;
        sc->sk_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                            &sc->sk_res_rid, RF_ACTIVE);
        if (sc->sk_res == NULL) {
                device_printf(dev, "couldn't map memory\n");
                error = ENXIO;
                goto fail;
        }
        sc->sk_btag = rman_get_bustag(sc->sk_res);
        sc->sk_bhandle = rman_get_bushandle(sc->sk_res);

        /* Chip type/revision come from the chip itself. */
        sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
        sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);

        /* Bail out here if chip is not recognized */
        if (!SK_IS_GENESIS(sc) && !SK_IS_YUKON(sc)) {
                device_printf(dev, "unknown chip type: %d\n", sc->sk_type);
                error = ENXIO;
                goto fail;
        }

        DPRINTFN(2, ("skc_attach: allocate interrupt\n"));

        /* Allocate interrupt */
        sc->sk_irq_rid = 0;
        sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sk_irq_rid,
                                            RF_SHAREABLE | RF_ACTIVE);
        if (sc->sk_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                error = ENXIO;
                goto fail;
        }

        /* Reset the adapter. */
        sk_reset(sc);

        skrs = sk_win_read_1(sc, SK_EPROM0);
        if (SK_IS_GENESIS(sc)) {
                /* Read and save RAM size and RAMbuffer offset */
                switch(skrs) {
                case SK_RAMSIZE_512K_64:
                        sc->sk_ramsize = 0x80000;
                        sc->sk_rboff = SK_RBOFF_0;
                        break;
                case SK_RAMSIZE_1024K_64:
                        sc->sk_ramsize = 0x100000;
                        sc->sk_rboff = SK_RBOFF_80000;
                        break;
                case SK_RAMSIZE_1024K_128:
                        sc->sk_ramsize = 0x100000;
                        sc->sk_rboff = SK_RBOFF_0;
                        break;
                case SK_RAMSIZE_2048K_128:
                        sc->sk_ramsize = 0x200000;
                        sc->sk_rboff = SK_RBOFF_0;
                        break;
                default:
                        device_printf(dev, "unknown ram size: %d\n", skrs);
                        error = ENXIO;
                        goto fail;
                }
        } else {
                /* Yukon: EPROM0 encodes the RAM size in 4KB units (0 = 128KB). */
                if (skrs == 0x00)
                        sc->sk_ramsize = 0x20000;
                else
                        sc->sk_ramsize = skrs * (1<<12);
                sc->sk_rboff = SK_RBOFF_0;
        }

        DPRINTFN(2, ("skc_attach: ramsize=%d (%dk), rboff=%d\n",
                     sc->sk_ramsize, sc->sk_ramsize / 1024,
                     sc->sk_rboff));

        /* Read and save physical media type */
        sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);

        /* 'T' and '1' denote copper media; everything else is fiber. */
        if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
                sc->sk_coppertype = 1;
        else
                sc->sk_coppertype = 0;

        /* Yukon Lite Rev A0 needs special test, from sk98lin driver */
        if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
                uint32_t flashaddr;
                uint8_t testbyte;

                flashaddr = sk_win_read_4(sc, SK_EP_ADDR);

                /* Test Flash-Address Register */
                sk_win_write_1(sc, SK_EP_ADDR+3, 0xff);
                testbyte = sk_win_read_1(sc, SK_EP_ADDR+3);

                if (testbyte != 0) {
                        /* This is a Yukon Lite Rev A0 */
                        sc->sk_type = SK_YUKON_LITE;
                        sc->sk_rev = SK_YUKON_LITE_REV_A0;
                        /* Restore Flash-Address Register */
                        sk_win_write_4(sc, SK_EP_ADDR, flashaddr);
                }
        }

        /*
         * Create one child per MAC port; the port number is handed to
         * the child through ivars and freed in sk_attach()/skc_detach().
         */
        sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
        port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
        *port = SK_PORT_A;
        device_set_ivars(sc->sk_devs[SK_PORT_A], port);

        if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
                sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
                port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
                *port = SK_PORT_B;
                device_set_ivars(sc->sk_devs[SK_PORT_B], port);
        }

        /* Turn on the 'driver is loaded' LED. */
        CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

        /* Attach the per-port children before enabling interrupts. */
        bus_generic_attach(dev);

        error = bus_setup_intr(dev, sc->sk_irq, INTR_NETSAFE, sk_intr, sc,
                               &sc->sk_intrhand, &sc->sk_serializer);
        if (error) {
                device_printf(dev, "couldn't set up irq\n");
                goto fail;
        }
        return 0;
fail:
        /* skc_detach() releases whatever was allocated so far. */
        skc_detach(dev);
        return error;
}
1434
1435 static int
1436 sk_detach(device_t dev)
1437 {
1438         struct sk_if_softc *sc_if = device_get_softc(dev);
1439         struct ifnet *ifp = &sc_if->arpcom.ac_if;
1440
1441         if (device_is_attached(dev))
1442                 ether_ifdetach(ifp);
1443
1444         bus_generic_detach(dev);
1445         if (sc_if->sk_miibus != NULL)
1446                 device_delete_child(dev, sc_if->sk_miibus);
1447
1448         sk_dma_free(dev);
1449         return 0;
1450 }
1451
1452 static int
1453 skc_detach(device_t dev)
1454 {
1455         struct sk_softc *sc = device_get_softc(dev);
1456         int *port;
1457
1458         if (device_is_attached(dev)) {
1459                 lwkt_serialize_enter(&sc->sk_serializer);
1460
1461                 if (sc->sk_if[SK_PORT_A] != NULL)
1462                         sk_stop(sc->sk_if[SK_PORT_A]);
1463                 if (sc->sk_if[SK_PORT_B] != NULL)
1464                         sk_stop(sc->sk_if[SK_PORT_B]);
1465
1466                 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
1467
1468                 lwkt_serialize_exit(&sc->sk_serializer);
1469         }
1470
1471         bus_generic_detach(dev);
1472         if (sc->sk_devs[SK_PORT_A] != NULL) {
1473                 port = device_get_ivars(sc->sk_devs[SK_PORT_A]);
1474                 if (port != NULL) {
1475                         kfree(port, M_DEVBUF);
1476                         device_set_ivars(sc->sk_devs[SK_PORT_A], NULL);
1477                 }
1478                 device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
1479         }
1480         if (sc->sk_devs[SK_PORT_B] != NULL) {
1481                 port = device_get_ivars(sc->sk_devs[SK_PORT_B]);
1482                 if (port != NULL) {
1483                         kfree(port, M_DEVBUF);
1484                         device_set_ivars(sc->sk_devs[SK_PORT_B], NULL);
1485                 }
1486                 device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
1487         }
1488
1489         if (sc->sk_irq != NULL) {
1490                 bus_release_resource(dev, SYS_RES_IRQ, sc->sk_irq_rid,
1491                                      sc->sk_irq);
1492         }
1493         if (sc->sk_res != NULL) {
1494                 bus_release_resource(dev, SYS_RES_MEMORY, sc->sk_res_rid,
1495                                      sc->sk_res);
1496         }
1497
1498         return 0;
1499 }
1500
/*
 * Map an outgoing mbuf chain into TX descriptors starting at *txidx.
 * On success *txidx is advanced past the descriptors consumed.
 * Returns ENOBUFS if the chain can't be mapped or the ring is too full.
 */
static int
sk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, uint32_t *txidx)
{
        struct sk_chain_data *cd = &sc_if->sk_cdata;
        struct sk_ring_data *rd = sc_if->sk_rdata;
        struct sk_tx_desc *f = NULL;
        uint32_t frag, cur, sk_ctl;
        struct sk_dma_ctx ctx;
        bus_dma_segment_t segs[SK_NTXSEG];
        bus_dmamap_t map;
        int i, error;

        DPRINTFN(2, ("sk_encap\n"));

        cur = frag = *txidx;

#ifdef SK_DEBUG
        if (skdebug >= 2)
                sk_dump_mbuf(m_head);
#endif

        map = cd->sk_tx_dmap[*txidx];

        /*
         * Start packing the mbufs in this chain into
         * the fragment pointers. Stop when we run out
         * of fragments or hit the end of the mbuf chain.
         */
        ctx.nsegs = SK_NTXSEG;
        ctx.segs = segs;
        error = bus_dmamap_load_mbuf(cd->sk_tx_dtag, map, m_head,
                                     sk_buf_dma_addr, &ctx, BUS_DMA_NOWAIT);
        if (error) {
                if_printf(&sc_if->arpcom.ac_if, "could not map TX mbuf\n");
                return ENOBUFS;
        }

        /* Keep at least two descriptors spare; undo the mapping if not. */
        if ((SK_TX_RING_CNT - (cd->sk_tx_cnt + ctx.nsegs)) < 2) {
                bus_dmamap_unload(cd->sk_tx_dtag, map);
                DPRINTFN(2, ("sk_encap: too few descriptors free\n"));
                return ENOBUFS;
        }

        DPRINTFN(2, ("sk_encap: nsegs=%d\n", ctx.nsegs));

        /* Sync the DMA map. */
        bus_dmamap_sync(cd->sk_tx_dtag, map, BUS_DMASYNC_PREWRITE);

        /*
         * Fill one descriptor per DMA segment.  The OWN bit is set on
         * every descriptor except the first, which is handed to the
         * NIC only after the whole chain is set up (see below).
         */
        for (i = 0; i < ctx.nsegs; i++) {
                f = &rd->sk_tx_ring[frag];
                f->sk_data_lo = htole32(segs[i].ds_addr);
                sk_ctl = segs[i].ds_len | SK_OPCODE_DEFAULT;
                if (i == 0)
                        sk_ctl |= SK_TXCTL_FIRSTFRAG;
                else
                        sk_ctl |= SK_TXCTL_OWN;
                f->sk_ctl = htole32(sk_ctl);
                cur = frag;
                SK_INC(frag, SK_TX_RING_CNT);
        }

        /* Record the mbuf on the last descriptor for reclamation. */
        cd->sk_tx_chain[cur].sk_mbuf = m_head;
        /* Switch DMA map */
        cd->sk_tx_dmap[*txidx] = cd->sk_tx_dmap[cur];
        cd->sk_tx_dmap[cur] = map;

        rd->sk_tx_ring[cur].sk_ctl |=
                htole32(SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR);
        /* Setting OWN on the first descriptor hands the chain to the NIC. */
        rd->sk_tx_ring[*txidx].sk_ctl |= htole32(SK_TXCTL_OWN);

        /* Sync first descriptor to hand it off */
        bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
                        BUS_DMASYNC_PREWRITE);

        sc_if->sk_cdata.sk_tx_cnt += ctx.nsegs;

#ifdef SK_DEBUG
        if (skdebug >= 2) {
                struct sk_tx_desc *desc;
                uint32_t idx;

                for (idx = *txidx; idx != frag; SK_INC(idx, SK_TX_RING_CNT)) {
                        desc = &sc_if->sk_rdata->sk_tx_ring[idx];
                        sk_dump_txdesc(desc, idx);
                }
        }
#endif

        *txidx = frag;

        DPRINTFN(2, ("sk_encap: completed successfully\n"));

        return (0);
}
1595
/*
 * Transmit-start handler: drain the interface send queue into the TX
 * ring, then kick the TX BMU if anything was enqueued.
 */
static void
sk_start(struct ifnet *ifp)
{
        struct sk_if_softc *sc_if = ifp->if_softc;
        struct sk_softc *sc = sc_if->sk_softc;
        struct mbuf *m_head = NULL;
        uint32_t idx = sc_if->sk_cdata.sk_tx_prod;
        int pkts = 0;

        DPRINTFN(2, ("sk_start\n"));

        /* Stop as soon as the next slot still holds an unreclaimed mbuf. */
        while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
                /* Peek first; the packet stays queued until encap succeeds. */
                m_head = ifq_poll(&ifp->if_snd);
                if (m_head == NULL)
                        break;

                /*
                 * Pack the data into the transmit ring. If we
                 * don't have room, set the OACTIVE flag and wait
                 * for the NIC to drain the ring.
                 */
                if (sk_encap(sc_if, m_head, &idx)) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }

                /* now we are committed to transmit the packet */
                ifq_dequeue(&ifp->if_snd, m_head);
                pkts++;

                BPF_MTAP(ifp, m_head);
        }
        if (pkts == 0)
                return;

        /* Transmit */
        if (idx != sc_if->sk_cdata.sk_tx_prod) {
                sc_if->sk_cdata.sk_tx_prod = idx;
                CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

                /* Set a timeout in case the chip goes out to lunch. */
                ifp->if_timer = 5;
        }
}
1640
1641 static void
1642 sk_watchdog(struct ifnet *ifp)
1643 {
1644         struct sk_if_softc *sc_if = ifp->if_softc;
1645
1646         ASSERT_SERIALIZED(ifp->if_serializer);
1647         /*
1648          * Reclaim first as there is a possibility of losing Tx completion
1649          * interrupts.
1650          */
1651         sk_txeof(sc_if);
1652         if (sc_if->sk_cdata.sk_tx_cnt != 0) {
1653                 if_printf(&sc_if->arpcom.ac_if, "watchdog timeout\n");
1654                 ifp->if_oerrors++;
1655                 ifp->if_flags &= ~IFF_RUNNING;
1656                 sk_init(sc_if);
1657         }
1658 }
1659
1660 static void
1661 skc_shutdown(device_t dev)
1662 {
1663         struct sk_softc *sc = device_get_softc(dev);
1664
1665         DPRINTFN(2, ("sk_shutdown\n"));
1666
1667         lwkt_serialize_enter(&sc->sk_serializer);
1668
1669         /* Turn off the 'driver is loaded' LED. */
1670         CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1671
1672         /*
1673          * Reset the GEnesis controller. Doing this should also
1674          * assert the resets on the attached XMAC(s).
1675          */
1676         sk_reset(sc);
1677
1678         lwkt_serialize_exit(&sc->sk_serializer);
1679 }
1680
1681 static __inline int
1682 sk_rxvalid(struct sk_softc *sc, uint32_t stat, uint32_t len)
1683 {
1684         if (sc->sk_type == SK_GENESIS) {
1685                 if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
1686                     XM_RXSTAT_BYTES(stat) != len)
1687                         return (0);
1688         } else {
1689                 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
1690                     YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
1691                     YU_RXSTAT_JABBER)) != 0 ||
1692                     (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
1693                     YU_RXSTAT_BYTES(stat) != len)
1694                         return (0);
1695         }
1696
1697         return (1);
1698 }
1699
/*
 * Receive-completion handler: walk the RX descriptor ring starting at
 * sk_rx_prod, hand completed frames to the network stack and refill the
 * ring with fresh jumbo buffers.
 */
static void
sk_rxeof(struct sk_if_softc *sc_if)
{
        struct sk_softc *sc = sc_if->sk_softc;
        struct ifnet *ifp = &sc_if->arpcom.ac_if;
        struct sk_chain_data *cd = &sc_if->sk_cdata;
        struct sk_ring_data *rd = sc_if->sk_rdata;
        int i, reap;

        DPRINTFN(2, ("sk_rxeof\n"));

        i = cd->sk_rx_prod;

        /* Pull the descriptor ring and jumbo buffer pool out of DMA. */
        bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
                        BUS_DMASYNC_POSTREAD);
        bus_dmamap_sync(cd->sk_jpool_dtag, cd->sk_jpool_dmap,
                        BUS_DMASYNC_POSTREAD);

        reap = 0;
        for (;;) {
                struct sk_chain *cur_rx;
                struct sk_rx_desc *cur_desc;
                uint32_t rxstat, sk_ctl;
                uint16_t csum1, csum2;
                int cur, total_len;
                struct mbuf *m;

                cur = i;

                /* OWN still set means the chip hasn't finished this slot. */
                sk_ctl = le32toh(rd->sk_rx_ring[i].sk_ctl);
                if ((sk_ctl & SK_RXCTL_OWN) != 0) {
                        /* Invalidate the descriptor -- it's not ready yet */
                        cd->sk_rx_prod = i;
                        break;
                }

                cur_rx = &cd->sk_rx_chain[cur];
                cur_desc = &rd->sk_rx_ring[cur];

                /* Detach the mbuf from the ring before deciding its fate. */
                rxstat = le32toh(cur_desc->sk_xmac_rxstat);
                m = cur_rx->sk_mbuf;
                cur_rx->sk_mbuf = NULL;
                total_len = SK_RXBYTES(le32toh(cur_desc->sk_ctl));

                csum1 = le16toh(rd->sk_rx_ring[i].sk_csum1);
                csum2 = le16toh(rd->sk_rx_ring[i].sk_csum2);

                SK_INC(i, SK_RX_RING_CNT);

                /*
                 * The frame must be complete (first+last fragment in one
                 * descriptor), within sane length bounds, and pass the
                 * MAC status check; otherwise recycle the old mbuf into
                 * the ring and count an input error.
                 */
                if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
                    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
                    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
                    total_len < SK_MIN_FRAMELEN ||
                    total_len > SK_JUMBO_FRAMELEN ||
                    sk_rxvalid(sc, rxstat, total_len) == 0) {
                        ifp->if_ierrors++;
                        sk_newbuf(sc_if, cur_rx, m, 0);
                        continue;
                }

                /*
                 * Try to allocate a new jumbo buffer. If that
                 * fails, copy the packet to mbufs and put the
                 * jumbo buffer back in the ring so it can be
                 * re-used. If allocating mbufs fails, then we
                 * have to drop the packet.
                 */
                if (sk_newbuf(sc_if, cur_rx, NULL, 0) == ENOBUFS) {
                        struct mbuf *m0;

                        /* Copy with ETHER_ALIGN slop so the IP header
                         * ends up 4-byte aligned after m_adj(). */
                        m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
                            total_len + ETHER_ALIGN, 0, ifp, NULL);
                        sk_newbuf(sc_if, cur_rx, m, 0);
                        if (m0 == NULL) {
                                ifp->if_ierrors++;
                                continue;
                        }
                        m_adj(m0, ETHER_ALIGN);
                        m = m0;
                } else {
                        m->m_pkthdr.rcvif = ifp;
                        m->m_pkthdr.len = m->m_len = total_len;
                }

#ifdef SK_RXCSUM
                sk_rxcsum(ifp, m, csum1, csum2);
#endif

                reap = 1;
                ifp->if_ipackets++;
                ifp->if_input(ifp, m);
        }

        if (reap) {
                /* Give the refreshed descriptors back to the chip. */
                bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
                                BUS_DMASYNC_PREWRITE);
        }
}
1798
1799 #ifdef SK_RXCSUM
1800 static void
1801 sk_rxcsum(struct ifnet *ifp, struct mbuf *m,
1802           const uint16_t csum1, const uint16_t csum2)
1803 {
1804         struct ether_header *eh;
1805         struct ip *ip;
1806         uint8_t *pp;
1807         int hlen, len, plen;
1808         uint16_t iph_csum, ipo_csum, ipd_csum, csum;
1809
1810         pp = mtod(m, uint8_t *);
1811         plen = m->m_pkthdr.len;
1812         if (plen < sizeof(*eh))
1813                 return;
1814         eh = (struct ether_header *)pp;
1815         iph_csum = in_addword(csum1, (~csum2 & 0xffff));
1816
1817         if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1818                 uint16_t *xp = (uint16_t *)pp;
1819
1820                 xp = (uint16_t *)pp;
1821                 if (xp[1] != htons(ETHERTYPE_IP))
1822                         return;
1823                 iph_csum = in_addword(iph_csum, (~xp[0] & 0xffff));
1824                 iph_csum = in_addword(iph_csum, (~xp[1] & 0xffff));
1825                 xp = (uint16_t *)(pp + sizeof(struct ip));
1826                 iph_csum = in_addword(iph_csum, xp[0]);
1827                 iph_csum = in_addword(iph_csum, xp[1]);
1828                 pp += EVL_ENCAPLEN;
1829         } else if (eh->ether_type != htons(ETHERTYPE_IP)) {
1830                 return;
1831         }
1832
1833         pp += sizeof(*eh);
1834         plen -= sizeof(*eh);
1835
1836         ip = (struct ip *)pp;
1837
1838         if (ip->ip_v != IPVERSION)
1839                 return;
1840
1841         hlen = ip->ip_hl << 2;
1842         if (hlen < sizeof(struct ip))
1843                 return;
1844         if (hlen > ntohs(ip->ip_len))
1845                 return;
1846
1847         /* Don't deal with truncated or padded packets. */
1848         if (plen != ntohs(ip->ip_len))
1849                 return;
1850
1851         len = hlen - sizeof(struct ip);
1852         if (len > 0) {
1853                 uint16_t *p;
1854
1855                 p = (uint16_t *)(ip + 1);
1856                 ipo_csum = 0;
1857                 for (ipo_csum = 0; len > 0; len -= sizeof(*p), p++)
1858                         ipo_csum = in_addword(ipo_csum, *p);
1859                 iph_csum = in_addword(iph_csum, ipo_csum);
1860                 ipd_csum = in_addword(csum2, (~ipo_csum & 0xffff));
1861         } else {
1862                 ipd_csum = csum2;
1863         }
1864
1865         if (iph_csum != 0xffff)
1866                 return;
1867         m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;
1868
1869         if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
1870                 return;                 /* ip frag, we're done for now */
1871
1872         pp += hlen;
1873
1874         /* Only know checksum protocol for udp/tcp */
1875         if (ip->ip_p == IPPROTO_UDP) {
1876                 struct udphdr *uh = (struct udphdr *)pp;
1877
1878                 if (uh->uh_sum == 0)    /* udp with no checksum */
1879                         return;
1880         } else if (ip->ip_p != IPPROTO_TCP) {
1881                 return;
1882         }
1883
1884         csum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1885             htonl(ntohs(ip->ip_len) - hlen + ip->ip_p) + ipd_csum);
1886         if (csum == 0xffff) {
1887                 m->m_pkthdr.csum_data = csum;
1888                 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1889         }
1890 }
1891 #endif
1892
/*
 * Transmit-completion handler: reclaim descriptors the chip has finished
 * with, free their mbufs, and re-enable queuing when ring space frees up.
 */
static void
sk_txeof(struct sk_if_softc *sc_if)
{
        struct sk_chain_data *cd = &sc_if->sk_cdata;
        struct ifnet *ifp = &sc_if->arpcom.ac_if;
        uint32_t idx;
        int reap = 0;

        DPRINTFN(2, ("sk_txeof\n"));

        /* Make the chip's descriptor writes visible to the CPU. */
        bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
                        BUS_DMASYNC_POSTREAD);

        /*
         * Go through our tx ring and free mbufs for those
         * frames that have been sent.
         */
        idx = cd->sk_tx_cons;
        while (idx != cd->sk_tx_prod) {
                struct sk_tx_desc *cur_tx;
                uint32_t sk_ctl;

                cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
                sk_ctl = le32toh(cur_tx->sk_ctl);
#ifdef SK_DEBUG
                if (skdebug >= 2)
                        sk_dump_txdesc(cur_tx, idx);
#endif
                /* Chip still owns this descriptor; nothing more to reap. */
                if (sk_ctl & SK_TXCTL_OWN)
                        break;
                /* Count one packet per final fragment. */
                if (sk_ctl & SK_TXCTL_LASTFRAG)
                        ifp->if_opackets++;
                if (cd->sk_tx_chain[idx].sk_mbuf != NULL) {
                        bus_dmamap_unload(cd->sk_tx_dtag, cd->sk_tx_dmap[idx]);
                        m_freem(cd->sk_tx_chain[idx].sk_mbuf);
                        cd->sk_tx_chain[idx].sk_mbuf = NULL;
                }
                sc_if->sk_cdata.sk_tx_cnt--;
                reap = 1;
                SK_INC(idx, SK_TX_RING_CNT);
        }
        /* Rearm (5s) or disarm the watchdog based on outstanding work. */
        ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;

        if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
                ifp->if_flags &= ~IFF_OACTIVE;

        sc_if->sk_cdata.sk_tx_cons = idx;

        if (reap) {
                bus_dmamap_sync(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
                                BUS_DMASYNC_PREWRITE);
        }
}
1946
/*
 * Periodic link-state callout for GEnesis (XMAC) ports.  Polls the link
 * sync pin until the link is stable, then re-enables the GP0 interrupt
 * and stops rescheduling itself.
 */
static void
sk_tick(void *xsc_if)
{
        struct sk_if_softc *sc_if = xsc_if;
        struct ifnet *ifp = &sc_if->arpcom.ac_if;
        struct mii_data *mii = device_get_softc(sc_if->sk_miibus);
        int i;

        DPRINTFN(2, ("sk_tick\n"));

        lwkt_serialize_enter(ifp->if_serializer);

        if ((ifp->if_flags & IFF_UP) == 0) {
                lwkt_serialize_exit(ifp->if_serializer);
                return;
        }

        /* Broadcom PHYs are handled by their own interrupt routine. */
        if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
                sk_intr_bcom(sc_if);
                lwkt_serialize_exit(ifp->if_serializer);
                return;
        }

        /*
         * According to SysKonnect, the correct way to verify that
         * the link has come back up is to poll bit 0 of the GPIO
         * register three times. This pin has the signal from the
         * link sync pin connected to it; if we read the same link
         * state 3 times in a row, we know the link is up.
         */
        for (i = 0; i < 3; i++) {
                if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
                        break;
        }

        /* Not stable yet: poll again in one second. */
        if (i != 3) {
                callout_reset(&sc_if->sk_tick_timer, hz, sk_tick, sc_if);
                lwkt_serialize_exit(ifp->if_serializer);
                return;
        }

        /* Turn the GP0 interrupt back on. */
        SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
        SK_XM_READ_2(sc_if, XM_ISR);
        mii_tick(mii);
        callout_stop(&sc_if->sk_tick_timer);
        lwkt_serialize_exit(ifp->if_serializer);
}
1995
/*
 * Periodic callout for Yukon (GMAC) ports: drive the MII state machine
 * once a second.  Unlike sk_tick(), this reschedules unconditionally.
 */
static void
sk_yukon_tick(void *xsc_if)
{
        struct sk_if_softc *sc_if = xsc_if;
        struct ifnet *ifp = &sc_if->arpcom.ac_if;
        struct mii_data *mii = device_get_softc(sc_if->sk_miibus);

        lwkt_serialize_enter(ifp->if_serializer);
        mii_tick(mii);
        callout_reset(&sc_if->sk_tick_timer, hz, sk_yukon_tick, sc_if);
        lwkt_serialize_exit(ifp->if_serializer);
}
2008
/*
 * Service a Broadcom PHY interrupt (external PHY on GEnesis boards).
 * The MAC's TX/RX paths are paused around the PHY access and re-enabled
 * on the way out.
 */
static void
sk_intr_bcom(struct sk_if_softc *sc_if)
{
        struct mii_data *mii = device_get_softc(sc_if->sk_miibus);
        struct ifnet *ifp = &sc_if->arpcom.ac_if;
        int status;

        DPRINTFN(2, ("sk_intr_bcom\n"));

        /* Pause the MAC while we poke at the PHY. */
        SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

        /*
         * Read the PHY interrupt register to make sure
         * we clear any pending interrupts.
         */
        status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

        /* Interface down: reinitialize the XMAC and bail (it re-enables
         * the MAC paths itself). */
        if ((ifp->if_flags & IFF_RUNNING) == 0) {
                sk_init_xmac(sc_if);
                return;
        }

        if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
                int lstat;

                lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM,
                    BRGPHY_MII_AUXSTS);

                if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
                        /* Link lost: renegotiate and note it. */
                        mii_mediachg(mii);
                        /* Turn off the link LED. */
                        SK_IF_WRITE_1(sc_if, 0,
                            SK_LINKLED1_CTL, SK_LINKLED_OFF);
                        sc_if->sk_link = 0;
                } else if (status & BRGPHY_ISR_LNK_CHG) {
                        /* Link (re)gained: mask PHY interrupts again. */
                        sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
                            BRGPHY_MII_IMR, 0xFF00);
                        mii_tick(mii);
                        sc_if->sk_link = 1;
                        /* Turn on the link LED. */
                        SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
                            SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
                            SK_LINKLED_BLINK_OFF);
                } else {
                        /* Autoneg in progress: keep polling via sk_tick. */
                        mii_tick(mii);
                        callout_reset(&sc_if->sk_tick_timer, hz,
                                      sk_tick, sc_if);
                }
        }

        /* Resume the MAC's TX/RX paths. */
        SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
}
2061
/*
 * Service an XMAC interrupt: handle link-sync (GP0) and autoneg events
 * by starting the sk_tick poller, and flush the FIFOs on under/overrun.
 */
static void
sk_intr_xmac(struct sk_if_softc *sc_if)
{
        uint16_t status;

        /* Reading ISR also acknowledges the pending conditions. */
        status = SK_XM_READ_2(sc_if, XM_ISR);
        DPRINTFN(2, ("sk_intr_xmac\n"));

        if (sc_if->sk_phytype == SK_PHYTYPE_XMAC &&
            (status & (XM_ISR_GP0_SET | XM_ISR_AUTONEG_DONE))) {
                /* Mask GP0 until sk_tick() confirms the link is stable. */
                if (status & XM_ISR_GP0_SET)
                        SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);

                callout_reset(&sc_if->sk_tick_timer, hz,
                              sk_tick, sc_if);
        }

        if (status & XM_IMR_TX_UNDERRUN)
                SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

        if (status & XM_IMR_RX_OVERRUN)
                SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
}
2085
2086 static void
2087 sk_intr_yukon(struct sk_if_softc *sc_if)
2088 {
2089         uint8_t status;
2090
2091         status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
2092         /* RX overrun */
2093         if ((status & SK_GMAC_INT_RX_OVER) != 0) {
2094                 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
2095                     SK_RFCTL_RX_FIFO_OVER);
2096         }
2097         /* TX underrun */
2098         if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
2099                 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
2100                     SK_TFCTL_TX_FIFO_UNDER);
2101         }
2102
2103         DPRINTFN(2, ("sk_intr_yukon status=%#x\n", status));
2104 }
2105
/*
 * Shared interrupt handler for the dual-port controller.  Dispatches RX,
 * TX and MAC events for both ports, re-reading the interrupt source
 * register until no enabled source remains pending.
 */
static void
sk_intr(void *xsc)
{
        struct sk_softc *sc = xsc;
        struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
        struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
        struct ifnet *ifp0 = NULL, *ifp1 = NULL;
        uint32_t status;

        ASSERT_SERIALIZED(&sc->sk_serializer);

        /* 0xffffffff means the device is gone (e.g. hot-unplugged). */
        status = CSR_READ_4(sc, SK_ISSR);
        if (status == 0 || status == 0xffffffff)
                return;

        if (sc_if0 != NULL)
                ifp0 = &sc_if0->arpcom.ac_if;
        if (sc_if1 != NULL)
                ifp1 = &sc_if1->arpcom.ac_if;

        /* Only act on sources we actually have enabled. */
        for (; (status &= sc->sk_intrmask) != 0;) {
                /* Handle receive interrupts first. */
                if (sc_if0 && (status & SK_ISR_RX1_EOF)) {
                        sk_rxeof(sc_if0);
                        CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
                            SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
                }
                if (sc_if1 && (status & SK_ISR_RX2_EOF)) {
                        sk_rxeof(sc_if1);
                        CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
                            SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
                }

                /* Then transmit interrupts. */
                if (sc_if0 && (status & SK_ISR_TX1_S_EOF)) {
                        sk_txeof(sc_if0);
                        CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
                            SK_TXBMU_CLR_IRQ_EOF);
                }
                if (sc_if1 && (status & SK_ISR_TX2_S_EOF)) {
                        sk_txeof(sc_if1);
                        CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
                            SK_TXBMU_CLR_IRQ_EOF);
                }

                /* Then MAC interrupts. */
                if (sc_if0 && (status & SK_ISR_MAC1) &&
                    (ifp0->if_flags & IFF_RUNNING)) {
                        if (SK_IS_GENESIS(sc))
                                sk_intr_xmac(sc_if0);
                        else
                                sk_intr_yukon(sc_if0);
                }

                if (sc_if1 && (status & SK_ISR_MAC2) &&
                    (ifp1->if_flags & IFF_RUNNING)) {
                        if (SK_IS_GENESIS(sc))
                                sk_intr_xmac(sc_if1);
                        else
                                sk_intr_yukon(sc_if1);
                }

                /* External register interrupt == Broadcom PHY event. */
                if (status & SK_ISR_EXTERNAL_REG) {
                        if (sc_if0 != NULL &&
                            sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
                                sk_intr_bcom(sc_if0);

                        if (sc_if1 != NULL &&
                            sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
                                sk_intr_bcom(sc_if1);
                }
                /* Loop while new events arrived during processing. */
                status = CSR_READ_4(sc, SK_ISSR);
        }

        CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

        /* Kick the transmitters if packets queued up meanwhile. */
        if (ifp0 != NULL && !ifq_is_empty(&ifp0->if_snd))
                sk_start(ifp0);
        if (ifp1 != NULL && !ifq_is_empty(&ifp1->if_snd))
                sk_start(ifp1);
}
2187
/*
 * Bring up the XMAC (GEnesis) MAC for one port: unreset the MAC, program
 * the station address, RX/TX policy and interrupt masks, and configure
 * the MAC arbiter for the detected XMAC II revision.
 */
static void
sk_init_xmac(struct sk_if_softc *sc_if)
{
        struct sk_softc *sc = sc_if->sk_softc;
        struct ifnet *ifp = &sc_if->arpcom.ac_if;
        /* (reg, val) pairs for the BCM5400 errata workaround below;
         * terminated by a zero register. */
        static const struct sk_bcom_hack bhack[] = {
        { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
        { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
        { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
        { 0, 0 } };

        DPRINTFN(2, ("sk_init_xmac\n"));

        /* Unreset the XMAC. */
        SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
        DELAY(1000);

        /* Reset the XMAC's internal state. */
        SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

        /* Save the XMAC II revision */
        sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

        /*
         * Perform additional initialization for external PHYs,
         * namely for the 1000baseTX cards that use the XMAC's
         * GMII mode.
         */
        if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
                int i = 0;
                uint32_t val;

                /* Take PHY out of reset. */
                val = sk_win_read_4(sc, SK_GPIO);
                if (sc_if->sk_port == SK_PORT_A)
                        val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
                else
                        val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
                sk_win_write_4(sc, SK_GPIO, val);

                /* Enable GMII mode on the XMAC. */
                SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

                /* Reset the PHY, then mask most of its interrupts. */
                sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
                    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
                DELAY(10000);
                sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
                    BRGPHY_MII_IMR, 0xFFF0);

                /*
                 * Early versions of the BCM5400 apparently have
                 * a bug that requires them to have their reserved
                 * registers initialized to some magic values. I don't
                 * know what the numbers do, I'm just the messenger.
                 */
                if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
                    == 0x6041) {
                        while(bhack[i].reg) {
                                sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
                                    bhack[i].reg, bhack[i].val);
                                i++;
                        }
                }
        }

        /* Set station address */
        SK_XM_WRITE_2(sc_if, XM_PAR0,
            *(uint16_t *)(&sc_if->arpcom.ac_enaddr[0]));
        SK_XM_WRITE_2(sc_if, XM_PAR1,
            *(uint16_t *)(&sc_if->arpcom.ac_enaddr[2]));
        SK_XM_WRITE_2(sc_if, XM_PAR2,
            *(uint16_t *)(&sc_if->arpcom.ac_enaddr[4]));
        SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

        if (ifp->if_flags & IFF_BROADCAST)
                SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
        else
                SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);

        /* We don't need the FCS appended to the packet. */
        SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

        /* We want short frames padded to 60 bytes. */
        SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

        /*
         * Enable the reception of all error frames. This is
         * a necessary evil due to the design of the XMAC. The
         * XMAC's receive FIFO is only 8K in size, however jumbo
         * frames can be up to 9000 bytes in length. When bad
         * frame filtering is enabled, the XMAC's RX FIFO operates
         * in 'store and forward' mode. For this to work, the
         * entire frame has to fit into the FIFO, but that means
         * that jumbo frames larger than 8192 bytes will be
         * truncated. Disabling all bad frame filtering causes
         * the RX FIFO to operate in streaming mode, in which
         * case the XMAC will start transfering frames out of the
         * RX FIFO as soon as the FIFO threshold is reached.
         */
        SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
            XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
            XM_MODE_RX_INRANGELEN);

        SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

        /*
         * Bump up the transmit threshold. This helps hold off transmit
         * underruns when we're blasting traffic from both ports at once.
         */
        SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

        /* Set promiscuous mode */
        sk_setpromisc(sc_if);

        /* Set multicast filter */
        sk_setmulti(sc_if);

        /* Clear and enable interrupts */
        SK_XM_READ_2(sc_if, XM_ISR);
        if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
                SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
        else
                SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

        /* Configure MAC arbiter */
        switch(sc_if->sk_xmac_rev) {
        case XM_XMAC_REV_B2:
                sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
                sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
                sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
                sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
                sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
                sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
                sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
                sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
                sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
                break;
        case XM_XMAC_REV_C1:
                sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
                sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
                sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
                sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
                sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
                sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
                sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
                sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
                sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
                break;
        default:
                break;
        }
        sk_win_write_2(sc, SK_MACARB_CTL,
            SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

        sc_if->sk_link = 1;
}
2344
/*
 * Bring up the Yukon GMAC/GPHY for one port: apply the COMA-mode
 * workaround where needed, reset and configure the PHY, program the
 * station address and MAC parameters, and set up the RX/TX MAC FIFOs.
 */
static void
sk_init_yukon(struct sk_if_softc *sc_if)
{
        uint32_t phy, v;
        uint16_t reg;
        struct sk_softc *sc;
        int i;

        sc = sc_if->sk_softc;

        DPRINTFN(2, ("sk_init_yukon: start: sk_csr=%#x\n",
                     CSR_READ_4(sc_if->sk_softc, SK_CSR)));

        if (sc->sk_type == SK_YUKON_LITE &&
            sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
                /*
                 * Workaround code for COMA mode, set PHY reset.
                 * Otherwise it will not correctly take chip out of
                 * powerdown (coma)
                 */
                v = sk_win_read_4(sc, SK_GPIO);
                v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
                sk_win_write_4(sc, SK_GPIO, v);
        }

        DPRINTFN(6, ("sk_init_yukon: 1\n"));

        /* GMAC and GPHY Reset */
        SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
        SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
        DELAY(1000);

        DPRINTFN(6, ("sk_init_yukon: 2\n"));

        if (sc->sk_type == SK_YUKON_LITE &&
            sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
                /*
                 * Workaround code for COMA mode, clear PHY reset
                 */
                v = sk_win_read_4(sc, SK_GPIO);
                v |= SK_GPIO_DIR9;
                v &= ~SK_GPIO_DAT9;
                sk_win_write_4(sc, SK_GPIO, v);
        }

        /* Build the GPHY control word for this media type. */
        phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
                SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

        if (sc->sk_coppertype)
                phy |= SK_GPHY_COPPER;
        else
                phy |= SK_GPHY_FIBER;

        DPRINTFN(3, ("sk_init_yukon: phy=%#x\n", phy));

        /* Pulse the PHY reset with the chosen configuration. */
        SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
        DELAY(1000);
        SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
        SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
                      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

        DPRINTFN(3, ("sk_init_yukon: gmac_ctrl=%#x\n",
                     SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

        DPRINTFN(6, ("sk_init_yukon: 3\n"));

        /* unused read of the interrupt source register */
        DPRINTFN(6, ("sk_init_yukon: 4\n"));
        SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

        DPRINTFN(6, ("sk_init_yukon: 4a\n"));
        reg = SK_YU_READ_2(sc_if, YUKON_PAR);
        DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));

        /* MIB Counter Clear Mode set */
        reg |= YU_PAR_MIB_CLR;
        DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
        DPRINTFN(6, ("sk_init_yukon: 4b\n"));
        SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

        /* MIB Counter Clear Mode clear */
        DPRINTFN(6, ("sk_init_yukon: 5\n"));
        reg &= ~YU_PAR_MIB_CLR;
        SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

        /* receive control reg */
        DPRINTFN(6, ("sk_init_yukon: 7\n"));
        SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

        /* transmit parameter register */
        DPRINTFN(6, ("sk_init_yukon: 8\n"));
        SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
                      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

        /* serial mode register */
        DPRINTFN(6, ("sk_init_yukon: 9\n"));
        SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
                      YU_SMR_MFL_VLAN | YU_SMR_MFL_JUMBO |
                      YU_SMR_IPG_DATA(0x1e));

        DPRINTFN(6, ("sk_init_yukon: 10\n"));
        /* Setup Yukon's address */
        for (i = 0; i < 3; i++) {
                /* Write Source Address 1 (unicast filter) */
                SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
                              sc_if->arpcom.ac_enaddr[i * 2] |
                              sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
        }

        /* Source Address 2 comes from the port's factory MAC registers. */
        for (i = 0; i < 3; i++) {
                reg = sk_win_read_2(sc_if->sk_softc,
                                    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
                SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
        }

        /* Set promiscuous mode */
        sk_setpromisc(sc_if);

        /* Set multicast filter */
        DPRINTFN(6, ("sk_init_yukon: 11\n"));
        sk_setmulti(sc_if);

        /* enable interrupt mask for counter overflows */
        DPRINTFN(6, ("sk_init_yukon: 12\n"));
        SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
        SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
        SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

        /* Configure RX MAC FIFO Flush Mask */
        v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
            YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
            YU_RXSTAT_JABBER;
        SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

        /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
        if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
                v = SK_TFCTL_OPERATION_ON;
        else
                v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
        /* Configure RX MAC FIFO */
        SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
        SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

        /* Increase flush threshould to 64 bytes */
        SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
            SK_RFCTL_FIFO_THRESHOLD + 1);

        /* Configure TX MAC FIFO */
        SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
        SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

        DPRINTFN(6, ("sk_init_yukon: end\n"));
}
2498
/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 *
 * if_init entry point: bring the interface fully up.  The sequence is
 * order-sensitive: stop the chip, program LEDs/poll timer, init the
 * MAC (XMAC or Yukon GMAC), then FIFOs, RAM buffers, BMUs, descriptor
 * rings, and finally unmask interrupts and start the BMUs.
 */
static void
sk_init(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc_if->sk_miibus);

	DPRINTFN(2, ("sk_init\n"));

	/* Caller (ifnet layer) must hold the per-interface serializer. */
	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Already up; re-init would needlessly reset the rings. */
	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (SK_IS_GENESIS(sc)) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			SK_TXLEDCTL_COUNTER_START);
	}

	/*
	 * Configure descriptor poll timer
	 *
	 * SK-NET GENESIS data sheet says that possibility of losing Start
	 * transmit command due to CPU/cache related interim storage problems
	 * under certain conditions. The document recommends a polling
	 * mechanism to send a Start transmit command to initiate transfer
	 * of ready descriptors regulary. To cope with this issue sk(4) now
	 * enables descriptor poll timer to initiate descriptor processing
	 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still
	 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx
	 * command instead of waiting for next descriptor polling time.
	 * The same rule may apply to Rx side too but it seems that is not
	 * needed at the moment.
	 * Since sk(4) uses descriptor polling as a last resort there is no
	 * need to set smaller polling time than maximum allowable one.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	/* Kick the PHY into the currently selected media. */
	mii_mediachg(mii);

	if (SK_IS_GENESIS(sc)) {
		/* Configure MAC FIFOs: take out of reset, then enable. */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON | SK_TXARCTL_FSYNC_ON);

	/*
	 * Configure RAMbuffers: set read/write pointers to the start of
	 * this port's slice of on-board RAM before turning the buffer on.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs: point them at the descriptor rings in host RAM. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    SK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    SK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors; on failure undo everything via sk_stop(). */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		if_printf(ifp, "initialization failed: "
			  "no memory for rx buffers\n");
		sk_stop(sc_if);
		return;
	}

	if (sk_init_tx_ring(sc_if) == ENOBUFS) {
		if_printf(ifp, "initialization failed: "
			  "no memory for tx buffers\n");
		sk_stop(sc_if);
		return;
	}

	/* Configure interrupt handling; the read clears stale status. */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	if (SK_IS_GENESIS(sc)) {
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
			       XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
	}

	if (SK_IS_YUKON(sc)) {
		uint16_t reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
#if 0
		/* XXX disable 100Mbps and full duplex mode? */
		reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS);
#endif
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Activate descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
	/* Start transfer of Tx descriptors */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Yukon needs a periodic tick for link/statistics handling. */
	if (SK_IS_YUKON(sc))
		callout_reset(&sc_if->sk_tick_timer, hz, sk_yukon_tick, sc_if);
}
2669
/*
 * Stop the interface: halt both BMUs, reset the PHY/MAC/FIFO blocks,
 * mask this port's interrupts, and release all mbufs still attached
 * to the RX/TX rings.  Safe to call on an already-stopped interface.
 */
static void
sk_stop(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct sk_chain_data *cd = &sc_if->sk_cdata;
	uint32_t val;
	int i;

	/* Must run under the ifnet serializer like the rest of sk(4). */
	ASSERT_SERIALIZED(ifp->if_serializer);

	DPRINTFN(2, ("sk_stop\n"));

	callout_stop(&sc_if->sk_tick_timer);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	/* Stop Tx descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);

	/*
	 * Stop transfer of Tx descriptors; busy-wait until the chip
	 * clears the STOP bit to acknowledge (bounded by SK_TIMEOUT).
	 */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
		if (!(val & SK_TXBMU_TX_STOP))
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		if_printf(ifp, "cannot stop transfer of Tx descriptors\n");

	/* Stop transfer of Rx descriptors; same handshake as above. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
		if (!(val & SK_RXBMU_RX_STOP))
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		if_printf(ifp, "cannot stop transfer of Rx descriptors\n");

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET | SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST,
	    SK_RBCTL_RESET | SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts for this port only; the other port keeps its. */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Ack pending XMAC interrupts and mask all of them. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (cd->sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(cd->sk_rx_chain[i].sk_mbuf);
			cd->sk_rx_chain[i].sk_mbuf = NULL;
		}
	}
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (cd->sk_tx_chain[i].sk_mbuf != NULL) {
			/* TX mbufs are DMA-mapped; unload before freeing. */
			bus_dmamap_unload(cd->sk_tx_dtag, cd->sk_tx_dmap[i]);
			m_freem(cd->sk_tx_chain[i].sk_mbuf);
			cd->sk_tx_chain[i].sk_mbuf = NULL;
		}
	}
}
2775
2776 #ifdef SK_DEBUG
2777 static void
2778 sk_dump_txdesc(struct sk_tx_desc *desc, int idx)
2779 {
2780 #define DESC_PRINT(X)                                   \
2781         if (X)                                  \
2782                 printf("txdesc[%d]." #X "=%#x\n",       \
2783                        idx, X);
2784
2785         DESC_PRINT(le32toh(desc->sk_ctl));
2786         DESC_PRINT(le32toh(desc->sk_next));
2787         DESC_PRINT(le32toh(desc->sk_data_lo));
2788         DESC_PRINT(le32toh(desc->sk_data_hi));
2789         DESC_PRINT(le32toh(desc->sk_xmac_txstat));
2790         DESC_PRINT(le16toh(desc->sk_rsvd0));
2791         DESC_PRINT(le16toh(desc->sk_csum_startval));
2792         DESC_PRINT(le16toh(desc->sk_csum_startpos));
2793         DESC_PRINT(le16toh(desc->sk_csum_writepos));
2794         DESC_PRINT(le16toh(desc->sk_rsvd1));
2795 #undef PRINT
2796 }
2797
/*
 * Debug helper: hex+ASCII dump of a buffer, 16 bytes per row in the
 * classic "offset  hex...  ascii" layout, with an extra gap after the
 * eighth hex column.  Non-printable bytes show as spaces.
 */
static void
sk_dump_bytes(const char *data, int len)
{
	int off, col, nbytes;

	for (off = 0; off < len; off += 16) {
		/* Bytes remaining in this row (final row may be short). */
		nbytes = len - off;
		if (nbytes > 16)
			nbytes = 16;

		printf("%08x  ", off);

		/* Hex columns, with a visual break after column 8. */
		for (col = 0; col < nbytes; col++) {
			printf("%02x ", data[off + col] & 0xff);
			if (col == 7)
				printf(" ");
		}

		/* Pad a short row so the ASCII column stays aligned. */
		for (; col < 16; col++)
			printf("   ");
		printf("  ");

		/* ASCII column: printable chars only, blanks otherwise. */
		for (col = 0; col < nbytes; col++) {
			int ch = data[off + col] & 0xff;
			printf("%c", ' ' <= ch && ch <= '~' ? ch : ' ');
		}

		printf("\n");

		if (nbytes < 16)
			break;
	}
}
2829
2830 static void
2831 sk_dump_mbuf(struct mbuf *m)
2832 {
2833         int count = m->m_pkthdr.len;
2834
2835         printf("m=%p, m->m_pkthdr.len=%d\n", m, m->m_pkthdr.len);
2836
2837         while (count > 0 && m) {
2838                 printf("m=%p, m->m_data=%p, m->m_len=%d\n",
2839                        m, m->m_data, m->m_len);
2840                 sk_dump_bytes(mtod(m, char *), m->m_len);
2841
2842                 count -= m->m_len;
2843                 m = m->m_next;
2844         }
2845 }
2846 #endif
2847
2848 /*
2849  * Allocate jumbo buffer storage. The SysKonnect adapters support
2850  * "jumbograms" (9K frames), although SysKonnect doesn't currently
2851  * use them in their drivers. In order for us to use them, we need
2852  * large 9K receive buffers, however standard mbuf clusters are only
2853  * 2048 bytes in size. Consequently, we need to allocate and manage
2854  * our own jumbo buffer pool. Fortunately, this does not require an
2855  * excessive amount of additional code.
2856  */
/*
 * Allocate the jumbo-frame buffer pool: one contiguous SK_JMEM-byte
 * DMA block carved into SK_JSLOTS slots of SK_JLEN bytes each.
 * Returns 0 on success or a bus_dma error; on failure the partially
 * created tag/memory is released before returning.
 */
static int
sk_jpool_alloc(device_t dev)
{
	struct sk_if_softc *sc_if = device_get_softc(dev);
	struct sk_chain_data *cd = &sc_if->sk_cdata;
	bus_addr_t paddr;
	caddr_t buf;
	int error, i;

	lwkt_serialize_init(&cd->sk_jpool_serializer);

	/*
	 * Single-segment tag (nsegments == 1) below 4GB, since the chip
	 * only takes 32-bit DMA addresses; page-aligned.
	 */
	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL, SK_JMEM, 1, SK_JMEM,
				   0, &cd->sk_jpool_dtag);
	if (error) {
		device_printf(dev, "can't create jpool DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(cd->sk_jpool_dtag, &cd->sk_jpool,
				 BUS_DMA_WAITOK, &cd->sk_jpool_dmap);
	if (error) {
		device_printf(dev, "can't alloc jpool DMA mem\n");
		bus_dma_tag_destroy(cd->sk_jpool_dtag);
		/* NULL tag marks "no pool" for sk_jpool_free(). */
		cd->sk_jpool_dtag = NULL;
		return error;
	}

	/* sk_dmamem_addr stores the single segment's address in paddr. */
	error = bus_dmamap_load(cd->sk_jpool_dtag, cd->sk_jpool_dmap,
				cd->sk_jpool, SK_JMEM,
				sk_dmamem_addr, &paddr, BUS_DMA_WAITOK);
	if (error) {
		device_printf(dev, "can't load DMA mem\n");
		bus_dmamem_free(cd->sk_jpool_dtag, cd->sk_jpool,
				cd->sk_jpool_dmap);
		bus_dma_tag_destroy(cd->sk_jpool_dtag);
		cd->sk_jpool_dtag = NULL;
		return error;
	}

	SLIST_INIT(&cd->sk_jpool_free_ent);
	buf = cd->sk_jpool;

	/*
	 * Now divide it up into SK_JLEN pieces.
	 * Each slot records both its KVA and bus address so RX
	 * descriptors can be pointed at it directly.
	 */
	for (i = 0; i < SK_JSLOTS; i++) {
		struct sk_jpool_entry *entry = &cd->sk_jpool_ent[i];

		entry->sc_if = sc_if;
		entry->inuse = 0;
		entry->slot = i;
		entry->buf = buf;
		entry->paddr = paddr;

		SLIST_INSERT_HEAD(&cd->sk_jpool_free_ent, entry, entry_next);

		buf += SK_JLEN;
		paddr += SK_JLEN;
	}
	return 0;
}
2920
2921 static void
2922 sk_jpool_free(struct sk_if_softc *sc_if)
2923 {
2924         struct sk_chain_data *cd = &sc_if->sk_cdata;
2925
2926         if (cd->sk_jpool_dtag != NULL) {
2927                 bus_dmamap_unload(cd->sk_jpool_dtag, cd->sk_jpool_dmap);
2928                 bus_dmamem_free(cd->sk_jpool_dtag, cd->sk_jpool,
2929                                 cd->sk_jpool_dmap);
2930                 bus_dma_tag_destroy(cd->sk_jpool_dtag);
2931                 cd->sk_jpool_dtag = NULL;
2932         }
2933 }
2934
2935 static int
2936 sk_dma_alloc(device_t dev)
2937 {
2938         struct sk_if_softc *sc_if = device_get_softc(dev);
2939         struct sk_chain_data *cd = &sc_if->sk_cdata;
2940         int i, j, error;
2941
2942         /*
2943          * Allocate the descriptor queues.
2944          * TODO: split into RX/TX rings
2945          */
2946         error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
2947                                    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2948                                    NULL, NULL,
2949                                    sizeof(struct sk_ring_data), 1,
2950                                    sizeof(struct sk_ring_data), 0,
2951                                    &sc_if->sk_rdata_dtag);
2952         if (error) {
2953                 device_printf(dev, "can't create desc DMA tag\n");
2954                 return error;
2955         }
2956
2957         error = bus_dmamem_alloc(sc_if->sk_rdata_dtag,
2958                                  (void **)&sc_if->sk_rdata,
2959                                  BUS_DMA_WAITOK | BUS_DMA_ZERO,
2960                                  &sc_if->sk_rdata_dmap);
2961         if (error) {
2962                 device_printf(dev, "can't alloc desc DMA mem\n");
2963                 bus_dma_tag_destroy(sc_if->sk_rdata_dtag);
2964                 sc_if->sk_rdata_dtag = NULL;
2965                 return error;
2966         }
2967
2968         error = bus_dmamap_load(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap,
2969                                 sc_if->sk_rdata, sizeof(struct sk_ring_data),
2970                                 sk_dmamem_addr, &sc_if->sk_rdata_paddr,
2971                                 BUS_DMA_WAITOK);
2972         if (error) {
2973                 device_printf(dev, "can't load desc DMA mem\n");
2974                 bus_dmamem_free(sc_if->sk_rdata_dtag, sc_if->sk_rdata,
2975                                 sc_if->sk_rdata_dmap);
2976                 bus_dma_tag_destroy(sc_if->sk_rdata_dtag);
2977                 sc_if->sk_rdata_dtag = NULL;
2978                 return error;
2979         }
2980
2981         /* Try to allocate memory for jumbo buffers. */
2982         error = sk_jpool_alloc(dev);
2983         if (error) {
2984                 device_printf(dev, "jumbo buffer allocation failed\n");
2985                 return error;
2986         }
2987
2988         /* Create DMA tag for TX. */
2989         error = bus_dma_tag_create(NULL, 1, 0,
2990                                    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2991                                    NULL, NULL,
2992                                    SK_JLEN, SK_NTXSEG, SK_JLEN,
2993                                    0, &cd->sk_tx_dtag);
2994         if (error) {
2995                 device_printf(dev, "can't create TX DMA tag\n");
2996                 return error;
2997         }
2998
2999         /* Create DMA maps for TX. */
3000         for (i = 0; i < SK_TX_RING_CNT; i++) {
3001                 error = bus_dmamap_create(cd->sk_tx_dtag, 0,
3002                                           &cd->sk_tx_dmap[i]);
3003                 if (error) {
3004                         device_printf(dev, "can't create %dth TX DMA map\n", i);
3005                         goto fail;
3006                 }
3007         }
3008         return 0;
3009 fail:
3010         for (j = 0; j < i; ++j)
3011                 bus_dmamap_destroy(cd->sk_tx_dtag, cd->sk_tx_dmap[i]);
3012         bus_dma_tag_destroy(cd->sk_tx_dtag);
3013         cd->sk_tx_dtag = NULL;
3014         return error;
3015 }
3016
/*
 * Release everything sk_dma_alloc() set up, in reverse order:
 * TX maps (unloading any still-mapped mbufs first), the jumbo pool,
 * then the descriptor ring block.  Safe on partially-allocated state
 * since each stage is guarded by its tag being non-NULL.
 */
static void
sk_dma_free(device_t dev)
{
	struct sk_if_softc *sc_if = device_get_softc(dev);
	struct sk_chain_data *cd = &sc_if->sk_cdata;

	if (cd->sk_tx_dtag != NULL) {
		int i;

		for (i = 0; i < SK_TX_RING_CNT; ++i) {
			/* A slot with an mbuf still holds a live mapping. */
			if (cd->sk_tx_chain[i].sk_mbuf != NULL) {
				bus_dmamap_unload(cd->sk_tx_dtag,
						  cd->sk_tx_dmap[i]);
				m_freem(cd->sk_tx_chain[i].sk_mbuf);
				cd->sk_tx_chain[i].sk_mbuf = NULL;
			}
			bus_dmamap_destroy(cd->sk_tx_dtag, cd->sk_tx_dmap[i]);
		}
		bus_dma_tag_destroy(cd->sk_tx_dtag);
		cd->sk_tx_dtag = NULL;
	}

	sk_jpool_free(sc_if);

	if (sc_if->sk_rdata_dtag != NULL) {
		bus_dmamap_unload(sc_if->sk_rdata_dtag, sc_if->sk_rdata_dmap);
		bus_dmamem_free(sc_if->sk_rdata_dtag, sc_if->sk_rdata,
				sc_if->sk_rdata_dmap);
		bus_dma_tag_destroy(sc_if->sk_rdata_dtag);
		sc_if->sk_rdata_dtag = NULL;
	}
}
3049
3050 static void
3051 sk_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
3052                 bus_size_t mapsz __unused, int error)
3053 {
3054         struct sk_dma_ctx *ctx = arg;
3055         int i;
3056
3057         if (error)
3058                 return;
3059
3060         KASSERT(nsegs <= ctx->nsegs,
3061                 ("too many segments(%d), should be <= %d\n",
3062                  nsegs, ctx->nsegs));
3063
3064         ctx->nsegs = nsegs;
3065         for (i = 0; i < nsegs; ++i)
3066                 ctx->segs[i] = segs[i];
3067 }
3068
3069 static void
3070 sk_dmamem_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
3071 {
3072         KASSERT(nseg == 1, ("too many segments %d", nseg));
3073         *((bus_addr_t *)arg) = seg->ds_addr;
3074 }