1 /*
2  * Copyright (c) 2004
3  *      Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
4  *
5  * Copyright (c) 1997, 1998-2003
6  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by Bill Paul.
19  * 4. Neither the name of the author nor the names of any co-contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: src/sys/dev/re/if_re.c,v 1.25 2004/06/09 14:34:01 naddy Exp $
36  */
37
38 /*
39  * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
40  *
41  * Written by Bill Paul <wpaul@windriver.com>
42  * Senior Networking Software Engineer
43  * Wind River Systems
44  */
45
46 /*
47  * This driver is designed to support RealTek's next generation of
48  * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
49  * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
50  * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
51  *
52  * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
53  * with the older 8139 family, however it also supports a special
54  * C+ mode of operation that provides several new performance enhancing
55  * features. These include:
56  *
57  *      o Descriptor based DMA mechanism. Each descriptor represents
58  *        a single packet fragment. Data buffers may be aligned on
59  *        any byte boundary.
60  *
61  *      o 64-bit DMA
62  *
63  *      o TCP/IP checksum offload for both RX and TX
64  *
65  *      o High and normal priority transmit DMA rings
66  *
67  *      o VLAN tag insertion and extraction
68  *
69  *      o TCP large send (segmentation offload)
70  *
71  * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
72  * programming API is fairly straightforward. The RX filtering, EEPROM
73  * access and PHY access are the same as on the older 8139 series
74  * chips.
75  *
76  * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
77  * same programming API and feature set as the 8139C+ with the following
78  * differences and additions:
79  *
80  *      o 1000Mbps mode
81  *
82  *      o Jumbo frames
83  *
84  *      o GMII and TBI ports/registers for interfacing with copper
85  *        or fiber PHYs
86  *
87  *      o RX and TX DMA rings can have up to 1024 descriptors
88  *        (the 8139C+ allows a maximum of 64)
89  *
90  *      o Slight differences in register layout from the 8139C+
91  *
92  * The TX start and timer interrupt registers are at different locations
93  * on the 8169 than they are on the 8139C+. Also, the status word in the
94  * RX descriptor has a slightly different bit layout. The 8169 does not
95  * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
96  * copper gigE PHY.
97  *
98  * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
99  * (the 'S' stands for 'single-chip'). These devices have the same
100  * programming API as the older 8169, but also have some vendor-specific
101  * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
102  * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
103  * 
104  * This driver takes advantage of the RX and TX checksum offload and
105  * VLAN tag insertion/extraction features. It also implements TX
106  * interrupt moderation using the timer interrupt registers, which
107  * significantly reduces TX interrupt load. There is also support
108  * for jumbo frames; however, the 8169/8169S/8110S cannot transmit
109  * jumbo frames larger than 7440 bytes, so the maximum MTU possible
110  * with this driver is 7422 bytes.
111  */
112
113 #define _IP_VHL
114
115 #include "opt_ifpoll.h"
116
117 #include <sys/param.h>
118 #include <sys/bus.h>
119 #include <sys/endian.h>
120 #include <sys/kernel.h>
121 #include <sys/in_cksum.h>
122 #include <sys/interrupt.h>
123 #include <sys/malloc.h>
124 #include <sys/mbuf.h>
125 #include <sys/rman.h>
126 #include <sys/serialize.h>
127 #include <sys/socket.h>
128 #include <sys/sockio.h>
129 #include <sys/sysctl.h>
130
131 #include <net/bpf.h>
132 #include <net/ethernet.h>
133 #include <net/if.h>
134 #include <net/ifq_var.h>
135 #include <net/if_arp.h>
136 #include <net/if_dl.h>
137 #include <net/if_media.h>
138 #include <net/if_poll.h>
139 #include <net/if_types.h>
140 #include <net/vlan/if_vlan_var.h>
141 #include <net/vlan/if_vlan_ether.h>
142
143 #include <netinet/ip.h>
144
145 #include <dev/netif/mii_layer/mii.h>
146 #include <dev/netif/mii_layer/miivar.h>
147
148 #include <bus/pci/pcidevs.h>
149 #include <bus/pci/pcireg.h>
150 #include <bus/pci/pcivar.h>
151
152 /* "device miibus" required.  See GENERIC if you get errors here. */
153 #include "miibus_if.h"
154
155 #include <dev/netif/re/if_rereg.h>
156 #include <dev/netif/re/if_revar.h>
157
158 #define RE_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
159
160 /*
161  * Various supported device vendors/types and their names.
162  */
163 static const struct re_type {
164         uint16_t        re_vid;
165         uint16_t        re_did;
166         const char      *re_name;
167 } re_devs[] = {
168         { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE528T,
169           "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
170
171         { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8139,
172           "RealTek 8139C+ 10/100BaseTX" },
173
174         { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8101E,
175           "RealTek 810x PCIe 10/100baseTX" },
176
177         { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8168,
178           "RealTek 8111/8168 PCIe Gigabit Ethernet" },
179
180         { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169,
181           "RealTek 8110/8169 Gigabit Ethernet" },
182
183         { PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169SC,
184           "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
185
186         { PCI_VENDOR_COREGA, PCI_PRODUCT_COREGA_CG_LAPCIGT,
187           "Corega CG-LAPCIGT Gigabit Ethernet" },
188
189         { PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1032,
190           "Linksys EG1032 Gigabit Ethernet" },
191
192         { PCI_VENDOR_USR2, PCI_PRODUCT_USR2_997902,
193           "US Robotics 997902 Gigabit Ethernet" },
194
195         { PCI_VENDOR_TTTECH, PCI_PRODUCT_TTTECH_MC322,
196           "TTTech MC322 Gigabit Ethernet" },
197
198         { 0, 0, NULL }
199 };
200
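/*
 * Chip revision map: hardware revision ID, MAC version, maximum MTU and
 * capability flags copied into the softc when the chip is identified.
 */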
201 static const struct re_hwrev re_hwrevs[] = {
202         { RE_HWREV_8139CPLUS,   RE_MACVER_UNKN,         ETHERMTU,
203           RE_C_HWCSUM | RE_C_8139CP | RE_C_FASTE },
204
205         { RE_HWREV_8169,        RE_MACVER_UNKN,         ETHERMTU,
206           RE_C_HWCSUM | RE_C_8169 },
207
208         { RE_HWREV_8110S,       RE_MACVER_03,           RE_MTU_6K,
209           RE_C_HWCSUM | RE_C_8169 },
210
211         { RE_HWREV_8169S,       RE_MACVER_03,           RE_MTU_6K,
212           RE_C_HWCSUM | RE_C_8169 },
213
214         { RE_HWREV_8169SB,      RE_MACVER_04,           RE_MTU_6K,
215           RE_C_HWCSUM | RE_C_PHYPMGT | RE_C_8169 },
216
217         { RE_HWREV_8169SC1,     RE_MACVER_05,           RE_MTU_6K,
218           RE_C_HWCSUM | RE_C_PHYPMGT | RE_C_8169 },
219
220         { RE_HWREV_8169SC2,     RE_MACVER_06,           RE_MTU_6K,
221           RE_C_HWCSUM | RE_C_PHYPMGT | RE_C_8169 },
222
223         { RE_HWREV_8168B1,      RE_MACVER_21,           RE_MTU_6K,
224           RE_C_HWIM | RE_C_HWCSUM | RE_C_PHYPMGT },
225
226         { RE_HWREV_8168B2,      RE_MACVER_23,           RE_MTU_6K,
227           RE_C_HWIM | RE_C_HWCSUM | RE_C_PHYPMGT | RE_C_AUTOPAD },
228
229         { RE_HWREV_8168B3,      RE_MACVER_23,           RE_MTU_6K,
230           RE_C_HWIM | RE_C_HWCSUM | RE_C_PHYPMGT | RE_C_AUTOPAD },
231
232         { RE_HWREV_8168C,       RE_MACVER_29,           RE_MTU_6K,
233           RE_C_HWIM | RE_C_HWCSUM | RE_C_MAC2 | RE_C_PHYPMGT |
234           RE_C_AUTOPAD | RE_C_CONTIGRX | RE_C_STOP_RXTX },
235
236         { RE_HWREV_8168CP,      RE_MACVER_2B,           RE_MTU_6K,
237           RE_C_HWIM | RE_C_HWCSUM | RE_C_MAC2 | RE_C_PHYPMGT |
238           RE_C_AUTOPAD | RE_C_CONTIGRX | RE_C_STOP_RXTX },
239
240         { RE_HWREV_8168D,       RE_MACVER_2A,           RE_MTU_9K,
241           RE_C_HWIM | RE_C_HWCSUM | RE_C_MAC2 | RE_C_PHYPMGT |
242           RE_C_AUTOPAD | RE_C_CONTIGRX | RE_C_STOP_RXTX },
243
244         { RE_HWREV_8168DP,      RE_MACVER_2D,           RE_MTU_9K,
245           RE_C_HWIM | RE_C_HWCSUM | RE_C_MAC2 | RE_C_PHYPMGT |
246           RE_C_AUTOPAD | RE_C_CONTIGRX | RE_C_STOP_RXTX },
247
248         { RE_HWREV_8168E,       RE_MACVER_UNKN,         RE_MTU_9K,
249           RE_C_HWIM | RE_C_HWCSUM | RE_C_MAC2 | RE_C_PHYPMGT |
250           RE_C_AUTOPAD | RE_C_CONTIGRX | RE_C_STOP_RXTX },
251
252         { RE_HWREV_8168F,       RE_MACVER_UNKN,         RE_MTU_9K,
253           RE_C_HWIM | RE_C_HWCSUM | RE_C_MAC2 | RE_C_PHYPMGT |
254           RE_C_AUTOPAD | RE_C_CONTIGRX | RE_C_STOP_RXTX },
255
256         { RE_HWREV_8111F,       RE_MACVER_UNKN,         RE_MTU_9K,
257           RE_C_HWIM | RE_C_HWCSUM | RE_C_MAC2 | RE_C_PHYPMGT |
258           RE_C_AUTOPAD | RE_C_CONTIGRX | RE_C_STOP_RXTX },
259
260         { RE_HWREV_8100E,       RE_MACVER_UNKN,         ETHERMTU,
261           RE_C_HWCSUM | RE_C_FASTE },
262
263         { RE_HWREV_8101E1,      RE_MACVER_16,           ETHERMTU,
264           RE_C_HWCSUM | RE_C_FASTE },
265
266         { RE_HWREV_8101E2,      RE_MACVER_16,           ETHERMTU,
267           RE_C_HWCSUM | RE_C_FASTE },
268
269         { RE_HWREV_8102E,       RE_MACVER_15,           ETHERMTU,
270           RE_C_HWCSUM | RE_C_MAC2 | RE_C_AUTOPAD | RE_C_STOP_RXTX |
271           RE_C_FASTE },
272
273         { RE_HWREV_8102EL,      RE_MACVER_15,           ETHERMTU,
274           RE_C_HWCSUM | RE_C_MAC2 | RE_C_AUTOPAD | RE_C_STOP_RXTX |
275           RE_C_FASTE },
276
277         { RE_HWREV_8105E,       RE_MACVER_UNKN,         ETHERMTU,
278           RE_C_HWCSUM | RE_C_MAC2 | RE_C_PHYPMGT | RE_C_AUTOPAD |
279           RE_C_STOP_RXTX | RE_C_FASTE },
280
281         { RE_HWREV_NULL, 0, 0, 0 }
282 };
283
284 static int      re_probe(device_t);
285 static int      re_attach(device_t);
286 static int      re_detach(device_t);
287 static int      re_suspend(device_t);
288 static int      re_resume(device_t);
289 static void     re_shutdown(device_t);
290
291 static int      re_allocmem(device_t);
292 static void     re_freemem(device_t);
293 static void     re_freebufmem(struct re_softc *, int, int);
294 static int      re_encap(struct re_softc *, struct mbuf **, int *);
295 static int      re_newbuf_std(struct re_softc *, int, int);
296 static int      re_newbuf_jumbo(struct re_softc *, int, int);
297 static void     re_setup_rxdesc(struct re_softc *, int);
298 static int      re_rx_list_init(struct re_softc *);
299 static int      re_tx_list_init(struct re_softc *);
300 static int      re_rxeof(struct re_softc *);
301 static int      re_txeof(struct re_softc *);
302 static int      re_tx_collect(struct re_softc *);
303 static void     re_intr(void *);
304 static void     re_tick(void *);
305 static void     re_tick_serialized(void *);
306
307 static void     re_start(struct ifnet *);
308 static int      re_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
309 static void     re_init(void *);
310 static void     re_stop(struct re_softc *);
311 static void     re_watchdog(struct ifnet *);
312 static int      re_ifmedia_upd(struct ifnet *);
313 static void     re_ifmedia_sts(struct ifnet *, struct ifmediareq *);
314
315 static void     re_eeprom_putbyte(struct re_softc *, int);
316 static void     re_eeprom_getword(struct re_softc *, int, u_int16_t *);
317 static void     re_read_eeprom(struct re_softc *, caddr_t, int, int);
318 static void     re_get_eewidth(struct re_softc *);
319
320 static int      re_gmii_readreg(device_t, int, int);
321 static int      re_gmii_writereg(device_t, int, int, int);
322
323 static int      re_miibus_readreg(device_t, int, int);
324 static int      re_miibus_writereg(device_t, int, int, int);
325 static void     re_miibus_statchg(device_t);
326
327 static void     re_setmulti(struct re_softc *);
328 static void     re_reset(struct re_softc *, int);
329 static void     re_get_eaddr(struct re_softc *, uint8_t *);
330
331 static void     re_setup_hw_im(struct re_softc *);
332 static void     re_setup_sim_im(struct re_softc *);
333 static void     re_disable_hw_im(struct re_softc *);
334 static void     re_disable_sim_im(struct re_softc *);
335 static void     re_config_imtype(struct re_softc *, int);
336 static void     re_setup_intr(struct re_softc *, int, int);
337
338 static int      re_sysctl_hwtime(SYSCTL_HANDLER_ARGS, int *);
339 static int      re_sysctl_rxtime(SYSCTL_HANDLER_ARGS);
340 static int      re_sysctl_txtime(SYSCTL_HANDLER_ARGS);
341 static int      re_sysctl_simtime(SYSCTL_HANDLER_ARGS);
342 static int      re_sysctl_imtype(SYSCTL_HANDLER_ARGS);
343
344 static int      re_jpool_alloc(struct re_softc *);
345 static void     re_jpool_free(struct re_softc *);
346 static struct re_jbuf *re_jbuf_alloc(struct re_softc *);
347 static void     re_jbuf_free(void *);
348 static void     re_jbuf_ref(void *);
349
350 #ifdef RE_DIAG
351 static int      re_diag(struct re_softc *);
352 #endif
353
354 #ifdef IFPOLL_ENABLE
355 static void     re_npoll(struct ifnet *, struct ifpoll_info *);
356 static void     re_npoll_compat(struct ifnet *, void *, int);
357 #endif
358
359 static device_method_t re_methods[] = {
360         /* Device interface */
361         DEVMETHOD(device_probe,         re_probe),
362         DEVMETHOD(device_attach,        re_attach),
363         DEVMETHOD(device_detach,        re_detach),
364         DEVMETHOD(device_suspend,       re_suspend),
365         DEVMETHOD(device_resume,        re_resume),
366         DEVMETHOD(device_shutdown,      re_shutdown),
367
368         /* bus interface */
369         DEVMETHOD(bus_print_child,      bus_generic_print_child),
370         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
371
372         /* MII interface */
373         DEVMETHOD(miibus_readreg,       re_miibus_readreg),
374         DEVMETHOD(miibus_writereg,      re_miibus_writereg),
375         DEVMETHOD(miibus_statchg,       re_miibus_statchg),
376
377         { 0, 0 }
378 };
379
380 static driver_t re_driver = {
381         "re",
382         re_methods,
383         sizeof(struct re_softc)
384 };
385
386 static devclass_t re_devclass;
387
388 DECLARE_DUMMY_MODULE(if_re);
389 MODULE_DEPEND(if_re, miibus, 1, 1, 1);
390 DRIVER_MODULE(if_re, pci, re_driver, re_devclass, NULL, NULL);
391 DRIVER_MODULE(if_re, cardbus, re_driver, re_devclass, NULL, NULL);
392 DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, NULL, NULL);
393
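/*
 * Tunables: RX/TX descriptor ring sizes and MSI enablement, settable
 * through the hw.re.* loader tunables registered below.
 */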
394 static int      re_rx_desc_count = RE_RX_DESC_CNT_DEF;
395 static int      re_tx_desc_count = RE_TX_DESC_CNT_DEF;
396 static int      re_msi_enable = 0;
397
398 TUNABLE_INT("hw.re.rx_desc_count", &re_rx_desc_count);
399 TUNABLE_INT("hw.re.tx_desc_count", &re_tx_desc_count);
400 TUNABLE_INT("hw.re.msi.enable", &re_msi_enable);
401
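/*
 * EE_SET()/EE_CLR() set or clear bits in the EEPROM command register;
 * they are used to bit-bang the serial EEPROM interface below.
 */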
402 #define EE_SET(x)       \
403         CSR_WRITE_1(sc, RE_EECMD, CSR_READ_1(sc, RE_EECMD) | (x))
404
405 #define EE_CLR(x)       \
406         CSR_WRITE_1(sc, RE_EECMD, CSR_READ_1(sc, RE_EECMD) & ~(x))
407
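/* Free any partially reassembled RX mbuf chain. */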
408 static __inline void
409 re_free_rxchain(struct re_softc *sc)
410 {
411         if (sc->re_head != NULL) {
412                 m_freem(sc->re_head);
413                 sc->re_head = sc->re_tail = NULL;
414         }
415 }
416
417 /*
418  * Send a read command and address to the EEPROM, check for ACK.
419  */
420 static void
421 re_eeprom_putbyte(struct re_softc *sc, int addr)
422 {
423         int d, i;
424
425         d = addr | (RE_9346_READ << sc->re_eewidth);
426
427         /*
428          * Feed in each bit and strobe the clock.
429          */
430         for (i = 1 << (sc->re_eewidth + 3); i; i >>= 1) {
431                 if (d & i)
432                         EE_SET(RE_EE_DATAIN);
433                 else
434                         EE_CLR(RE_EE_DATAIN);
435                 DELAY(100);
436                 EE_SET(RE_EE_CLK);
437                 DELAY(150);
438                 EE_CLR(RE_EE_CLK);
439                 DELAY(100);
440         }
441 }
442
443 /*
444  * Read a word of data stored in the EEPROM at address 'addr.'
445  */
446 static void
447 re_eeprom_getword(struct re_softc *sc, int addr, uint16_t *dest)
448 {
449         int i;
450         uint16_t word = 0;
451
452         /*
453          * Send address of word we want to read.
454          */
455         re_eeprom_putbyte(sc, addr);
456
457         /*
458          * Start reading bits from EEPROM.
459          */
460         for (i = 0x8000; i != 0; i >>= 1) {
461                 EE_SET(RE_EE_CLK);
462                 DELAY(100);
463                 if (CSR_READ_1(sc, RE_EECMD) & RE_EE_DATAOUT)
464                         word |= i;
465                 EE_CLR(RE_EE_CLK);
466                 DELAY(100);
467         }
468
469         *dest = word;
470 }
471
472 /*
473  * Read a sequence of words from the EEPROM.
474  */
475 static void
476 re_read_eeprom(struct re_softc *sc, caddr_t dest, int off, int cnt)
477 {
478         int i;
479         uint16_t word = 0, *ptr;
480
481         CSR_SETBIT_1(sc, RE_EECMD, RE_EEMODE_PROGRAM);
482         DELAY(100);
483
484         for (i = 0; i < cnt; i++) {
485                 CSR_SETBIT_1(sc, RE_EECMD, RE_EE_SEL);
486                 re_eeprom_getword(sc, off + i, &word);
487                 CSR_CLRBIT_1(sc, RE_EECMD, RE_EE_SEL);
488                 ptr = (uint16_t *)(dest + (i * 2));
489                 *ptr = word;
490         }
491
492         CSR_CLRBIT_1(sc, RE_EECMD, RE_EEMODE_PROGRAM);
493 }
494
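/*
 * Detect the EEPROM address width: try 6 address bits first and fall
 * back to 8 bits if the RealTek ID word (0x8129) is not found.
 */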
495 static void
496 re_get_eewidth(struct re_softc *sc)
497 {
498         uint16_t re_did = 0;
499
500         sc->re_eewidth = 6;
501         re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
502         if (re_did != 0x8129)
503                 sc->re_eewidth = 8;
504 }
505
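/* Read a PHY register through the GMII management interface (PHYAR). */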
506 static int
507 re_gmii_readreg(device_t dev, int phy, int reg)
508 {
509         struct re_softc *sc = device_get_softc(dev);
510         u_int32_t rval;
511         int i;
512
513         if (phy != 1)
514                 return(0);
515
516         /* Let the rgephy driver read the GMEDIASTAT register */
517
518         if (reg == RE_GMEDIASTAT)
519                 return(CSR_READ_1(sc, RE_GMEDIASTAT));
520
521         CSR_WRITE_4(sc, RE_PHYAR, reg << 16);
522         DELAY(1000);
523
524         for (i = 0; i < RE_TIMEOUT; i++) {
525                 rval = CSR_READ_4(sc, RE_PHYAR);
526                 if (rval & RE_PHYAR_BUSY)
527                         break;
528                 DELAY(100);
529         }
530
531         if (i == RE_TIMEOUT) {
532                 device_printf(dev, "PHY read failed\n");
533                 return(0);
534         }
535
536         return(rval & RE_PHYAR_PHYDATA);
537 }
538
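/* Write a PHY register through the GMII management interface (PHYAR). */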
539 static int
540 re_gmii_writereg(device_t dev, int phy, int reg, int data)
541 {
542         struct re_softc *sc = device_get_softc(dev);
543         uint32_t rval;
544         int i;
545
546         CSR_WRITE_4(sc, RE_PHYAR,
547                     (reg << 16) | (data & RE_PHYAR_PHYDATA) | RE_PHYAR_BUSY);
548         DELAY(1000);
549
550         for (i = 0; i < RE_TIMEOUT; i++) {
551                 rval = CSR_READ_4(sc, RE_PHYAR);
552                 if ((rval & RE_PHYAR_BUSY) == 0)
553                         break;
554                 DELAY(100);
555         }
556
557         if (i == RE_TIMEOUT)
558                 device_printf(dev, "PHY write failed\n");
559
560         return(0);
561 }
562
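/*
 * MII read: 8169-class chips go through the GMII interface; on the
 * 8139C+ the MII registers are mapped onto chip register space.
 */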
563 static int
564 re_miibus_readreg(device_t dev, int phy, int reg)
565 {
566         struct re_softc *sc = device_get_softc(dev);
567         uint16_t rval = 0;
568         uint16_t re8139_reg = 0;
569
570         if (!RE_IS_8139CP(sc)) {
571                 rval = re_gmii_readreg(dev, phy, reg);
572                 return(rval);
573         }
574
575         /* Pretend the internal PHY is only at address 0 */
576         if (phy)
577                 return(0);
578
579         switch(reg) {
580         case MII_BMCR:
581                 re8139_reg = RE_BMCR;
582                 break;
583         case MII_BMSR:
584                 re8139_reg = RE_BMSR;
585                 break;
586         case MII_ANAR:
587                 re8139_reg = RE_ANAR;
588                 break;
589         case MII_ANER:
590                 re8139_reg = RE_ANER;
591                 break;
592         case MII_ANLPAR:
593                 re8139_reg = RE_LPAR;
594                 break;
595         case MII_PHYIDR1:
596         case MII_PHYIDR2:
597                 return(0);
598         /*
599          * Allow the rlphy driver to read the media status
600          * register. If we have a link partner which does not
601          * support NWAY, this is the register which will tell
602          * us the results of parallel detection.
603          */
604         case RE_MEDIASTAT:
605                 return(CSR_READ_1(sc, RE_MEDIASTAT));
606         default:
607                 device_printf(dev, "bad phy register\n");
608                 return(0);
609         }
610         rval = CSR_READ_2(sc, re8139_reg);
611         if (re8139_reg == RE_BMCR) {
612                 /* 8139C+ has different bit layout. */
613                 rval &= ~(BMCR_LOOP | BMCR_ISO);
614         }
615         return(rval);
616 }
617
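/* MII write counterpart of re_miibus_readreg() above. */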
618 static int
619 re_miibus_writereg(device_t dev, int phy, int reg, int data)
620 {
621         struct re_softc *sc= device_get_softc(dev);
622         u_int16_t re8139_reg = 0;
623
624         if (!RE_IS_8139CP(sc))
625                 return(re_gmii_writereg(dev, phy, reg, data));
626
627         /* Pretend the internal PHY is only at address 0 */
628         if (phy)
629                 return(0);
630
631         switch(reg) {
632         case MII_BMCR:
633                 re8139_reg = RE_BMCR;
634                 /* 8139C+ has different bit layout. */
635                 data &= ~(BMCR_LOOP | BMCR_ISO);
636                 break;
637         case MII_BMSR:
638                 re8139_reg = RE_BMSR;
639                 break;
640         case MII_ANAR:
641                 re8139_reg = RE_ANAR;
642                 break;
643         case MII_ANER:
644                 re8139_reg = RE_ANER;
645                 break;
646         case MII_ANLPAR:
647                 re8139_reg = RE_LPAR;
648                 break;
649         case MII_PHYIDR1:
650         case MII_PHYIDR2:
651                 return(0);
652         default:
653                 device_printf(dev, "bad phy register\n");
654                 return(0);
655         }
656         CSR_WRITE_2(sc, re8139_reg, data);
657         return(0);
658 }
659
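/* MII status change callback; nothing to do for this hardware. */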
660 static void
661 re_miibus_statchg(device_t dev)
662 {
663 }
664
665 /*
666  * Program the 64-bit multicast hash filter.
667  */
668 static void
669 re_setmulti(struct re_softc *sc)
670 {
671         struct ifnet *ifp = &sc->arpcom.ac_if;
672         int h = 0;
673         uint32_t hashes[2] = { 0, 0 };
674         struct ifmultiaddr *ifma;
675         uint32_t rxfilt;
676         int mcnt = 0;
677
678         rxfilt = CSR_READ_4(sc, RE_RXCFG);
679
680         /* Set the individual bit to receive frames for this host only. */
681         rxfilt |= RE_RXCFG_RX_INDIV;
682         /* Set capture broadcast bit to capture broadcast frames. */
683         rxfilt |= RE_RXCFG_RX_BROAD;
684
685         rxfilt &= ~(RE_RXCFG_RX_ALLPHYS | RE_RXCFG_RX_MULTI);
686         if ((ifp->if_flags & IFF_ALLMULTI) || (ifp->if_flags & IFF_PROMISC)) {
687                 rxfilt |= RE_RXCFG_RX_MULTI;
688
689                 /* If we want promiscuous mode, set the allframes bit. */
690                 if (ifp->if_flags & IFF_PROMISC)
691                         rxfilt |= RE_RXCFG_RX_ALLPHYS;
692
693                 CSR_WRITE_4(sc, RE_RXCFG, rxfilt);
694                 CSR_WRITE_4(sc, RE_MAR0, 0xFFFFFFFF);
695                 CSR_WRITE_4(sc, RE_MAR4, 0xFFFFFFFF);
696                 return;
697         }
698
699         /* first, zot all the existing hash bits */
700         CSR_WRITE_4(sc, RE_MAR0, 0);
701         CSR_WRITE_4(sc, RE_MAR4, 0);
702
703         /* now program new ones */
704         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
705                 if (ifma->ifma_addr->sa_family != AF_LINK)
706                         continue;
707                 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
708                     ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
709                 if (h < 32)
710                         hashes[0] |= (1 << h);
711                 else
712                         hashes[1] |= (1 << (h - 32));
713                 mcnt++;
714         }
715
716         if (mcnt)
717                 rxfilt |= RE_RXCFG_RX_MULTI;
718         else
719                 rxfilt &= ~RE_RXCFG_RX_MULTI;
720
721         CSR_WRITE_4(sc, RE_RXCFG, rxfilt);
722
723         /*
724          * For some unfathomable reason, RealTek decided to reverse
725          * the order of the multicast hash registers in the PCI Express
726          * parts. This means we have to write the hash pattern in reverse
727          * order for those devices.
728          */
729         if (sc->re_caps & RE_C_PCIE) {
730                 CSR_WRITE_4(sc, RE_MAR0, bswap32(hashes[1]));
731                 CSR_WRITE_4(sc, RE_MAR4, bswap32(hashes[0]));
732         } else {
733                 CSR_WRITE_4(sc, RE_MAR0, hashes[0]);
734                 CSR_WRITE_4(sc, RE_MAR4, hashes[1]);
735         }
736 }
737
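/*
 * Software-reset the chip.  On chips that support it, request RX/TX stop
 * first if the interface is running, then wait for the reset bit to clear.
 */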
738 static void
739 re_reset(struct re_softc *sc, int running)
740 {
741         int i;
742
743         if ((sc->re_caps & RE_C_STOP_RXTX) && running) {
744                 CSR_WRITE_1(sc, RE_COMMAND,
745                             RE_CMD_STOPREQ | RE_CMD_TX_ENB | RE_CMD_RX_ENB);
746                 DELAY(100);
747         }
748
749         CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_RESET);
750
751         for (i = 0; i < RE_TIMEOUT; i++) {
752                 DELAY(10);
753                 if ((CSR_READ_1(sc, RE_COMMAND) & RE_CMD_RESET) == 0)
754                         break;
755         }
756         if (i == RE_TIMEOUT)
757                 if_printf(&sc->arpcom.ac_if, "reset never completed!\n");
758 }
759
760 #ifdef RE_DIAG
761 /*
762  * The following routine is designed to test for a defect on some
763  * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
764  * lines connected to the bus, however for a 32-bit only card, they
765  * should be pulled high. The result of this defect is that the
766  * NIC will not work right if you plug it into a 64-bit slot: DMA
767  * operations will be done with 64-bit transfers, which will fail
768  * because the 64-bit data lines aren't connected.
769  *
770  * There's no way to work around this (short of taking a soldering
771  * iron to the board), however we can detect it. The method we use
772  * here is to put the NIC into digital loopback mode, set the receiver
773  * to promiscuous mode, and then try to send a frame. We then compare
774  * the frame data we sent to what was received. If the data matches,
775  * then the NIC is working correctly, otherwise we know the user has
776  * a defective NIC which has been mistakenly plugged into a 64-bit PCI
777  * slot. In the latter case, there's no way the NIC can work correctly,
778  * so we print out a message on the console and abort the device attach.
779  */
780
781 static int
782 re_diag(struct re_softc *sc)
783 {
784         struct ifnet *ifp = &sc->arpcom.ac_if;
785         struct mbuf *m0;
786         struct ether_header *eh;
787         struct re_desc *cur_rx;
788         uint16_t status;
789         int total_len, i, error = 0, phyaddr;
790         uint8_t dst[ETHER_ADDR_LEN] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
791         uint8_t src[ETHER_ADDR_LEN] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
792
793         /* Allocate a single mbuf */
794
795         MGETHDR(m0, MB_DONTWAIT, MT_DATA);
796         if (m0 == NULL)
797                 return(ENOBUFS);
798
799         /*
800          * Initialize the NIC in test mode. This sets the chip up
801          * so that it can send and receive frames, but performs the
802          * following special functions:
803          * - Puts receiver in promiscuous mode
804          * - Enables digital loopback mode
805          * - Leaves interrupts turned off
806          */
807
808         ifp->if_flags |= IFF_PROMISC;
809         sc->re_flags |= RE_F_TESTMODE;
810         re_init(sc);
811         sc->re_flags |= RE_F_LINKED;
812         if (!RE_IS_8139CP(sc))
813                 phyaddr = 1;
814         else
815                 phyaddr = 0;
816
817         re_miibus_writereg(sc->re_dev, phyaddr, MII_BMCR, BMCR_RESET);
818         for (i = 0; i < RE_TIMEOUT; i++) {
819                 status = re_miibus_readreg(sc->re_dev, phyaddr, MII_BMCR);
820                 if (!(status & BMCR_RESET))
821                         break;
822         }
823
824         re_miibus_writereg(sc->re_dev, phyaddr, MII_BMCR, BMCR_LOOP);
825         CSR_WRITE_2(sc, RE_ISR, RE_INTRS_DIAG);
826
827         DELAY(100000);
828
829         /* Put some data in the mbuf */
830
831         eh = mtod(m0, struct ether_header *);
832         bcopy (dst, eh->ether_dhost, ETHER_ADDR_LEN);
833         bcopy (src, eh->ether_shost, ETHER_ADDR_LEN);
834         eh->ether_type = htons(ETHERTYPE_IP);
835         m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;
836
837         /*
838          * Queue the packet, start transmission.
839          * Note: ifq_handoff() ultimately calls re_start() for us.
840          */
841
842         CSR_WRITE_2(sc, RE_ISR, 0xFFFF);
843         error = ifq_handoff(ifp, m0, NULL);
844         if (error) {
845                 m0 = NULL;
846                 goto done;
847         }
848         m0 = NULL;
849
850         /* Wait for it to propagate through the chip */
851
852         DELAY(100000);
853         for (i = 0; i < RE_TIMEOUT; i++) {
854                 status = CSR_READ_2(sc, RE_ISR);
855                 CSR_WRITE_2(sc, RE_ISR, status);
856                 if ((status & (RE_ISR_TIMEOUT_EXPIRED|RE_ISR_RX_OK)) ==
857                     (RE_ISR_TIMEOUT_EXPIRED|RE_ISR_RX_OK))
858                         break;
859                 DELAY(10);
860         }
861
862         if (i == RE_TIMEOUT) {
863                 if_printf(ifp, "diagnostic failed to receive packet "
864                           "in loopback mode\n");
865                 error = EIO;
866                 goto done;
867         }
868
869         /*
870          * The packet should have been dumped into the first
871          * entry in the RX DMA ring. Grab it from there.
872          */
873
874         bus_dmamap_sync(sc->re_ldata.re_rx_mtag, sc->re_ldata.re_rx_dmamap[0],
875                         BUS_DMASYNC_POSTREAD);
876         bus_dmamap_unload(sc->re_ldata.re_rx_mtag,
877                           sc->re_ldata.re_rx_dmamap[0]);
878
879         m0 = sc->re_ldata.re_rx_mbuf[0];
880         sc->re_ldata.re_rx_mbuf[0] = NULL;
881         eh = mtod(m0, struct ether_header *);
882
883         cur_rx = &sc->re_ldata.re_rx_list[0];
884         total_len = RE_RXBYTES(cur_rx);
885
886         if (total_len != ETHER_MIN_LEN) {
887                 if_printf(ifp, "diagnostic failed, received short packet\n");
888                 error = EIO;
889                 goto done;
890         }
891
892         /* Test that the received packet data matches what we sent. */
893
894         if (bcmp(eh->ether_dhost, dst, ETHER_ADDR_LEN) ||
895             bcmp(eh->ether_shost, &src, ETHER_ADDR_LEN) ||
896             be16toh(eh->ether_type) != ETHERTYPE_IP) {
897                 if_printf(ifp, "WARNING, DMA FAILURE!\n");
898                 if_printf(ifp, "expected TX data: %6D/%6D/0x%x\n",
899                     dst, ":", src, ":", ETHERTYPE_IP);
900                 if_printf(ifp, "received RX data: %6D/%6D/0x%x\n",
901                     eh->ether_dhost, ":",  eh->ether_shost, ":",
902                     ntohs(eh->ether_type));
903                 if_printf(ifp, "You may have a defective 32-bit NIC plugged "
904                     "into a 64-bit PCI slot.\n");
905                 if_printf(ifp, "Please re-install the NIC in a 32-bit slot "
906                     "for proper operation.\n");
907                 if_printf(ifp, "Read the re(4) man page for more details.\n");
908                 error = EIO;
909         }
910
911 done:
912         /* Turn interface off, release resources */
913
914         sc->re_flags &= ~(RE_F_LINKED | RE_F_TESTMODE);
915         ifp->if_flags &= ~IFF_PROMISC;
916         re_stop(sc);
917         if (m0 != NULL)
918                 m_freem(m0);
919
920         return (error);
921 }
922 #endif  /* RE_DIAG */
923
924 /*
925  * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
926  * IDs against our list and return a device name if we find a match.
927  */
928 static int
929 re_probe(device_t dev)
930 {
931         const struct re_type *t;
932         const struct re_hwrev *hw_rev;
933         struct re_softc *sc;
934         int rid;
935         uint32_t hwrev, macmode, txcfg;
936         uint16_t vendor, product;
937
938         vendor = pci_get_vendor(dev);
939         product = pci_get_device(dev);
940
941         /*
942          * Only attach to rev.3 of the Linksys EG1032 adapter.
943          * Rev.2 is supported by sk(4).
944          */
945         if (vendor == PCI_VENDOR_LINKSYS &&
946             product == PCI_PRODUCT_LINKSYS_EG1032 &&
947             pci_get_subdevice(dev) != PCI_SUBDEVICE_LINKSYS_EG1032_REV3)
948                 return ENXIO;
949
950         if (vendor == PCI_VENDOR_REALTEK &&
951             product == PCI_PRODUCT_REALTEK_RT8139 &&
952             pci_get_revid(dev) != PCI_REVID_REALTEK_RT8139CP) {
953                 /* Plain 8139, not the C+; not supported by this driver */
954                 return ENXIO;
955         }
956
957         for (t = re_devs; t->re_name != NULL; t++) {
958                 if (product == t->re_did && vendor == t->re_vid)
959                         break;
960         }
961
962         /*
963          * Check if we found a RealTek device.
964          */
965         if (t->re_name == NULL)
966                 return ENXIO;
967
968         /*
969          * Temporarily map the I/O space so we can read the chip ID register.
970          */
971         sc = kmalloc(sizeof(*sc), M_TEMP, M_WAITOK | M_ZERO);
972         rid = RE_PCI_LOIO;
973         sc->re_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
974                                             RF_ACTIVE);
975         if (sc->re_res == NULL) {
976                 device_printf(dev, "couldn't map ports/memory\n");
977                 kfree(sc, M_TEMP);
978                 return ENXIO;
979         }
980
981         sc->re_btag = rman_get_bustag(sc->re_res);
982         sc->re_bhandle = rman_get_bushandle(sc->re_res);
983
984         txcfg = CSR_READ_4(sc, RE_TXCFG);
985         hwrev = txcfg & RE_TXCFG_HWREV;
986         macmode = txcfg & RE_TXCFG_MACMODE;
987         bus_release_resource(dev, SYS_RES_IOPORT, RE_PCI_LOIO, sc->re_res);
988         kfree(sc, M_TEMP);
989
990         /*
991          * and continue matching for the specific chip...
992          */
993         for (hw_rev = re_hwrevs; hw_rev->re_hwrev != RE_HWREV_NULL; hw_rev++) {
994                 if (hw_rev->re_hwrev == hwrev) {
995                         sc = device_get_softc(dev);
996
997                         sc->re_hwrev = hw_rev->re_hwrev;
998                         sc->re_macver = hw_rev->re_macver;
999                         sc->re_caps = hw_rev->re_caps;
1000                         sc->re_maxmtu = hw_rev->re_maxmtu;
1001
1002                         /*
1003                          * Apply chip property fixups; MACMODE selects the MAC version
1004                          */
1005                         switch (sc->re_hwrev) {
1006                         case RE_HWREV_8101E1:
1007                         case RE_HWREV_8101E2:
1008                                 if (macmode == 0)
1009                                         sc->re_macver = RE_MACVER_11;
1010                                 else if (macmode == 0x200000)
1011                                         sc->re_macver = RE_MACVER_12;
1012                                 break;
1013                         case RE_HWREV_8102E:
1014                         case RE_HWREV_8102EL:
1015                                 if (macmode == 0)
1016                                         sc->re_macver = RE_MACVER_13;
1017                                 else if (macmode == 0x100000)
1018                                         sc->re_macver = RE_MACVER_14;
1019                                 break;
1020                         case RE_HWREV_8168B2:
1021                         case RE_HWREV_8168B3:
1022                                 if (macmode == 0)
1023                                         sc->re_macver = RE_MACVER_22;
1024                                 break;
1025                         case RE_HWREV_8168C:
1026                                 if (macmode == 0)
1027                                         sc->re_macver = RE_MACVER_24;
1028                                 else if (macmode == 0x200000)
1029                                         sc->re_macver = RE_MACVER_25;
1030                                 else if (macmode == 0x300000)
1031                                         sc->re_macver = RE_MACVER_27;
1032                                 break;
1033                         case RE_HWREV_8168CP:
1034                                 if (macmode == 0)
1035                                         sc->re_macver = RE_MACVER_26;
1036                                 else if (macmode == 0x100000)
1037                                         sc->re_macver = RE_MACVER_28;
1038                                 break;
1039                         case RE_HWREV_8168DP:
1040                                 if (macmode == 0)
1041                                         sc->re_macver = RE_MACVER_2B;
1042                                 else if (macmode == 0x200000)
1043                                         sc->re_macver = RE_MACVER_2C;
1044                                 break;
1045                         case RE_HWREV_8168E:
1046                                 if (macmode == 0x100000)
1047                                         sc->re_macver = RE_MACVER_2E;
1048                                 else if (macmode == 0x200000)
1049                                         sc->re_macver = RE_MACVER_2F;
1050                                 break;
1051                         case RE_HWREV_8168F:
1052                         case RE_HWREV_8111F:
1053                                 if (macmode == 0x000000)
1054                                         sc->re_macver = RE_MACVER_30;
1055                                 else if (macmode == 0x100000)
1056                                         sc->re_macver = RE_MACVER_31;
1057                                 break;
1058                         }
1059                         if (pci_is_pcie(dev))
1060                                 sc->re_caps |= RE_C_PCIE;
1061
1062                         device_set_desc(dev, t->re_name);
1063                         return 0;
1064                 }
1065         }
1066
1067         if (bootverbose) {
1068                 device_printf(dev, "unknown hwrev 0x%08x, macmode 0x%08x\n",
1069                               hwrev, macmode);
1070         }
1071         return ENXIO;
1072 }
1073
1074 static int
1075 re_allocmem(device_t dev)
1076 {
1077         struct re_softc *sc = device_get_softc(dev);
1078         bus_dmamem_t dmem;
1079         int error, i;
1080
1081         /*
1082          * Allocate list data
1083          */
1084         sc->re_ldata.re_tx_mbuf =
1085         kmalloc(sc->re_tx_desc_cnt * sizeof(struct mbuf *),
1086                 M_DEVBUF, M_ZERO | M_WAITOK);
1087
1088         sc->re_ldata.re_rx_mbuf =
1089         kmalloc(sc->re_rx_desc_cnt * sizeof(struct mbuf *),
1090                 M_DEVBUF, M_ZERO | M_WAITOK);
1091
1092         sc->re_ldata.re_rx_paddr =
1093         kmalloc(sc->re_rx_desc_cnt * sizeof(bus_addr_t),
1094                 M_DEVBUF, M_ZERO | M_WAITOK);
1095
1096         sc->re_ldata.re_tx_dmamap =
1097         kmalloc(sc->re_tx_desc_cnt * sizeof(bus_dmamap_t),
1098                 M_DEVBUF, M_ZERO | M_WAITOK);
1099
1100         sc->re_ldata.re_rx_dmamap =
1101         kmalloc(sc->re_rx_desc_cnt * sizeof(bus_dmamap_t),
1102                 M_DEVBUF, M_ZERO | M_WAITOK);
1103
1104         /*
1105          * Allocate the parent bus DMA tag appropriate for PCI.
1106          */
1107         error = bus_dma_tag_create(NULL,        /* parent */
1108                         1, 0,                   /* alignment, boundary */
1109                         BUS_SPACE_MAXADDR,      /* lowaddr */
1110                         BUS_SPACE_MAXADDR,      /* highaddr */
1111                         NULL, NULL,             /* filter, filterarg */
1112                         BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
1113                         0,                      /* nsegments */
1114                         BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1115                         0,                      /* flags */
1116                         &sc->re_parent_tag);
1117         if (error) {
1118                 device_printf(dev, "could not allocate parent dma tag\n");
1119                 return error;
1120         }
1121
1122         /* Allocate TX descriptor list. */
1123         error = bus_dmamem_coherent(sc->re_parent_tag,
1124                         RE_RING_ALIGN, 0,
1125                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1126                         RE_TX_LIST_SZ(sc), BUS_DMA_WAITOK | BUS_DMA_ZERO,
1127                         &dmem);
1128         if (error) {
1129                 device_printf(dev, "could not allocate TX ring\n");
1130                 return error;
1131         }
1132         sc->re_ldata.re_tx_list_tag = dmem.dmem_tag;
1133         sc->re_ldata.re_tx_list_map = dmem.dmem_map;
1134         sc->re_ldata.re_tx_list = dmem.dmem_addr;
1135         sc->re_ldata.re_tx_list_addr = dmem.dmem_busaddr;
1136
1137         /* Allocate RX descriptor list. */
1138         error = bus_dmamem_coherent(sc->re_parent_tag,
1139                         RE_RING_ALIGN, 0,
1140                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1141                         RE_RX_LIST_SZ(sc), BUS_DMA_WAITOK | BUS_DMA_ZERO,
1142                         &dmem);
1143         if (error) {
1144                 device_printf(dev, "could not allocate RX ring\n");
1145                 return error;
1146         }
1147         sc->re_ldata.re_rx_list_tag = dmem.dmem_tag;
1148         sc->re_ldata.re_rx_list_map = dmem.dmem_map;
1149         sc->re_ldata.re_rx_list = dmem.dmem_addr;
1150         sc->re_ldata.re_rx_list_addr = dmem.dmem_busaddr;
1151
1152         /* Allocate maps for TX mbufs. */
1153         error = bus_dma_tag_create(sc->re_parent_tag,
1154                         1, 0,
1155                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1156                         NULL, NULL,
1157                         RE_FRAMELEN_MAX, RE_MAXSEGS, MCLBYTES,
1158                         BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1159                         &sc->re_ldata.re_tx_mtag);
1160         if (error) {
1161                 device_printf(dev, "could not allocate TX buf dma tag\n");
1162                 return(error);
1163         }
1164
1165         /* Create DMA maps for TX buffers */
1166         for (i = 0; i < sc->re_tx_desc_cnt; i++) {
1167                 error = bus_dmamap_create(sc->re_ldata.re_tx_mtag,
1168                                 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1169                                 &sc->re_ldata.re_tx_dmamap[i]);
1170                 if (error) {
1171                         device_printf(dev, "can't create DMA map for TX buf\n");
1172                         re_freebufmem(sc, i, 0);
1173                         return(error);
1174                 }
1175         }
1176
1177         /* Allocate maps for RX mbufs. */
1178         error = bus_dma_tag_create(sc->re_parent_tag,
1179                         RE_RXBUF_ALIGN, 0,
1180                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1181                         NULL, NULL,
1182                         MCLBYTES, 1, MCLBYTES,
1183                         BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ALIGNED,
1184                         &sc->re_ldata.re_rx_mtag);
1185         if (error) {
1186                 device_printf(dev, "could not allocate RX buf dma tag\n");
1187                 return(error);
1188         }
1189
1190         /* Create spare DMA map for RX */
1191         error = bus_dmamap_create(sc->re_ldata.re_rx_mtag, BUS_DMA_WAITOK,
1192                         &sc->re_ldata.re_rx_spare);
1193         if (error) {
1194                 device_printf(dev, "can't create spare DMA map for RX\n");
1195                 bus_dma_tag_destroy(sc->re_ldata.re_rx_mtag);
1196                 sc->re_ldata.re_rx_mtag = NULL;
1197                 return error;
1198         }
1199
1200         /* Create DMA maps for RX buffers */
1201         for (i = 0; i < sc->re_rx_desc_cnt; i++) {
1202                 error = bus_dmamap_create(sc->re_ldata.re_rx_mtag,
1203                                 BUS_DMA_WAITOK, &sc->re_ldata.re_rx_dmamap[i]);
1204                 if (error) {
1205                         device_printf(dev, "can't create DMA map for RX buf\n");
1206                         re_freebufmem(sc, sc->re_tx_desc_cnt, i);
1207                         return(error);
1208                 }
1209         }
1210
1211         /* Create jumbo buffer pool for RX if required */
1212         if (sc->re_caps & RE_C_CONTIGRX) {
1213                 error = re_jpool_alloc(sc);
1214                 if (error) {
1215                         re_jpool_free(sc);
1216                         /* Disable jumbo frame support */
1217                         sc->re_maxmtu = ETHERMTU;
1218                 }
1219         }
1220         return(0);
1221 }
1222
1223 static void
1224 re_freebufmem(struct re_softc *sc, int tx_cnt, int rx_cnt)
1225 {
1226         int i;
1227
1228         /* Destroy all the RX and TX buffer maps */
1229         if (sc->re_ldata.re_tx_mtag) {
1230                 for (i = 0; i < tx_cnt; i++) {
1231                         bus_dmamap_destroy(sc->re_ldata.re_tx_mtag,
1232                                            sc->re_ldata.re_tx_dmamap[i]);
1233                 }
1234                 bus_dma_tag_destroy(sc->re_ldata.re_tx_mtag);
1235                 sc->re_ldata.re_tx_mtag = NULL;
1236         }
1237
1238         if (sc->re_ldata.re_rx_mtag) {
1239                 for (i = 0; i < rx_cnt; i++) {
1240                         bus_dmamap_destroy(sc->re_ldata.re_rx_mtag,
1241                                            sc->re_ldata.re_rx_dmamap[i]);
1242                 }
1243                 bus_dmamap_destroy(sc->re_ldata.re_rx_mtag,
1244                                    sc->re_ldata.re_rx_spare);
1245                 bus_dma_tag_destroy(sc->re_ldata.re_rx_mtag);
1246                 sc->re_ldata.re_rx_mtag = NULL;
1247         }
1248 }
1249
1250 static void
1251 re_freemem(device_t dev)
1252 {
1253         struct re_softc *sc = device_get_softc(dev);
1254
1255         /* Unload and free the RX DMA ring memory and map */
1256         if (sc->re_ldata.re_rx_list_tag) {
1257                 bus_dmamap_unload(sc->re_ldata.re_rx_list_tag,
1258                                   sc->re_ldata.re_rx_list_map);
1259                 bus_dmamem_free(sc->re_ldata.re_rx_list_tag,
1260                                 sc->re_ldata.re_rx_list,
1261                                 sc->re_ldata.re_rx_list_map);
1262                 bus_dma_tag_destroy(sc->re_ldata.re_rx_list_tag);
1263         }
1264
1265         /* Unload and free the TX DMA ring memory and map */
1266         if (sc->re_ldata.re_tx_list_tag) {
1267                 bus_dmamap_unload(sc->re_ldata.re_tx_list_tag,
1268                                   sc->re_ldata.re_tx_list_map);
1269                 bus_dmamem_free(sc->re_ldata.re_tx_list_tag,
1270                                 sc->re_ldata.re_tx_list,
1271                                 sc->re_ldata.re_tx_list_map);
1272                 bus_dma_tag_destroy(sc->re_ldata.re_tx_list_tag);
1273         }
1274
1275         /* Free RX/TX buffer DMA resources */
1276         re_freebufmem(sc, sc->re_tx_desc_cnt, sc->re_rx_desc_cnt);
1277
1278         /* Unload and free the stats buffer and map */
1279         if (sc->re_ldata.re_stag) {
1280                 bus_dmamap_unload(sc->re_ldata.re_stag, sc->re_ldata.re_smap);
1281                 bus_dmamem_free(sc->re_ldata.re_stag,
1282                                 sc->re_ldata.re_stats,
1283                                 sc->re_ldata.re_smap);
1284                 bus_dma_tag_destroy(sc->re_ldata.re_stag);
1285         }
1286
1287         if (sc->re_caps & RE_C_CONTIGRX)
1288                 re_jpool_free(sc);
1289
1290         if (sc->re_parent_tag)
1291                 bus_dma_tag_destroy(sc->re_parent_tag);
1292
1293         if (sc->re_ldata.re_tx_mbuf != NULL)
1294                 kfree(sc->re_ldata.re_tx_mbuf, M_DEVBUF);
1295         if (sc->re_ldata.re_rx_mbuf != NULL)
1296                 kfree(sc->re_ldata.re_rx_mbuf, M_DEVBUF);
1297         if (sc->re_ldata.re_rx_paddr != NULL)
1298                 kfree(sc->re_ldata.re_rx_paddr, M_DEVBUF);
1299         if (sc->re_ldata.re_tx_dmamap != NULL)
1300                 kfree(sc->re_ldata.re_tx_dmamap, M_DEVBUF);
1301         if (sc->re_ldata.re_rx_dmamap != NULL)
1302                 kfree(sc->re_ldata.re_rx_dmamap, M_DEVBUF);
1303 }
1304
1305 /*
1306  * Attach the interface. Allocate softc structures, do ifmedia
1307  * setup and ethernet/BPF attach.
1308  */
1309 static int
1310 re_attach(device_t dev)
1311 {
1312         struct re_softc *sc = device_get_softc(dev);
1313         struct ifnet *ifp;
1314         uint8_t eaddr[ETHER_ADDR_LEN];
1315         int error = 0, rid, qlen;
1316         u_int irq_flags;
1317
1318         callout_init(&sc->re_timer);
1319         sc->re_dev = dev;
1320
1321         if (RE_IS_8139CP(sc)) {
1322                 sc->re_rx_desc_cnt = RE_RX_DESC_CNT_8139CP;
1323                 sc->re_tx_desc_cnt = RE_TX_DESC_CNT_8139CP;
1324         } else {
1325                 sc->re_rx_desc_cnt = re_rx_desc_count;
1326                 if (sc->re_rx_desc_cnt > RE_RX_DESC_CNT_MAX)
1327                         sc->re_rx_desc_cnt = RE_RX_DESC_CNT_MAX;
1328
1329                 sc->re_tx_desc_cnt = re_tx_desc_count;
1330                 if (sc->re_tx_desc_cnt > RE_TX_DESC_CNT_MAX)
1331                         sc->re_tx_desc_cnt = RE_TX_DESC_CNT_MAX;
1332         }
1333
1334         qlen = RE_IFQ_MAXLEN;
1335         if (sc->re_tx_desc_cnt > qlen)
1336                 qlen = sc->re_tx_desc_cnt;
1337
1338         sc->re_rxbuf_size = MCLBYTES;
1339         sc->re_newbuf = re_newbuf_std;
1340
1341         sc->re_tx_time = 5;             /* 125us */
1342         sc->re_rx_time = 2;             /* 50us */
1343         if (sc->re_caps & RE_C_PCIE)
1344                 sc->re_sim_time = 75;   /* 75us */
1345         else
1346                 sc->re_sim_time = 125;  /* 125us */
1347         if (!RE_IS_8139CP(sc)) {
1348                 /* simulated interrupt moderation */
1349                 sc->re_imtype = RE_IMTYPE_SIM;
1350         } else {
1351                 sc->re_imtype = RE_IMTYPE_NONE;
1352         }
1353         re_config_imtype(sc, sc->re_imtype);
1354
1355         sysctl_ctx_init(&sc->re_sysctl_ctx);
1356         sc->re_sysctl_tree = SYSCTL_ADD_NODE(&sc->re_sysctl_ctx,
1357                                              SYSCTL_STATIC_CHILDREN(_hw),
1358                                              OID_AUTO,
1359                                              device_get_nameunit(dev),
1360                                              CTLFLAG_RD, 0, "");
1361         if (sc->re_sysctl_tree == NULL) {
1362                 device_printf(dev, "can't add sysctl node\n");
1363                 error = ENXIO;
1364                 goto fail;
1365         }
1366         SYSCTL_ADD_INT(&sc->re_sysctl_ctx,
1367                        SYSCTL_CHILDREN(sc->re_sysctl_tree), OID_AUTO,
1368                        "rx_desc_count", CTLFLAG_RD, &sc->re_rx_desc_cnt,
1369                        0, "RX desc count");
1370         SYSCTL_ADD_INT(&sc->re_sysctl_ctx,
1371                        SYSCTL_CHILDREN(sc->re_sysctl_tree), OID_AUTO,
1372                        "tx_desc_count", CTLFLAG_RD, &sc->re_tx_desc_cnt,
1373                        0, "TX desc count");
1374         SYSCTL_ADD_PROC(&sc->re_sysctl_ctx,
1375                         SYSCTL_CHILDREN(sc->re_sysctl_tree),
1376                         OID_AUTO, "sim_time",
1377                         CTLTYPE_INT | CTLFLAG_RW,
1378                         sc, 0, re_sysctl_simtime, "I",
1379                         "Simulated interrupt moderation time (usec).");
1380         SYSCTL_ADD_PROC(&sc->re_sysctl_ctx,
1381                         SYSCTL_CHILDREN(sc->re_sysctl_tree),
1382                         OID_AUTO, "imtype",
1383                         CTLTYPE_INT | CTLFLAG_RW,
1384                         sc, 0, re_sysctl_imtype, "I",
1385                         "Interrupt moderation type -- "
1386                         "0:disable, 1:simulated, "
1387                         "2:hardware(if supported)");
1388         if (sc->re_caps & RE_C_HWIM) {
1389                 SYSCTL_ADD_PROC(&sc->re_sysctl_ctx,
1390                                 SYSCTL_CHILDREN(sc->re_sysctl_tree),
1391                                 OID_AUTO, "hw_rxtime",
1392                                 CTLTYPE_INT | CTLFLAG_RW,
1393                                 sc, 0, re_sysctl_rxtime, "I",
1394                                 "Hardware interrupt moderation time for RX "
1395                                 "(unit: 25usec).");
1396                 SYSCTL_ADD_PROC(&sc->re_sysctl_ctx,
1397                                 SYSCTL_CHILDREN(sc->re_sysctl_tree),
1398                                 OID_AUTO, "hw_txtime",
1399                                 CTLTYPE_INT | CTLFLAG_RW,
1400                                 sc, 0, re_sysctl_txtime, "I",
1401                                 "Hardware interrupt moderation time for TX "
1402                                 "(unit: 25usec).");
1403         }
1404
1405 #ifndef BURN_BRIDGES
1406         /*
1407          * Handle power management nonsense.
1408          */
1409
1410         if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1411                 uint32_t membase, irq;
1412
1413                 /* Save important PCI config data. */
1414                 membase = pci_read_config(dev, RE_PCI_LOMEM, 4);
1415                 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1416
1417                 /* Reset the power state. */
1418                 device_printf(dev, "chip is in D%d power mode "
1419                     "-- setting to D0\n", pci_get_powerstate(dev));
1420
1421                 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1422
1423                 /* Restore PCI config data. */
1424                 pci_write_config(dev, RE_PCI_LOMEM, membase, 4);
1425                 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1426         }
1427 #endif
1428         /*
1429          * Map control/status registers.
1430          */
1431         pci_enable_busmaster(dev);
1432
1433         rid = RE_PCI_LOIO;
1434         sc->re_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
1435                                             RF_ACTIVE);
1436
1437         if (sc->re_res == NULL) {
1438                 device_printf(dev, "couldn't map ports\n");
1439                 error = ENXIO;
1440                 goto fail;
1441         }
1442
1443         sc->re_btag = rman_get_bustag(sc->re_res);
1444         sc->re_bhandle = rman_get_bushandle(sc->re_res);
1445
1446         /* Allocate interrupt */
1447         sc->re_irq_type = pci_alloc_1intr(dev, re_msi_enable,
1448                                            &sc->re_irq_rid, &irq_flags);
1449
1450         sc->re_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->re_irq_rid,
1451                                             irq_flags);
1452         if (sc->re_irq == NULL) {
1453                 device_printf(dev, "couldn't map interrupt\n");
1454                 error = ENXIO;
1455                 goto fail;
1456         }
1457
1458         /* Reset the adapter. */
1459         re_reset(sc, 0);
1460
1461         if (RE_IS_8139CP(sc)) {
1462                 sc->re_bus_speed = 33; /* XXX */
1463         } else if (sc->re_caps & RE_C_PCIE) {
1464                 sc->re_bus_speed = 125;
1465         } else {
1466                 uint8_t cfg2;
1467
1468                 cfg2 = CSR_READ_1(sc, RE_CFG2);
1469                 switch (cfg2 & RE_CFG2_PCICLK_MASK) {
1470                 case RE_CFG2_PCICLK_33MHZ:
1471                         sc->re_bus_speed = 33;
1472                         break;
1473                 case RE_CFG2_PCICLK_66MHZ:
1474                         sc->re_bus_speed = 66;
1475                         break;
1476                 default:
1477                         device_printf(dev, "unknown bus speed, assume 33MHz\n");
1478                         sc->re_bus_speed = 33;
1479                         break;
1480                 }
1481                 if (cfg2 & RE_CFG2_PCI64)
1482                         sc->re_caps |= RE_C_PCI64;
1483         }
1484         device_printf(dev, "Hardware rev. 0x%08x; MAC ver. 0x%02x; "
1485                       "PCI%s %dMHz\n",
1486                       sc->re_hwrev, sc->re_macver,
1487                       (sc->re_caps & RE_C_PCIE) ?
1488                       "-E" : ((sc->re_caps & RE_C_PCI64) ? "64" : "32"),
1489                       sc->re_bus_speed);
1490
1491         /*
1492          * NOTE:
1493          * DO NOT try to adjust config1 and config5 as is done in
1494          * Realtek's Linux drivers.  It will _permanently_ damage certain
1495          * cards' EEPROM, e.g. one of my 8168B (0x38000000) cards ...
1496          */
1497
1498         re_get_eaddr(sc, eaddr);
1499
1500         if (!RE_IS_8139CP(sc)) {
1501                 /* Set RX length mask */
1502                 sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN;
1503                 sc->re_txstart = RE_GTXSTART;
1504         } else {
1505                 /* Set RX length mask */
1506                 sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN;
1507                 sc->re_txstart = RE_TXSTART;
1508         }
1509
1510         /* Allocate DMA resources */
1511         error = re_allocmem(dev);
1512         if (error)
1513                 goto fail;
1514
1515         /*
1516          * Apply some magic PCI settings from Realtek ...
1517          */
1518         if (RE_IS_8169(sc)) {
1519                 CSR_WRITE_1(sc, 0x82, 1);
1520                 pci_write_config(dev, PCIR_CACHELNSZ, 0x8, 1);
1521         }
1522         pci_write_config(dev, PCIR_LATTIMER, 0x40, 1);
1523
1524         if (sc->re_caps & RE_C_MAC2) {
1525                 /*
1526                  * The following part is extracted from Realtek's BSD driver
1527                  * v176.  However, it does _not_ make much sense:
1528                  * the 8168C's PCI Express device control register is located
1529                  * at 0x78, so reading 0x79 (the upper byte of 0x78) and
1530                  * setting bits 4~6 is meant to enlarge the "max read request
1531                  * size" (which we do elsewhere).  The rest of that byte is
1532                  * meaningless to other PCI registers, so writing the value
1533                  * to 0x54 could be completely wrong.  0x80 is the lower byte
1534                  * of the PCI Express device status; its non-reserved bits
1535                  * are RW1C, so writing 0 to them will not have
1536                  * any effect at all.
1537                  */
1538 #ifdef foo
1539                 uint8_t val;
1540
1541                 val = pci_read_config(dev, 0x79, 1);
1542                 val = (val & ~0x70) | 0x50;
1543                 pci_write_config(dev, 0x54, val, 1);
1544                 pci_write_config(dev, 0x80, 0, 1);
1545 #endif
1546         }
1547
1548         /*
1549          * Apply some PHY fixup from Realtek ...
1550          */
1551         if (sc->re_hwrev == RE_HWREV_8110S) {
1552                 CSR_WRITE_1(sc, 0x82, 1);
1553                 re_miibus_writereg(dev, 1, 0xb, 0);
1554         }
1555         if (sc->re_caps & RE_C_PHYPMGT) {
1556                 /* Power up PHY */
1557                 re_miibus_writereg(dev, 1, 0x1f, 0);
1558                 re_miibus_writereg(dev, 1, 0xe, 0);
1559         }
1560
1561         /* Do MII setup */
1562         if (mii_phy_probe(dev, &sc->re_miibus,
1563             re_ifmedia_upd, re_ifmedia_sts)) {
1564                 device_printf(dev, "MII without any phy!\n");
1565                 error = ENXIO;
1566                 goto fail;
1567         }
1568
1569         ifp = &sc->arpcom.ac_if;
1570         ifp->if_softc = sc;
1571         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1572         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1573         ifp->if_ioctl = re_ioctl;
1574         ifp->if_start = re_start;
1575 #ifdef IFPOLL_ENABLE
1576         ifp->if_npoll = re_npoll;
1577 #endif
1578         ifp->if_watchdog = re_watchdog;
1579         ifp->if_init = re_init;
1580         if (!RE_IS_8139CP(sc)) /* XXX */
1581                 ifp->if_baudrate = 1000000000;
1582         else
1583                 ifp->if_baudrate = 100000000;
1584         ifq_set_maxlen(&ifp->if_snd, qlen);
1585         ifq_set_ready(&ifp->if_snd);
1586
1587         ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
1588         if (sc->re_caps & RE_C_HWCSUM)
1589                 ifp->if_capabilities |= IFCAP_HWCSUM;
1590
1591         ifp->if_capenable = ifp->if_capabilities;
1592         if (ifp->if_capabilities & IFCAP_HWCSUM)
1593                 ifp->if_hwassist = RE_CSUM_FEATURES;
1594         else
1595                 ifp->if_hwassist = 0;
1596
1597         /*
1598          * Call MI attach routine.
1599          */
1600         ether_ifattach(ifp, eaddr, NULL);
1601
1602 #ifdef IFPOLL_ENABLE
1603         ifpoll_compat_setup(&sc->re_npoll,
1604             &sc->re_sysctl_ctx, sc->re_sysctl_tree, device_get_unit(dev),
1605             ifp->if_serializer);
1606 #endif
1607
1608 #ifdef RE_DIAG
1609         /*
1610          * Perform hardware diagnostic on the original RTL8169.
1611          * Some 32-bit cards were incorrectly wired and would
1612          * malfunction if plugged into a 64-bit slot.
1613          */
1614         if (sc->re_hwrev == RE_HWREV_8169) {
1615                 lwkt_serialize_enter(ifp->if_serializer);
1616                 error = re_diag(sc);
1617                 lwkt_serialize_exit(ifp->if_serializer);
1618
1619                 if (error) {
1620                         device_printf(dev, "hardware diagnostic failure\n");
1621                         ether_ifdetach(ifp);
1622                         goto fail;
1623                 }
1624         }
1625 #endif  /* RE_DIAG */
1626
1627         /* Hook interrupt last to avoid having to lock softc */
1628         error = bus_setup_intr(dev, sc->re_irq, INTR_MPSAFE, re_intr, sc,
1629                                &sc->re_intrhand, ifp->if_serializer);
1630
1631         if (error) {
1632                 device_printf(dev, "couldn't set up irq\n");
1633                 ether_ifdetach(ifp);
1634                 goto fail;
1635         }
1636
1637         ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->re_irq));
1638
1639 fail:
1640         if (error)
1641                 re_detach(dev);
1642
1643         return (error);
1644 }
1645
1646 /*
1647  * Shut down hardware and free up resources.  This can be called any
1648  * time after the mutex has been initialized. It is called in both
1649  * the error case in attach and the normal detach case so it needs
1650  * to be careful about only freeing resources that have actually been
1651  * allocated.
1652  */
1653 static int
1654 re_detach(device_t dev)
1655 {
1656         struct re_softc *sc = device_get_softc(dev);
1657         struct ifnet *ifp = &sc->arpcom.ac_if;
1658
1659         /* These should only be active if attach succeeded */
1660         if (device_is_attached(dev)) {
1661                 lwkt_serialize_enter(ifp->if_serializer);
1662                 re_stop(sc);
1663                 bus_teardown_intr(dev, sc->re_irq, sc->re_intrhand);
1664                 lwkt_serialize_exit(ifp->if_serializer);
1665
1666                 ether_ifdetach(ifp);
1667         }
1668         if (sc->re_miibus)
1669                 device_delete_child(dev, sc->re_miibus);
1670         bus_generic_detach(dev);
1671
1672         if (sc->re_sysctl_tree != NULL)
1673                 sysctl_ctx_free(&sc->re_sysctl_ctx);
1674
1675         if (sc->re_irq)
1676                 bus_release_resource(dev, SYS_RES_IRQ, sc->re_irq_rid,
1677                                      sc->re_irq);
1678
1679         if (sc->re_irq_type == PCI_INTR_TYPE_MSI)
1680                 pci_release_msi(dev);
1681
1682         if (sc->re_res) {
1683                 bus_release_resource(dev, SYS_RES_IOPORT, RE_PCI_LOIO,
1684                                      sc->re_res);
1685         }
1686
1687         /* Free DMA resources */
1688         re_freemem(dev);
1689
1690         return(0);
1691 }
1692
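/*
 * Program RX descriptor 'idx' with the DMA address of its receive buffer,
 * reset the buffer length and hand ownership of the descriptor back to the
 * chip; the last descriptor of the ring additionally gets the end-of-ring
 * (EOR) mark.
 */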
1693 static void
1694 re_setup_rxdesc(struct re_softc *sc, int idx)
1695 {
1696         bus_addr_t paddr;
1697         uint32_t cmdstat;
1698         struct re_desc *d;
1699
1700         paddr = sc->re_ldata.re_rx_paddr[idx];
1701         d = &sc->re_ldata.re_rx_list[idx];
1702
1703         d->re_bufaddr_lo = htole32(RE_ADDR_LO(paddr));
1704         d->re_bufaddr_hi = htole32(RE_ADDR_HI(paddr));
1705
1706         cmdstat = sc->re_rxbuf_size | RE_RDESC_CMD_OWN;
1707         if (idx == (sc->re_rx_desc_cnt - 1))
1708                 cmdstat |= RE_RDESC_CMD_EOR;
1709         d->re_cmdstat = htole32(cmdstat);
1710 }
1711
1712 static int
1713 re_newbuf_std(struct re_softc *sc, int idx, int init)
1714 {
1715         bus_dma_segment_t seg;
1716         bus_dmamap_t map;
1717         struct mbuf *m;
1718         int error, nsegs;
1719
1720         m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
1721         if (m == NULL) {
1722                 error = ENOBUFS;
1723
1724                 if (init) {
1725                         if_printf(&sc->arpcom.ac_if, "m_getcl failed\n");
1726                         return error;
1727                 } else {
1728                         goto back;
1729                 }
1730         }
1731         m->m_len = m->m_pkthdr.len = MCLBYTES;
1732
1733         /*
1734          * NOTE:
1735          * re(4) chips need the address of the receive buffer to be 8-byte
1736          * aligned, so don't call m_adj(m, ETHER_ALIGN) here.
1737          */
1738
1739         error = bus_dmamap_load_mbuf_segment(sc->re_ldata.re_rx_mtag,
1740                         sc->re_ldata.re_rx_spare, m,
1741                         &seg, 1, &nsegs, BUS_DMA_NOWAIT);
1742         if (error) {
1743                 m_freem(m);
1744                 if (init) {
1745                         if_printf(&sc->arpcom.ac_if, "can't load RX mbuf\n");
1746                         return error;
1747                 } else {
1748                         goto back;
1749                 }
1750         }
1751
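        /*
         * The new mbuf was loaded into the spare DMA map above, so the
         * currently mapped buffer is still intact if the load fails.
         * Now that the load has succeeded, tear down the old mapping
         * (when replacing a buffer) and swap the spare map into place.
         */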
1752         if (!init) {
1753                 bus_dmamap_sync(sc->re_ldata.re_rx_mtag,
1754                                 sc->re_ldata.re_rx_dmamap[idx],
1755                                 BUS_DMASYNC_POSTREAD);
1756                 bus_dmamap_unload(sc->re_ldata.re_rx_mtag,
1757                                   sc->re_ldata.re_rx_dmamap[idx]);
1758         }
1759         sc->re_ldata.re_rx_mbuf[idx] = m;
1760         sc->re_ldata.re_rx_paddr[idx] = seg.ds_addr;
1761
1762         map = sc->re_ldata.re_rx_dmamap[idx];
1763         sc->re_ldata.re_rx_dmamap[idx] = sc->re_ldata.re_rx_spare;
1764         sc->re_ldata.re_rx_spare = map;
1765 back:
1766         re_setup_rxdesc(sc, idx);
1767         return error;
1768 }
1769
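/*
 * Jumbo-frame variant of re_newbuf_std(): instead of a 2K cluster, a
 * buffer from the driver's private jumbo pool is attached to the mbuf
 * as external storage.
 */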
1770 static int
1771 re_newbuf_jumbo(struct re_softc *sc, int idx, int init)
1772 {
1773         struct mbuf *m;
1774         struct re_jbuf *jbuf;
1775         int error = 0;
1776
1777         MGETHDR(m, init ? MB_WAIT : MB_DONTWAIT, MT_DATA);
1778         if (m == NULL) {
1779                 error = ENOBUFS;
1780                 if (init) {
1781                         if_printf(&sc->arpcom.ac_if, "MGETHDR failed\n");
1782                         return error;
1783                 } else {
1784                         goto back;
1785                 }
1786         }
1787
1788         jbuf = re_jbuf_alloc(sc);
1789         if (jbuf == NULL) {
1790                 m_freem(m);
1791
1792                 error = ENOBUFS;
1793                 if (init) {
1794                         if_printf(&sc->arpcom.ac_if, "jpool is empty\n");
1795                         return error;
1796                 } else {
1797                         goto back;
1798                 }
1799         }
1800
1801         m->m_ext.ext_arg = jbuf;
1802         m->m_ext.ext_buf = jbuf->re_buf;
1803         m->m_ext.ext_free = re_jbuf_free;
1804         m->m_ext.ext_ref = re_jbuf_ref;
1805         m->m_ext.ext_size = sc->re_rxbuf_size;
1806
1807         m->m_data = m->m_ext.ext_buf;
1808         m->m_flags |= M_EXT;
1809         m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
1810
1811         /*
1812          * NOTE:
1813          * Some re(4) chips (e.g. RTL8101E) need the address of the receive
1814          * buffer to be 8-byte aligned, so don't call m_adj(m, ETHER_ALIGN) here.
1815          */
1816
1817         sc->re_ldata.re_rx_mbuf[idx] = m;
1818         sc->re_ldata.re_rx_paddr[idx] = jbuf->re_paddr;
1819 back:
1820         re_setup_rxdesc(sc, idx);
1821         return error;
1822 }
1823
1824 static int
1825 re_tx_list_init(struct re_softc *sc)
1826 {
1827         bzero(sc->re_ldata.re_tx_list, RE_TX_LIST_SZ(sc));
1828
1829         sc->re_ldata.re_tx_prodidx = 0;
1830         sc->re_ldata.re_tx_considx = 0;
1831         sc->re_ldata.re_tx_free = sc->re_tx_desc_cnt;
1832
1833         return(0);
1834 }
1835
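/*
 * Zero the RX ring and attach a fresh receive buffer to every descriptor
 * via sc->re_newbuf(); any failure is propagated to the caller.
 */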
1836 static int
1837 re_rx_list_init(struct re_softc *sc)
1838 {
1839         int i, error;
1840
1841         bzero(sc->re_ldata.re_rx_list, RE_RX_LIST_SZ(sc));
1842
1843         for (i = 0; i < sc->re_rx_desc_cnt; i++) {
1844                 error = sc->re_newbuf(sc, i, 1);
1845                 if (error)
1846                         return(error);
1847         }
1848
1849         sc->re_ldata.re_rx_prodidx = 0;
1850         sc->re_head = sc->re_tail = NULL;
1851
1852         return(0);
1853 }
1854
1855 #define RE_IP4_PACKET   0x1
1856 #define RE_TCP_PACKET   0x2
1857 #define RE_UDP_PACKET   0x4
1858
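/*
 * Decode the RX descriptor status/control words into a small protocol
 * bitmask: MAC2-type chips report "IPv4 packet" in re_control while the
 * older chips use re_cmdstat; the TCP/UDP indication is common to both.
 */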
1859 static __inline uint8_t
1860 re_packet_type(struct re_softc *sc, uint32_t rxstat, uint32_t rxctrl)
1861 {
1862         uint8_t packet_type = 0;
1863
1864         if (sc->re_caps & RE_C_MAC2) {
1865                 if (rxctrl & RE_RDESC_CTL_PROTOIP4)
1866                         packet_type |= RE_IP4_PACKET;
1867         } else {
1868                 if (rxstat & RE_RDESC_STAT_PROTOID)
1869                         packet_type |= RE_IP4_PACKET;
1870         }
1871         if (RE_TCPPKT(rxstat))
1872                 packet_type |= RE_TCP_PACKET;
1873         else if (RE_UDPPKT(rxstat))
1874                 packet_type |= RE_UDP_PACKET;
1875         return packet_type;
1876 }
1877
1878 /*
1879  * RX handler for C+ and 8169. For the gigE chips, we support
1880  * the reception of jumbo frames that have been fragmented
1881  * across multiple 2K mbuf cluster buffers.
1882  */
1883 static int
1884 re_rxeof(struct re_softc *sc)
1885 {
1886         struct ifnet *ifp = &sc->arpcom.ac_if;
1887         struct mbuf *m;
1888         struct re_desc  *cur_rx;
1889         uint32_t rxstat, rxctrl;
1890         int i, total_len, rx = 0;
1891
1892         for (i = sc->re_ldata.re_rx_prodidx;
1893              RE_OWN(&sc->re_ldata.re_rx_list[i]) == 0; RE_RXDESC_INC(sc, i)) {
1894                 cur_rx = &sc->re_ldata.re_rx_list[i];
1895                 m = sc->re_ldata.re_rx_mbuf[i];
1896                 total_len = RE_RXBYTES(cur_rx);
1897                 rxstat = le32toh(cur_rx->re_cmdstat);
1898                 rxctrl = le32toh(cur_rx->re_control);
1899
1900                 rx = 1;
1901
1902 #ifdef INVARIANTS
1903                 if (sc->re_flags & RE_F_USE_JPOOL)
1904                         KKASSERT(rxstat & RE_RDESC_STAT_EOF);
1905 #endif
1906
1907                 if ((rxstat & RE_RDESC_STAT_EOF) == 0) {
1908                         if (sc->re_flags & RE_F_DROP_RXFRAG) {
1909                                 re_setup_rxdesc(sc, i);
1910                                 continue;
1911                         }
1912
1913                         if (sc->re_newbuf(sc, i, 0)) {
1914                                 /* Drop upcoming fragments */
1915                                 sc->re_flags |= RE_F_DROP_RXFRAG;
1916                                 continue;
1917                         }
1918
1919                         m->m_len = MCLBYTES;
1920                         if (sc->re_head == NULL) {
1921                                 sc->re_head = sc->re_tail = m;
1922                         } else {
1923                                 sc->re_tail->m_next = m;
1924                                 sc->re_tail = m;
1925                         }
1926                         continue;
1927                 } else if (sc->re_flags & RE_F_DROP_RXFRAG) {
1928                         /*
1929                          * Last fragment of a multi-fragment packet.
1930                          *
1931                          * Since an error has already occurred, this fragment
1932                          * must be dropped, along with the rest of the chain.
1933                          */
1934                         re_setup_rxdesc(sc, i);
1935                         re_free_rxchain(sc);
1936                         sc->re_flags &= ~RE_F_DROP_RXFRAG;
1937                         continue;
1938                 }
1939
1940                 /*
1941                  * NOTE: for the 8139C+, the frame length field
1942                  * is always 12 bits in size, but for the gigE chips,
1943                  * it is 13 bits (since the max RX frame length is 16K).
1944                  * Unfortunately, all 32 bits in the status word
1945                  * were already used, so to make room for the extra
1946                  * length bit, RealTek took out the 'frame alignment
1947                  * error' bit and shifted the other status bits
1948                  * over one slot. The OWN, EOR, FS and LS bits are
1949                  * still in the same places. We have already extracted
1950                  * the frame length and checked the OWN bit, so rather
1951                  * than using an alternate bit mapping, we shift the
1952                  * status bits one space to the right so we can evaluate
1953                  * them using the 8169 status as though it was in the
1954                  * same format as that of the 8139C+.
1955                  */
1956                 if (!RE_IS_8139CP(sc))
1957                         rxstat >>= 1;
1958
1959                 if (rxstat & RE_RDESC_STAT_RXERRSUM) {
1960                         ifp->if_ierrors++;
1961                         /*
1962                          * If this is part of a multi-fragment packet,
1963                          * discard all the pieces.
1964                          */
1965                         re_free_rxchain(sc);
1966                         re_setup_rxdesc(sc, i);
1967                         continue;
1968                 }
1969
1970                 /*
1971                  * If allocating a replacement mbuf fails,
1972                  * reload the current one.
1973                  */
1974
1975                 if (sc->re_newbuf(sc, i, 0)) {
1976                         ifp->if_ierrors++;
1977                         continue;
1978                 }
1979
1980                 if (sc->re_head != NULL) {
1981                         m->m_len = total_len % MCLBYTES;
1982                         /* 
1983                          * Special case: if there are 4 bytes or less
1984                          * in this buffer, the mbuf can be discarded:
1985                          * the last 4 bytes are the CRC, which we don't
1986                          * care about anyway.
1987                          */
1988                         if (m->m_len <= ETHER_CRC_LEN) {
1989                                 sc->re_tail->m_len -=
1990                                     (ETHER_CRC_LEN - m->m_len);
1991                                 m_freem(m);
1992                         } else {
1993                                 m->m_len -= ETHER_CRC_LEN;
1994                                 sc->re_tail->m_next = m;
1995                         }
1996                         m = sc->re_head;
1997                         sc->re_head = sc->re_tail = NULL;
1998                         m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1999                 } else {
2000                         m->m_pkthdr.len = m->m_len =
2001                             (total_len - ETHER_CRC_LEN);
2002                 }
2003
2004                 ifp->if_ipackets++;
2005                 m->m_pkthdr.rcvif = ifp;
2006
2007                 /* Do RX checksumming if enabled */
2008
2009                 if (ifp->if_capenable & IFCAP_RXCSUM) {
2010                         uint8_t packet_type;
2011
2012                         packet_type = re_packet_type(sc, rxstat, rxctrl);
2013
2014                         /* Check IP header checksum */
2015                         if (packet_type & RE_IP4_PACKET) {
2016                                 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
2017                                 if ((rxstat & RE_RDESC_STAT_IPSUMBAD) == 0)
2018                                         m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
2019                         }
2020
2021                         /* Check TCP/UDP checksum */
2022                         if (((packet_type & RE_TCP_PACKET) &&
2023                              (rxstat & RE_RDESC_STAT_TCPSUMBAD) == 0) ||
2024                             ((packet_type & RE_UDP_PACKET) &&
2025                              (rxstat & RE_RDESC_STAT_UDPSUMBAD) == 0)) {
2026                                 m->m_pkthdr.csum_flags |=
2027                                     CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
2028                                     CSUM_FRAG_NOT_CHECKED;
2029                                 m->m_pkthdr.csum_data = 0xffff;
2030                         }
2031                 }
2032
2033                 if (rxctrl & RE_RDESC_CTL_HASTAG) {
2034                         m->m_flags |= M_VLANTAG;
2035                         m->m_pkthdr.ether_vlantag =
2036                                 be16toh((rxctrl & RE_RDESC_CTL_TAGDATA));
2037                 }
2038                 ifp->if_input(ifp, m);
2039         }
2040
2041         sc->re_ldata.re_rx_prodidx = i;
2042
2043         return rx;
2044 }
2045
2046 #undef RE_IP4_PACKET
2047 #undef RE_TCP_PACKET
2048 #undef RE_UDP_PACKET
2049
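/*
 * Reclaim TX descriptors that the chip has released (OWN bit cleared),
 * starting at the consumer index, and return whether anything was collected.
 */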
2050 static int
2051 re_tx_collect(struct re_softc *sc)
2052 {
2053         struct ifnet *ifp = &sc->arpcom.ac_if;
2054         uint32_t txstat;
2055         int idx, tx = 0;
2056
2057         for (idx = sc->re_ldata.re_tx_considx;
2058              sc->re_ldata.re_tx_free < sc->re_tx_desc_cnt;
2059              RE_TXDESC_INC(sc, idx)) {
2060                 txstat = le32toh(sc->re_ldata.re_tx_list[idx].re_cmdstat);
2061                 if (txstat & RE_TDESC_CMD_OWN)
2062                         break;
2063
2064                 tx = 1;
2065
2066                 sc->re_ldata.re_tx_list[idx].re_bufaddr_lo = 0;
2067
2068                 /*
2069                  * We only stash mbufs in the last descriptor
2070                  * in a fragment chain, which also happens to
2071                  * be the only place where the TX status bits
2072                  * are valid.
2073                  */
2074                 if (txstat & RE_TDESC_CMD_EOF) {
2075                         bus_dmamap_unload(sc->re_ldata.re_tx_mtag,
2076                             sc->re_ldata.re_tx_dmamap[idx]);
2077                         m_freem(sc->re_ldata.re_tx_mbuf[idx]);
2078                         sc->re_ldata.re_tx_mbuf[idx] = NULL;
2079                         if (txstat & (RE_TDESC_STAT_EXCESSCOL|
2080                             RE_TDESC_STAT_COLCNT))
2081                                 ifp->if_collisions++;
2082                         if (txstat & RE_TDESC_STAT_TXERRSUM)
2083                                 ifp->if_oerrors++;
2084                         else
2085                                 ifp->if_opackets++;
2086                 }
2087                 sc->re_ldata.re_tx_free++;
2088         }
2089         sc->re_ldata.re_tx_considx = idx;
2090
2091         return tx;
2092 }
2093
2094 static int
2095 re_txeof(struct re_softc *sc)
2096 {
2097         struct ifnet *ifp = &sc->arpcom.ac_if;
2098         int tx;
2099
2100         tx = re_tx_collect(sc);
2101
2102         /* There are enough free TX descs */
2103         if (sc->re_ldata.re_tx_free > RE_TXDESC_SPARE)
2104                 ifq_clr_oactive(&ifp->if_snd);
2105
2106         /*
2107          * Some chips will ignore a second TX request issued while an
2108          * existing transmission is in progress. If the transmitter goes
2109          * idle but there are still packets waiting to be sent, we need
2110          * to restart the channel here to flush them out. This only seems
2111          * to be required with the PCIe devices.
2112          */
2113         if (sc->re_ldata.re_tx_free < sc->re_tx_desc_cnt)
2114                 CSR_WRITE_1(sc, sc->re_txstart, RE_TXSTART_START);
2115         else
2116                 ifp->if_timer = 0;
2117
2118         return tx;
2119 }
2120
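/*
 * Periodic link-state callout; this wrapper only takes the interface
 * serializer around re_tick_serialized().
 */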
2121 static void
2122 re_tick(void *xsc)
2123 {
2124         struct re_softc *sc = xsc;
2125
2126         lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer);
2127         re_tick_serialized(xsc);
2128         lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer);
2129 }
2130
2131 static void
2132 re_tick_serialized(void *xsc)
2133 {
2134         struct re_softc *sc = xsc;
2135         struct ifnet *ifp = &sc->arpcom.ac_if;
2136         struct mii_data *mii;
2137
2138         ASSERT_SERIALIZED(ifp->if_serializer);
2139
2140         mii = device_get_softc(sc->re_miibus);
2141         mii_tick(mii);
2142         if (sc->re_flags & RE_F_LINKED) {
2143                 if (!(mii->mii_media_status & IFM_ACTIVE))
2144                         sc->re_flags &= ~RE_F_LINKED;
2145         } else {
2146                 if (mii->mii_media_status & IFM_ACTIVE &&
2147                     IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
2148                         sc->re_flags |= RE_F_LINKED;
2149                         if (!ifq_is_empty(&ifp->if_snd))
2150                                 if_devstart(ifp);
2151                 }
2152         }
2153
2154         callout_reset(&sc->re_timer, hz, re_tick, sc);
2155 }
2156
2157 #ifdef IFPOLL_ENABLE
2158
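/*
 * Polling-mode counterpart of re_intr(): called by the ifpoll framework.
 * The interrupt status register is inspected only once every
 * ifpc_stfrac+1 invocations to keep the register reads cheap.
 */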
2159 static void
2160 re_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
2161 {
2162         struct re_softc *sc = ifp->if_softc;
2163
2164         ASSERT_SERIALIZED(ifp->if_serializer);
2165
2166         if (sc->re_npoll.ifpc_stcount-- == 0) {
2167                 uint16_t       status;
2168
2169                 sc->re_npoll.ifpc_stcount = sc->re_npoll.ifpc_stfrac;
2170
2171                 status = CSR_READ_2(sc, RE_ISR);
2172                 if (status == 0xffff)
2173                         return;
2174                 if (status)
2175                         CSR_WRITE_2(sc, RE_ISR, status);
2176
2177                 /*
2178                  * XXX check behaviour on receiver stalls.
2179                  */
2180
2181                 if (status & RE_ISR_SYSTEM_ERR)
2182                         re_init(sc);
2183         }
2184
2185         sc->rxcycles = count;
2186         re_rxeof(sc);
2187         re_txeof(sc);
2188
2189         if (!ifq_is_empty(&ifp->if_snd))
2190                 if_devstart(ifp);
2191 }
2192
2193 static void
2194 re_npoll(struct ifnet *ifp, struct ifpoll_info *info)
2195 {
2196         struct re_softc *sc = ifp->if_softc;
2197
2198         ASSERT_SERIALIZED(ifp->if_serializer);
2199
2200         if (info != NULL) {
2201                 int cpuid = sc->re_npoll.ifpc_cpuid;
2202
2203                 info->ifpi_rx[cpuid].poll_func = re_npoll_compat;
2204                 info->ifpi_rx[cpuid].arg = NULL;
2205                 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
2206
2207                 if (ifp->if_flags & IFF_RUNNING)
2208                         re_setup_intr(sc, 0, RE_IMTYPE_NONE);
2209                 ifq_set_cpuid(&ifp->if_snd, cpuid);
2210         } else {
2211                 if (ifp->if_flags & IFF_RUNNING)
2212                         re_setup_intr(sc, 1, sc->re_imtype);
2213                 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(sc->re_irq));
2214         }
2215 }
2216 #endif /* IFPOLL_ENABLE */
2217
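/*
 * Interrupt handler: read and acknowledge the ISR in a loop until no
 * interesting bits remain, dispatching RX/TX completion, error recovery
 * and link-change processing.  Afterwards the simulated interrupt
 * moderation state is adjusted depending on whether any work was done.
 */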
2218 static void
2219 re_intr(void *arg)
2220 {
2221         struct re_softc *sc = arg;
2222         struct ifnet *ifp = &sc->arpcom.ac_if;
2223         uint16_t status;
2224         int rx, tx;
2225
2226         ASSERT_SERIALIZED(ifp->if_serializer);
2227
2228         if ((sc->re_flags & RE_F_SUSPENDED) ||
2229             (ifp->if_flags & IFF_RUNNING) == 0)
2230                 return;
2231
2232         rx = tx = 0;
2233         for (;;) {
2234                 status = CSR_READ_2(sc, RE_ISR);
2235                 /* If the card has gone away the read returns 0xffff. */
2236                 if (status == 0xffff)
2237                         break;
2238                 if (status)
2239                         CSR_WRITE_2(sc, RE_ISR, status);
2240
2241                 if ((status & sc->re_intrs) == 0)
2242                         break;
2243
2244                 if (status & (sc->re_rx_ack | RE_ISR_RX_ERR))
2245                         rx |= re_rxeof(sc);
2246
2247                 if (status & (sc->re_tx_ack | RE_ISR_TX_ERR))
2248                         tx |= re_txeof(sc);
2249
2250                 if (status & RE_ISR_SYSTEM_ERR)
2251                         re_init(sc);
2252
2253                 if (status & RE_ISR_LINKCHG) {
2254                         callout_stop(&sc->re_timer);
2255                         re_tick_serialized(sc);
2256                 }
2257         }
2258
2259         if (sc->re_imtype == RE_IMTYPE_SIM) {
2260                 if ((sc->re_flags & RE_F_TIMER_INTR)) {
2261                         if ((tx | rx) == 0) {
2262                                 /*
2263                                  * Nothing needs to be processed; fall back
2264                                  * to plain TX/RX interrupts.
2265                                  */
2266                                 re_setup_intr(sc, 1, RE_IMTYPE_NONE);
2267
2268                                 /*
2269                                  * Recollect, mainly to avoid the possible
2270                                  * race introduced by changing interrupt
2271                                  * masks.
2272                                  */
2273                                 re_rxeof(sc);
2274                                 tx = re_txeof(sc);
2275                         } else {
2276                                 CSR_WRITE_4(sc, RE_TIMERCNT, 1); /* reload */
2277                         }
2278                 } else if (tx | rx) {
2279                         /*
2280                          * Assume that using simulated interrupt moderation
2281                          * (hardware timer based) could reduce the interrupt
2282                          * rate.
2283                          */
2284                         re_setup_intr(sc, 1, RE_IMTYPE_SIM);
2285                 }
2286         }
2287
2288         if (tx && !ifq_is_empty(&ifp->if_snd))
2289                 if_devstart(ifp);
2290 }
2291
2292 static int
2293 re_encap(struct re_softc *sc, struct mbuf **m_head, int *idx0)
2294 {
2295         struct mbuf *m = *m_head;
2296         bus_dma_segment_t segs[RE_MAXSEGS];
2297         bus_dmamap_t map;
2298         int error, maxsegs, idx, i, nsegs;
2299         struct re_desc *d, *tx_ring;
2300         uint32_t cmd_csum, ctl_csum, vlantag;
2301
2302         KASSERT(sc->re_ldata.re_tx_free > RE_TXDESC_SPARE,
2303                 ("not enough free TX desc"));
2304
2305         map = sc->re_ldata.re_tx_dmamap[*idx0];
2306
2307         /*
2308          * Set up checksum offload. Note: checksum offload bits must
2309          * appear in all descriptors of a multi-descriptor transmit
2310          * attempt. (This is according to testing done with an 8169
2311          * chip. I'm not sure if this is a requirement or a bug.)
2312          */
2313         cmd_csum = ctl_csum = 0;
2314         if (m->m_pkthdr.csum_flags & CSUM_IP) {
2315                 cmd_csum |= RE_TDESC_CMD_IPCSUM;
2316                 ctl_csum |= RE_TDESC_CTL_IPCSUM;
2317         }
2318         if (m->m_pkthdr.csum_flags & CSUM_TCP) {
2319                 cmd_csum |= RE_TDESC_CMD_TCPCSUM;
2320                 ctl_csum |= RE_TDESC_CTL_TCPCSUM;
2321         }
2322         if (m->m_pkthdr.csum_flags & CSUM_UDP) {
2323                 cmd_csum |= RE_TDESC_CMD_UDPCSUM;
2324                 ctl_csum |= RE_TDESC_CTL_UDPCSUM;
2325         }
2326
2327         /* For MAC2 chips, csum flags are set on re_control */
2328         if (sc->re_caps & RE_C_MAC2)
2329                 cmd_csum = 0;
2330         else
2331                 ctl_csum = 0;
2332
2333         if ((sc->re_caps & RE_C_AUTOPAD) == 0) {
2334                 /*
2335                  * With some of the RealTek chips, using the checksum offload
2336                  * support in conjunction with the autopadding feature results
2337                  * in the transmission of corrupt frames. For example, if we
2338                  * need to send a really small IP fragment that's less than 60
2339                  * bytes in size, and IP header checksumming is enabled, the
2340                  * resulting ethernet frame that appears on the wire will
2341                  * have garbled payload. To work around this, if TX checksum
2342                  * offload is enabled, we always manually pad short frames out
2343                  * to the minimum ethernet frame size.
2344                  *
2345                  * Note: this appears unnecessary for TCP, and doing it for TCP
2346                  * with PCIe adapters seems to result in bad checksums.
2347                  */
2348                 if ((m->m_pkthdr.csum_flags &
2349                      (CSUM_DELAY_IP | CSUM_DELAY_DATA)) &&
2350                     (m->m_pkthdr.csum_flags & CSUM_TCP) == 0 &&
2351                     m->m_pkthdr.len < RE_MIN_FRAMELEN) {
2352                         error = m_devpad(m, RE_MIN_FRAMELEN);
2353                         if (error)
2354                                 goto back;
2355                 }
2356         }
2357
2358         vlantag = 0;
2359         if (m->m_flags & M_VLANTAG) {
2360                 vlantag = htobe16(m->m_pkthdr.ether_vlantag) |
2361                           RE_TDESC_CTL_INSTAG;
2362         }
2363
2364         maxsegs = sc->re_ldata.re_tx_free;
2365         if (maxsegs > RE_MAXSEGS)
2366                 maxsegs = RE_MAXSEGS;
2367
2368         error = bus_dmamap_load_mbuf_defrag(sc->re_ldata.re_tx_mtag, map,
2369                         m_head, segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2370         if (error)
2371                 goto back;
2372
2373         m = *m_head;
2374         bus_dmamap_sync(sc->re_ldata.re_tx_mtag, map, BUS_DMASYNC_PREWRITE);
2375
2376         /*
2377          * Map the segment array into descriptors.  We also keep track
2378          * of the end of the ring and set the end-of-ring bits as needed,
2379          * and we set the ownership bits in all except the very first
2380          * descriptor, whose ownership bits will be turned on later.
2381          */
2382         tx_ring = sc->re_ldata.re_tx_list;
2383         idx = *idx0;
2384         i = 0;
2385         for (;;) {
2386                 uint32_t cmdstat;
2387
2388                 d = &tx_ring[idx];
2389
2390                 cmdstat = segs[i].ds_len;
2391                 d->re_bufaddr_lo = htole32(RE_ADDR_LO(segs[i].ds_addr));
2392                 d->re_bufaddr_hi = htole32(RE_ADDR_HI(segs[i].ds_addr));
2393                 if (i == 0)
2394                         cmdstat |= RE_TDESC_CMD_SOF;
2395                 else
2396                         cmdstat |= RE_TDESC_CMD_OWN;
2397                 if (idx == (sc->re_tx_desc_cnt - 1))
2398                         cmdstat |= RE_TDESC_CMD_EOR;
2399                 d->re_cmdstat = htole32(cmdstat | cmd_csum);
2400                 d->re_control = htole32(ctl_csum | vlantag);
2401
2402                 i++;
2403                 if (i == nsegs)
2404                         break;
2405                 RE_TXDESC_INC(sc, idx);
2406         }
2407         d->re_cmdstat |= htole32(RE_TDESC_CMD_EOF);
2408
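        /*
         * NOTE: the first descriptor's OWN bit is deliberately set last,
         * after the rest of the chain has been filled in, presumably so
         * the chip never starts DMA on a partially constructed chain.
         */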
2409         /* Transfer ownership of packet to the chip. */
2410         d->re_cmdstat |= htole32(RE_TDESC_CMD_OWN);
2411         if (*idx0 != idx)
2412                 tx_ring[*idx0].re_cmdstat |= htole32(RE_TDESC_CMD_OWN);
2413
2414         /*
2415          * Ensure that the map for this transmission
2416          * is placed at the array index of the last descriptor
2417          * in this chain.
2418          */
2419         sc->re_ldata.re_tx_dmamap[*idx0] = sc->re_ldata.re_tx_dmamap[idx];
2420         sc->re_ldata.re_tx_dmamap[idx] = map;
2421
2422         sc->re_ldata.re_tx_mbuf[idx] = m;
2423         sc->re_ldata.re_tx_free -= nsegs;
2424
2425         RE_TXDESC_INC(sc, idx);
2426         *idx0 = idx;
2427 back:
2428         if (error) {
2429                 m_freem(*m_head);
2430                 *m_head = NULL;
2431         }
2432         return error;
2433 }
2434
2435 /*
2436  * Main transmit routine for C+ and gigE NICs.
2437  */
2438
2439 static void
2440 re_start(struct ifnet *ifp)
2441 {
2442         struct re_softc *sc = ifp->if_softc;
2443         struct mbuf *m_head;
2444         int idx, need_trans, oactive, error;
2445
2446         ASSERT_SERIALIZED(ifp->if_serializer);
2447
2448         if ((sc->re_flags & RE_F_LINKED) == 0) {
2449                 ifq_purge(&ifp->if_snd);
2450                 return;
2451         }
2452
2453         if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
2454                 return;
2455
2456         idx = sc->re_ldata.re_tx_prodidx;
2457
2458         need_trans = 0;
2459         oactive = 0;
2460         while (sc->re_ldata.re_tx_mbuf[idx] == NULL) {
2461                 if (sc->re_ldata.re_tx_free <= RE_TXDESC_SPARE) {
2462                         if (!oactive) {
2463                                 if (re_tx_collect(sc)) {
2464                                         oactive = 1;
2465                                         continue;
2466                                 }
2467                         }
2468                         ifq_set_oactive(&ifp->if_snd);
2469                         break;
2470                 }
2471
2472                 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2473                 if (m_head == NULL)
2474                         break;
2475
2476                 error = re_encap(sc, &m_head, &idx);
2477                 if (error) {
2478                         /* m_head is freed by re_encap(), if we reach here */
2479                         ifp->if_oerrors++;
2480
2481                         if (error == EFBIG && !oactive) {
2482                                 if (re_tx_collect(sc)) {
2483                                         oactive = 1;
2484                                         continue;
2485                                 }
2486                         }
2487                         ifq_set_oactive(&ifp->if_snd);
2488                         break;
2489                 }
2490
2491                 oactive = 0;
2492                 need_trans = 1;
2493
2494                 /*
2495                  * If there's a BPF listener, bounce a copy of this frame
2496                  * to him.
2497                  */
2498                 ETHER_BPF_MTAP(ifp, m_head);
2499         }
2500
2501         /*
2502          * If sc->re_ldata.re_tx_mbuf[idx] is not NULL it is possible
2503          * for OACTIVE to not be properly set when we also do not
2504          * have sufficient free TX descriptors, leaving packets in
2505          * ifp->if_snd.  This can cause if_start_dispatch() to loop
2506          * infinitely, so make sure OACTIVE is set properly.
2507          */
2508         if (sc->re_ldata.re_tx_free <= RE_TXDESC_SPARE) {
2509                 if (!ifq_is_oactive(&ifp->if_snd)) {
2510                         if_printf(ifp, "Debug: OACTIVE was not set when "
2511                             "re_tx_free was below minimum!\n");
2512                         ifq_set_oactive(&ifp->if_snd);
2513                 }
2514         }
2515         if (!need_trans)
2516                 return;
2517
2518         sc->re_ldata.re_tx_prodidx = idx;
2519
2520         /*
2521          * RealTek put the TX poll request register in a different
2522          * location on the 8169 gigE chip. I don't know why.
2523          */
2524         CSR_WRITE_1(sc, sc->re_txstart, RE_TXSTART_START);
2525
2526         /*
2527          * Set a timeout in case the chip goes out to lunch.
2528          */
2529         ifp->if_timer = 5;
2530 }
2531
2532 static void
2533 re_init(void *xsc)
2534 {
2535         struct re_softc *sc = xsc;
2536         struct ifnet *ifp = &sc->arpcom.ac_if;
2537         struct mii_data *mii;
2538         int error, framelen;
2539
2540         ASSERT_SERIALIZED(ifp->if_serializer);
2541
2542         mii = device_get_softc(sc->re_miibus);
2543
2544         /*
2545          * Cancel pending I/O and free all RX/TX buffers.
2546          */
2547         re_stop(sc);
2548
2549         if (sc->re_caps & RE_C_CONTIGRX) {
2550                 if (ifp->if_mtu > ETHERMTU) {
2551                         KKASSERT(sc->re_ldata.re_jbuf != NULL);
2552                         sc->re_flags |= RE_F_USE_JPOOL;
2553                         sc->re_rxbuf_size = RE_FRAMELEN_MAX;
2554                         sc->re_newbuf = re_newbuf_jumbo;
2555                 } else {
2556                         sc->re_flags &= ~RE_F_USE_JPOOL;
2557                         sc->re_rxbuf_size = MCLBYTES;
2558                         sc->re_newbuf = re_newbuf_std;
2559                 }
2560         }
2561
2562         /*
2563          * Adjust max read request size according to MTU; mainly to
2564          * improve TX performance for the common case (ETHERMTU) on GigE
2565          * NICs.  However, this can _not_ be done on 10/100-only
2566          * NICs; their DMA engines will malfunction with a non-default
2567          * max read request size.
2568          */
2569         if ((sc->re_caps & (RE_C_PCIE | RE_C_FASTE)) == RE_C_PCIE) {
2570                 if (ifp->if_mtu > ETHERMTU) {
2571                         /*
2572                          * 512 seems to be the only value that works
2573                          * reliably with jumbo frame
2574                          * reliably with jumbo frames.
2575                         pcie_set_max_readrq(sc->re_dev,
2576                                 PCIEM_DEVCTL_MAX_READRQ_512);
2577                 } else {
2578                         pcie_set_max_readrq(sc->re_dev,
2579                                 PCIEM_DEVCTL_MAX_READRQ_4096);
2580                 }
2581         }
2582
2583         /*
2584          * Enable C+ RX and TX mode, as well as VLAN stripping and
2585          * RX checksum offload. We must configure the C+ register
2586          * before all others.
2587          */
2588         CSR_WRITE_2(sc, RE_CPLUS_CMD, RE_CPLUSCMD_RXENB | RE_CPLUSCMD_TXENB |
2589                     RE_CPLUSCMD_PCI_MRW |
2590                     (ifp->if_capenable & IFCAP_VLAN_HWTAGGING ?
2591                      RE_CPLUSCMD_VLANSTRIP : 0) |
2592                     (ifp->if_capenable & IFCAP_RXCSUM ?
2593                      RE_CPLUSCMD_RXCSUM_ENB : 0));
2594
2595         /*
2596          * Init our MAC address.  Even though the chipset
2597          * documentation doesn't mention it, we need to enter "Config
2598          * register write enable" mode to modify the ID registers.
2599          */
2600         CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_WRITECFG);
2601         CSR_WRITE_4(sc, RE_IDR0,
2602             htole32(*(uint32_t *)(&sc->arpcom.ac_enaddr[0])));
2603         CSR_WRITE_2(sc, RE_IDR4,
2604             htole16(*(uint16_t *)(&sc->arpcom.ac_enaddr[4])));
2605         CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_OFF);
2606
2607         /*
2608          * For C+ mode, initialize the RX descriptors and mbufs.
2609          */
2610         error = re_rx_list_init(sc);
2611         if (error) {
2612                 re_stop(sc);
2613                 return;
2614         }
2615         error = re_tx_list_init(sc);
2616         if (error) {
2617                 re_stop(sc);
2618                 return;
2619         }
2620
2621         /*
2622          * Load the addresses of the RX and TX lists into the chip.
2623          */
2624         CSR_WRITE_4(sc, RE_RXLIST_ADDR_HI,
2625             RE_ADDR_HI(sc->re_ldata.re_rx_list_addr));
2626         CSR_WRITE_4(sc, RE_RXLIST_ADDR_LO,
2627             RE_ADDR_LO(sc->re_ldata.re_rx_list_addr));
2628
2629         CSR_WRITE_4(sc, RE_TXLIST_ADDR_HI,
2630             RE_ADDR_HI(sc->re_ldata.re_tx_list_addr));
2631         CSR_WRITE_4(sc, RE_TXLIST_ADDR_LO,
2632             RE_ADDR_LO(sc->re_ldata.re_tx_list_addr));
2633
2634         /*
2635          * Enable transmit and receive.
2636          */
2637         CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_TX_ENB|RE_CMD_RX_ENB);
2638
2639         /*
2640          * Set the initial TX and RX configuration.
2641          */
2642         if (sc->re_flags & RE_F_TESTMODE) {
2643                 if (!RE_IS_8139CP(sc))
2644                         CSR_WRITE_4(sc, RE_TXCFG,
2645                                     RE_TXCFG_CONFIG | RE_LOOPTEST_ON);
2646                 else
2647                         CSR_WRITE_4(sc, RE_TXCFG,
2648                                     RE_TXCFG_CONFIG | RE_LOOPTEST_ON_CPLUS);
2649         } else
2650                 CSR_WRITE_4(sc, RE_TXCFG, RE_TXCFG_CONFIG);
2651
2652         framelen = RE_FRAMELEN(ifp->if_mtu);
2653         if (framelen < MCLBYTES)
2654                 CSR_WRITE_1(sc, RE_EARLY_TX_THRESH, howmany(MCLBYTES, 128));
2655         else
2656                 CSR_WRITE_1(sc, RE_EARLY_TX_THRESH, howmany(framelen, 128));
2657
2658         CSR_WRITE_4(sc, RE_RXCFG, RE_RXCFG_CONFIG);
2659
2660         /*
2661          * Program the multicast filter, if necessary.
2662          */
2663         re_setmulti(sc);
2664
2665 #ifdef IFPOLL_ENABLE
2666         /*
2667          * Disable interrupts if we are polling.
2668          */
2669         if (ifp->if_flags & IFF_NPOLLING)
2670                 re_setup_intr(sc, 0, RE_IMTYPE_NONE);
2671         else    /* otherwise ... */
2672 #endif /* IFPOLL_ENABLE */
2673         /*
2674          * Enable interrupts.
2675          */
2676         if (sc->re_flags & RE_F_TESTMODE)
2677                 CSR_WRITE_2(sc, RE_IMR, 0);
2678         else
2679                 re_setup_intr(sc, 1, sc->re_imtype);
2680         CSR_WRITE_2(sc, RE_ISR, sc->re_intrs);
2681
2682         /* Start RX/TX process. */
2683         CSR_WRITE_4(sc, RE_MISSEDPKT, 0);
2684
2685 #ifdef notdef
2686         /* Enable receiver and transmitter. */
2687         CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_TX_ENB|RE_CMD_RX_ENB);
2688 #endif
2689
2690         /*
2691          * For 8169 gigE NICs, set the max allowed RX packet
2692          * size so we can receive jumbo frames.
2693          */
2694         if (!RE_IS_8139CP(sc)) {
2695                 if (sc->re_caps & RE_C_CONTIGRX)
2696                         CSR_WRITE_2(sc, RE_MAXRXPKTLEN, sc->re_rxbuf_size);
2697                 else
2698                         CSR_WRITE_2(sc, RE_MAXRXPKTLEN, 16383);
2699         }
2700
2701         if (sc->re_flags & RE_F_TESTMODE)
2702                 return;
2703
2704         mii_mediachg(mii);
2705
2706         CSR_WRITE_1(sc, RE_CFG1, RE_CFG1_DRVLOAD|RE_CFG1_FULLDUPLEX);
2707
2708         ifp->if_flags |= IFF_RUNNING;
2709         ifq_clr_oactive(&ifp->if_snd);
2710
2711         callout_reset(&sc->re_timer, hz, re_tick, sc);
2712 }
2713
2714 /*
2715  * Set media options.
2716  */
2717 static int
2718 re_ifmedia_upd(struct ifnet *ifp)
2719 {
2720         struct re_softc *sc = ifp->if_softc;
2721         struct mii_data *mii;
2722
2723         ASSERT_SERIALIZED(ifp->if_serializer);
2724
2725         mii = device_get_softc(sc->re_miibus);
2726         mii_mediachg(mii);
2727
2728         return(0);
2729 }
2730
2731 /*
2732  * Report current media status.
2733  */
2734 static void
2735 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2736 {
2737         struct re_softc *sc = ifp->if_softc;
2738         struct mii_data *mii;
2739
2740         ASSERT_SERIALIZED(ifp->if_serializer);
2741
2742         mii = device_get_softc(sc->re_miibus);
2743
2744         mii_pollstat(mii);
2745         ifmr->ifm_active = mii->mii_media_active;
2746         ifmr->ifm_status = mii->mii_media_status;
2747 }
2748
2749 static int
2750 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2751 {
2752         struct re_softc *sc = ifp->if_softc;
2753         struct ifreq *ifr = (struct ifreq *) data;
2754         struct mii_data *mii;
2755         int error = 0, mask;
2756
2757         ASSERT_SERIALIZED(ifp->if_serializer);
2758
2759         switch(command) {
2760         case SIOCSIFMTU:
2761                 if (ifr->ifr_mtu > sc->re_maxmtu) {
2762                         error = EINVAL;
2763                 } else if (ifp->if_mtu != ifr->ifr_mtu) {
2764                         ifp->if_mtu = ifr->ifr_mtu;
2765                         if (ifp->if_flags & IFF_RUNNING)
2766                                 ifp->if_init(sc);
2767                 }
2768                 break;
2769
2770         case SIOCSIFFLAGS:
2771                 if (ifp->if_flags & IFF_UP) {
2772                         if (ifp->if_flags & IFF_RUNNING) {
2773                                 if ((ifp->if_flags ^ sc->re_if_flags) &
2774                                     (IFF_PROMISC | IFF_ALLMULTI))
2775                                         re_setmulti(sc);
2776                         } else {
2777                                 re_init(sc);
2778                         }
2779                 } else if (ifp->if_flags & IFF_RUNNING) {
2780                         re_stop(sc);
2781                 }
2782                 sc->re_if_flags = ifp->if_flags;
2783                 break;
2784
2785         case SIOCADDMULTI:
2786         case SIOCDELMULTI:
2787                 re_setmulti(sc);
2788                 break;
2789
2790         case SIOCGIFMEDIA:
2791         case SIOCSIFMEDIA:
2792                 mii = device_get_softc(sc->re_miibus);
2793                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2794                 break;
2795
2796         case SIOCSIFCAP:
2797                 mask = (ifr->ifr_reqcap ^ ifp->if_capenable) &
2798                        ifp->if_capabilities;
2799                 ifp->if_capenable ^= mask;
2800
2801                 if (mask & IFCAP_HWCSUM) {
2802                         if (ifp->if_capenable & IFCAP_TXCSUM)
2803                                 ifp->if_hwassist = RE_CSUM_FEATURES;
2804                         else
2805                                 ifp->if_hwassist = 0;
2806                 }
2807                 if (mask && (ifp->if_flags & IFF_RUNNING))
2808                         re_init(sc);
2809                 break;
2810
2811         default:
2812                 error = ether_ioctl(ifp, command, data);
2813                 break;
2814         }
2815         return(error);
2816 }
2817
2818 static void
2819 re_watchdog(struct ifnet *ifp)
2820 {
2821         struct re_softc *sc = ifp->if_softc;
2822
2823         ASSERT_SERIALIZED(ifp->if_serializer);
2824
2825         if_printf(ifp, "watchdog timeout\n");
2826
2827         ifp->if_oerrors++;
2828
2829         re_txeof(sc);
2830         re_rxeof(sc);
2831
2832         re_init(sc);
2833
2834         if (!ifq_is_empty(&ifp->if_snd))
2835                 if_devstart(ifp);
2836 }
2837
2838 /*
2839  * Stop the adapter and free any mbufs allocated to the
2840  * RX and TX lists.
2841  */
2842 static void
2843 re_stop(struct re_softc *sc)
2844 {
2845         struct ifnet *ifp = &sc->arpcom.ac_if;
2846         int i;
2847
2848         ASSERT_SERIALIZED(ifp->if_serializer);
2849
2850         /* Reset the adapter. */
2851         re_reset(sc, ifp->if_flags & IFF_RUNNING);
2852
2853         ifp->if_timer = 0;
2854         callout_stop(&sc->re_timer);
2855
2856         ifp->if_flags &= ~IFF_RUNNING;
2857         ifq_clr_oactive(&ifp->if_snd);
2858         sc->re_flags &= ~(RE_F_TIMER_INTR | RE_F_DROP_RXFRAG | RE_F_LINKED);
2859
2860         CSR_WRITE_1(sc, RE_COMMAND, 0x00);
2861         CSR_WRITE_2(sc, RE_IMR, 0x0000);
2862         CSR_WRITE_2(sc, RE_ISR, 0xFFFF);
2863
2864         re_free_rxchain(sc);
2865
2866         /* Free the TX list buffers. */
2867         for (i = 0; i < sc->re_tx_desc_cnt; i++) {
2868                 if (sc->re_ldata.re_tx_mbuf[i] != NULL) {
2869                         bus_dmamap_unload(sc->re_ldata.re_tx_mtag,
2870                                           sc->re_ldata.re_tx_dmamap[i]);
2871                         m_freem(sc->re_ldata.re_tx_mbuf[i]);
2872                         sc->re_ldata.re_tx_mbuf[i] = NULL;
2873                 }
2874         }
2875
2876         /* Free the RX list buffers. */
2877         for (i = 0; i < sc->re_rx_desc_cnt; i++) {
2878                 if (sc->re_ldata.re_rx_mbuf[i] != NULL) {
2879                         if ((sc->re_flags & RE_F_USE_JPOOL) == 0) {
2880                                 bus_dmamap_unload(sc->re_ldata.re_rx_mtag,
2881                                                   sc->re_ldata.re_rx_dmamap[i]);
2882                         }
2883                         m_freem(sc->re_ldata.re_rx_mbuf[i]);
2884                         sc->re_ldata.re_rx_mbuf[i] = NULL;
2885                 }
2886         }
2887 }
2888
2889 /*
2890  * Device suspend routine.  Stop the interface and save some PCI
2891  * settings in case the BIOS doesn't restore them properly on
2892  * resume.
2893  */
2894 static int
2895 re_suspend(device_t dev)
2896 {
2897 #ifndef BURN_BRIDGES
2898         int i;
2899 #endif
2900         struct re_softc *sc = device_get_softc(dev);
2901         struct ifnet *ifp = &sc->arpcom.ac_if;
2902
2903         lwkt_serialize_enter(ifp->if_serializer);
2904
2905         re_stop(sc);
2906
2907 #ifndef BURN_BRIDGES
2908         for (i = 0; i < 5; i++)
2909                 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2910         sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2911         sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2912         sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2913         sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2914 #endif
2915
2916         sc->re_flags |= RE_F_SUSPENDED;
2917
2918         lwkt_serialize_exit(ifp->if_serializer);
2919
2920         return (0);
2921 }
2922
2923 /*
2924  * Device resume routine.  Restore some PCI settings in case the BIOS
2925  * doesn't, re-enable busmastering, and restart the interface if
2926  * appropriate.
2927  */
2928 static int
2929 re_resume(device_t dev)
2930 {
2931         struct re_softc *sc = device_get_softc(dev);
2932         struct ifnet *ifp = &sc->arpcom.ac_if;
2933 #ifndef BURN_BRIDGES
2934         int i;
2935 #endif
2936
2937         lwkt_serialize_enter(ifp->if_serializer);
2938
2939 #ifndef BURN_BRIDGES
2940         /* better way to do this? */
2941         for (i = 0; i < 5; i++)
2942                 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
2943         pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
2944         pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
2945         pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
2946         pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
2947
2948         /* reenable busmastering */
2949         pci_enable_busmaster(dev);
2950         pci_enable_io(dev, SYS_RES_IOPORT);
2951 #endif
2952
2953         /* reinitialize interface if necessary */
2954         if (ifp->if_flags & IFF_UP)
2955                 re_init(sc);
2956
2957         sc->re_flags &= ~RE_F_SUSPENDED;
2958
2959         lwkt_serialize_exit(ifp->if_serializer);
2960
2961         return (0);
2962 }
2963
2964 /*
2965  * Stop all chip I/O so that the kernel's probe routines don't
2966  * get confused by errant DMAs when rebooting.
2967  */
2968 static void
2969 re_shutdown(device_t dev)
2970 {
2971         struct re_softc *sc = device_get_softc(dev);
2972         struct ifnet *ifp = &sc->arpcom.ac_if;
2973
2974         lwkt_serialize_enter(ifp->if_serializer);
2975         re_stop(sc);
2976         lwkt_serialize_exit(ifp->if_serializer);
2977 }
2978
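/*
 * Sysctl wrappers for the RX and TX hardware interrupt moderation
 * timers; both simply hand the per-direction value to re_sysctl_hwtime().
 */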
2979 static int
2980 re_sysctl_rxtime(SYSCTL_HANDLER_ARGS)
2981 {
2982         struct re_softc *sc = arg1;
2983
2984         return re_sysctl_hwtime(oidp, arg1, arg2, req, &sc->re_rx_time);
2985 }
2986
2987 static int
2988 re_sysctl_txtime(SYSCTL_HANDLER_ARGS)
2989 {
2990         struct re_softc *sc = arg1;
2991
2992         return re_sysctl_hwtime(oidp, arg1, arg2, req, &sc->re_tx_time);
2993 }
2994
2995 static int
2996 re_sysctl_hwtime(SYSCTL_HANDLER_ARGS, int *hwtime)
2997 {
2998         struct re_softc *sc = arg1;
2999         struct ifnet *ifp = &sc->arpcom.ac_if;
3000         int error, v;
3001
3002         lwkt_serialize_enter(ifp->if_serializer);
3003
3004         v = *hwtime;
3005         error = sysctl_handle_int(oidp, &v, 0, req);
3006         if (error || req->newptr == NULL)
3007                 goto back;
3008
3009         if (v <= 0) {
3010                 error = EINVAL;
3011                 goto back;
3012         }
3013
3014         if (v != *hwtime) {
3015                 *hwtime = v;
3016
3017                 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
3018                     IFF_RUNNING && sc->re_imtype == RE_IMTYPE_HW)
3019                         re_setup_hw_im(sc);
3020         }
3021 back:
3022         lwkt_serialize_exit(ifp->if_serializer);
3023         return error;
3024 }
3025
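/*
 * Sysctl handler for the simulated (timer based) interrupt moderation
 * period; if the value changes while the interface is running, the
 * timer interrupt is torn down and set up again.
 */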
3026 static int
3027 re_sysctl_simtime(SYSCTL_HANDLER_ARGS)
3028 {
3029         struct re_softc *sc = arg1;
3030         struct ifnet *ifp = &sc->arpcom.ac_if;
3031         int error, v;
3032
3033         lwkt_serialize_enter(ifp->if_serializer);
3034
3035         v = sc->re_sim_time;
3036         error = sysctl_handle_int(oidp, &v, 0, req);
3037         if (error || req->newptr == NULL)
3038                 goto back;
3039
3040         if (v <= 0) {
3041                 error = EINVAL;
3042                 goto back;
3043         }
3044
3045         if (v != sc->re_sim_time) {
3046                 sc->re_sim_time = v;
3047
3048                 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
3049                     IFF_RUNNING && sc->re_imtype == RE_IMTYPE_SIM) {
3050 #ifdef foo
3051                         int reg;
3052
3053                         /*
3054                          * The following code causes various strange
3055                          * performance problems.  Hmm ...
3056                          */
3057                         CSR_WRITE_2(sc, RE_IMR, 0);
3058                         if (!RE_IS_8139CP(sc))
3059                                 reg = RE_TIMERINT_8169;
3060                         else
3061                                 reg = RE_TIMERINT;
3062                         CSR_WRITE_4(sc, reg, 0);
3063                         CSR_READ_4(sc, reg); /* flush */
3064
3065                         CSR_WRITE_2(sc, RE_IMR, sc->re_intrs);
3066                         re_setup_sim_im(sc);
3067 #else
3068                         re_setup_intr(sc, 0, RE_IMTYPE_NONE);
3069                         DELAY(10);
3070                         re_setup_intr(sc, 1, RE_IMTYPE_SIM);
3071 #endif
3072                 }
3073         }
3074 back:
3075         lwkt_serialize_exit(ifp->if_serializer);
3076         return error;
3077 }
3078
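/*
 * Sysctl handler to switch the interrupt moderation type (hardware,
 * simulated or none) at runtime; hardware moderation is refused on
 * chips without the RE_C_HWIM capability.
 */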
3079 static int
3080 re_sysctl_imtype(SYSCTL_HANDLER_ARGS)
3081 {
3082         struct re_softc *sc = arg1;
3083         struct ifnet *ifp = &sc->arpcom.ac_if;
3084         int error, v;
3085
3086         lwkt_serialize_enter(ifp->if_serializer);
3087
3088         v = sc->re_imtype;
3089         error = sysctl_handle_int(oidp, &v, 0, req);
3090         if (error || req->newptr == NULL)
3091                 goto back;
3092
3093         if (v != RE_IMTYPE_HW && v != RE_IMTYPE_SIM && v != RE_IMTYPE_NONE) {
3094                 error = EINVAL;
3095                 goto back;
3096         }
3097         if (v == RE_IMTYPE_HW && (sc->re_caps & RE_C_HWIM) == 0) {
3098                 /* Can't do hardware interrupt moderation */
3099                 error = EOPNOTSUPP;
3100                 goto back;
3101         }
3102
3103         if (v != sc->re_imtype) {
3104                 sc->re_imtype = v;
3105                 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) ==
3106                     IFF_RUNNING)
3107                         re_setup_intr(sc, 1, sc->re_imtype);
3108         }
3109 back:
3110         lwkt_serialize_exit(ifp->if_serializer);
3111         return error;
3112 }
3113
3114 static void
3115 re_setup_hw_im(struct re_softc *sc)
3116 {
3117         KKASSERT(sc->re_caps & RE_C_HWIM);
3118
3119         /*
3120          * Interrupt moderation
3121          *
3122          * 0xABCD
3123          * A - unknown (maybe TX related)
3124          * B - TX timer (unit: 25us)
3125          * C - unknown (maybe RX related)
3126          * D - RX timer (unit: 25us)
3127          *
3128          *
3129          * re(4)'s interrupt moderation is actually controlled by
3130          * two variables, as on most other NICs (bge, bce, etc.):
3131          * o  timer
3132          * o  number of packets [P]
3133          *
3134          * The logical relationship between these two variables is
3135          * also similar to other NICs:
3136          * if (timer expires || packets > [P])
3137          *     an interrupt is delivered
3138          *
3139          * Currently we only know how to set the 'timer', but not the
3140          * 'number of packets', which seems to be ~30 as far as I have
3141          * tested (sinking ~900Kpps, the interrupt rate is ~30KHz).
3142          */
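	/*
	 * Rough worked example based on the numbers above: with the packet
	 * threshold [P] around 30, sinking ~900Kpps coalesces roughly
	 * 900000 / 30 = 30000 interrupts per second, which matches the
	 * observed ~30KHz interrupt rate.
	 */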
3143         CSR_WRITE_2(sc, RE_IM,
3144                     RE_IM_RXTIME(sc->re_rx_time) |
3145                     RE_IM_TXTIME(sc->re_tx_time) |
3146                     RE_IM_MAGIC);
3147 }
3148
3149 static void
3150 re_disable_hw_im(struct re_softc *sc)
3151 {
3152         if (sc->re_caps & RE_C_HWIM)
3153                 CSR_WRITE_2(sc, RE_IM, 0);
3154 }
3155
3156 static void
3157 re_setup_sim_im(struct re_softc *sc)
3158 {
3159         if (!RE_IS_8139CP(sc)) {
3160                 uint32_t ticks;
3161
3162                 /*
3163                  * The datasheet says the timer counts down at the bus
3164                  * clock rate, but the clock seems to run a little bit
3165                  * faster, so we apply some compensation here.
3166                  */
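		/*
		 * A sketch of the arithmetic, assuming re_sim_time is in
		 * microseconds and re_bus_speed in MHz: the nominal count of
		 * re_sim_time * re_bus_speed ticks is scaled by 8/5 (1.6x)
		 * to allow for the faster clock, e.g. a hypothetical 150us
		 * period on a 125MHz bus gives (150 * 125 * 8) / 5 = 30000
		 * ticks.
		 */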
3167                 ticks = (sc->re_sim_time * sc->re_bus_speed * 8) / 5;
3168                 CSR_WRITE_4(sc, RE_TIMERINT_8169, ticks);
3169         } else {
3170                 CSR_WRITE_4(sc, RE_TIMERINT, 0x400); /* XXX */
3171         }
3172         CSR_WRITE_4(sc, RE_TIMERCNT, 1); /* reload */
3173         sc->re_flags |= RE_F_TIMER_INTR;
3174 }
3175
3176 static void
3177 re_disable_sim_im(struct re_softc *sc)
3178 {
3179         if (!RE_IS_8139CP(sc))
3180                 CSR_WRITE_4(sc, RE_TIMERINT_8169, 0);
3181         else
3182                 CSR_WRITE_4(sc, RE_TIMERINT, 0);
3183         sc->re_flags &= ~RE_F_TIMER_INTR;
3184 }
3185
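/*
 * Select the interrupt mask and the RX/TX interrupt acknowledgement
 * bits that correspond to the requested interrupt moderation type.
 */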
3186 static void
3187 re_config_imtype(struct re_softc *sc, int imtype)
3188 {
3189         switch (imtype) {
3190         case RE_IMTYPE_HW:
3191                 KKASSERT(sc->re_caps & RE_C_HWIM);
3192                 /* FALL THROUGH */
3193         case RE_IMTYPE_NONE:
3194                 sc->re_intrs = RE_INTRS;
3195                 sc->re_rx_ack = RE_ISR_RX_OK | RE_ISR_FIFO_OFLOW |
3196                                 RE_ISR_RX_OVERRUN;
3197                 sc->re_tx_ack = RE_ISR_TX_OK;
3198                 break;
3199
3200         case RE_IMTYPE_SIM:
3201                 sc->re_intrs = RE_INTRS_TIMER;
3202                 sc->re_rx_ack = RE_ISR_TIMEOUT_EXPIRED;
3203                 sc->re_tx_ack = RE_ISR_TIMEOUT_EXPIRED;
3204                 break;
3205
3206         default:
3207                 panic("%s: unknown imtype %d",
3208                       sc->arpcom.ac_if.if_xname, imtype);
3209         }
3210 }
3211
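/*
 * Program the interrupt mask and switch between hardware, simulated
 * (timer based) and disabled interrupt moderation.
 */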
3212 static void
3213 re_setup_intr(struct re_softc *sc, int enable_intrs, int imtype)
3214 {
3215         re_config_imtype(sc, imtype);
3216
3217         if (enable_intrs)
3218                 CSR_WRITE_2(sc, RE_IMR, sc->re_intrs);
3219         else
3220                 CSR_WRITE_2(sc, RE_IMR, 0);
3221
3222         sc->re_npoll.ifpc_stcount = 0;
3223
3224         switch (imtype) {
3225         case RE_IMTYPE_NONE:
3226                 re_disable_sim_im(sc);
3227                 re_disable_hw_im(sc);
3228                 break;
3229
3230         case RE_IMTYPE_HW:
3231                 KKASSERT(sc->re_caps & RE_C_HWIM);
3232                 re_disable_sim_im(sc);
3233                 re_setup_hw_im(sc);
3234                 break;
3235
3236         case RE_IMTYPE_SIM:
3237                 re_disable_hw_im(sc);
3238                 re_setup_sim_im(sc);
3239                 break;
3240
3241         default:
3242                 panic("%s: unknown imtype %d",
3243                       sc->arpcom.ac_if.if_xname, imtype);
3244         }
3245 }
3246
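/*
 * Read the station address, either from the EEPROM on MAC versions
 * known to keep it there, or from the IDR registers.
 */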
3247 static void
3248 re_get_eaddr(struct re_softc *sc, uint8_t *eaddr)
3249 {
3250         int i;
3251
3252         if (sc->re_macver == RE_MACVER_11 ||
3253             sc->re_macver == RE_MACVER_12 ||
3254             sc->re_macver == RE_MACVER_30 ||
3255             sc->re_macver == RE_MACVER_31) {
3256                 uint16_t re_did;
3257
3258                 re_get_eewidth(sc);
3259                 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
3260                 if (re_did == 0x8128) {
3261                         uint16_t as[ETHER_ADDR_LEN / 2];
3262                         int eaddr_off;
3263
3264                         if (sc->re_macver == RE_MACVER_30 ||
3265                             sc->re_macver == RE_MACVER_31)
3266                                 eaddr_off = RE_EE_EADDR1;
3267                         else
3268                                 eaddr_off = RE_EE_EADDR0;
3269
3270                         /*
3271                          * Get station address from the EEPROM.
3272                          */
3273                         re_read_eeprom(sc, (caddr_t)as, eaddr_off, 3);
3274                         for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3275                                 as[i] = le16toh(as[i]);
3276                         bcopy(as, eaddr, ETHER_ADDR_LEN);
3277                         return;
3278                 }
3279         }
3280
3281         /*
3282          * Get station address from IDRx.
3283          */
3284         for (i = 0; i < ETHER_ADDR_LEN; ++i)
3285                 eaddr[i] = CSR_READ_1(sc, RE_IDR0 + i);
3286 }
3287
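/*
 * Allocate one coherent DMA region for the jumbo receive buffers and
 * carve it into RE_JBUF_COUNT(sc) chunks of RE_JBUF_SIZE bytes, all
 * kept on a free list.
 */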
3288 static int
3289 re_jpool_alloc(struct re_softc *sc)
3290 {
3291         struct re_list_data *ldata = &sc->re_ldata;
3292         struct re_jbuf *jbuf;
3293         bus_addr_t paddr;
3294         bus_size_t jpool_size;
3295         bus_dmamem_t dmem;
3296         caddr_t buf;
3297         int i, error;
3298
3299         lwkt_serialize_init(&ldata->re_jbuf_serializer);
3300
3301         ldata->re_jbuf = kmalloc(sizeof(struct re_jbuf) * RE_JBUF_COUNT(sc),
3302                                  M_DEVBUF, M_WAITOK | M_ZERO);
3303
3304         jpool_size = RE_JBUF_COUNT(sc) * RE_JBUF_SIZE;
3305
3306         error = bus_dmamem_coherent(sc->re_parent_tag,
3307                         RE_RXBUF_ALIGN, 0,
3308                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3309                         jpool_size, BUS_DMA_WAITOK, &dmem);
3310         if (error) {
3311                 device_printf(sc->re_dev, "could not allocate jumbo memory\n");
3312                 return error;
3313         }
3314         ldata->re_jpool_tag = dmem.dmem_tag;
3315         ldata->re_jpool_map = dmem.dmem_map;
3316         ldata->re_jpool = dmem.dmem_addr;
3317         paddr = dmem.dmem_busaddr;
3318
3319         /* ... and split it into 9KB chunks */
3320         SLIST_INIT(&ldata->re_jbuf_free);
3321
3322         buf = ldata->re_jpool;
3323         for (i = 0; i < RE_JBUF_COUNT(sc); i++) {
3324                 jbuf = &ldata->re_jbuf[i];
3325
3326                 jbuf->re_sc = sc;
3327                 jbuf->re_inuse = 0;
3328                 jbuf->re_slot = i;
3329                 jbuf->re_buf = buf;
3330                 jbuf->re_paddr = paddr;
3331
3332                 SLIST_INSERT_HEAD(&ldata->re_jbuf_free, jbuf, re_link);
3333
3334                 buf += RE_JBUF_SIZE;
3335                 paddr += RE_JBUF_SIZE;
3336         }
3337         return 0;
3338 }
3339
3340 static void
3341 re_jpool_free(struct re_softc *sc)
3342 {
3343         struct re_list_data *ldata = &sc->re_ldata;
3344
3345         if (ldata->re_jpool_tag != NULL) {
3346                 bus_dmamap_unload(ldata->re_jpool_tag, ldata->re_jpool_map);
3347                 bus_dmamem_free(ldata->re_jpool_tag, ldata->re_jpool,
3348                                 ldata->re_jpool_map);
3349                 bus_dma_tag_destroy(ldata->re_jpool_tag);
3350                 ldata->re_jpool_tag = NULL;
3351         }
3352
3353         if (ldata->re_jbuf != NULL) {
3354                 kfree(ldata->re_jbuf, M_DEVBUF);
3355                 ldata->re_jbuf = NULL;
3356         }
3357 }
3358
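/*
 * Take a jumbo buffer off the free list, or return NULL if the pool
 * is exhausted.
 */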
3359 static struct re_jbuf *
3360 re_jbuf_alloc(struct re_softc *sc)
3361 {
3362         struct re_list_data *ldata = &sc->re_ldata;
3363         struct re_jbuf *jbuf;
3364
3365         lwkt_serialize_enter(&ldata->re_jbuf_serializer);
3366
3367         jbuf = SLIST_FIRST(&ldata->re_jbuf_free);
3368         if (jbuf != NULL) {
3369                 SLIST_REMOVE_HEAD(&ldata->re_jbuf_free, re_link);
3370                 jbuf->re_inuse = 1;
3371         }
3372
3373         lwkt_serialize_exit(&ldata->re_jbuf_serializer);
3374
3375         return jbuf;
3376 }
3377
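/*
 * Drop a reference on a jumbo buffer and return it to the free list
 * once the last reference is gone.
 */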
3378 static void
3379 re_jbuf_free(void *arg)
3380 {
3381         struct re_jbuf *jbuf = arg;
3382         struct re_softc *sc = jbuf->re_sc;
3383         struct re_list_data *ldata = &sc->re_ldata;
3384
3385         if (&ldata->re_jbuf[jbuf->re_slot] != jbuf) {
3386                 panic("%s: free wrong jumbo buffer",
3387                       sc->arpcom.ac_if.if_xname);
3388         } else if (jbuf->re_inuse == 0) {
3389                 panic("%s: jumbo buffer already freed",
3390                       sc->arpcom.ac_if.if_xname);
3391         }
3392
3393         lwkt_serialize_enter(&ldata->re_jbuf_serializer);
3394         atomic_subtract_int(&jbuf->re_inuse, 1);
3395         if (jbuf->re_inuse == 0)
3396                 SLIST_INSERT_HEAD(&ldata->re_jbuf_free, jbuf, re_link);
3397         lwkt_serialize_exit(&ldata->re_jbuf_serializer);
3398 }
3399
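/*
 * Take an additional reference on a jumbo buffer that is already in use.
 */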
3400 static void
3401 re_jbuf_ref(void *arg)
3402 {
3403         struct re_jbuf *jbuf = arg;
3404         struct re_softc *sc = jbuf->re_sc;
3405         struct re_list_data *ldata = &sc->re_ldata;
3406
3407         if (&ldata->re_jbuf[jbuf->re_slot] != jbuf) {
3408                 panic("%s: ref wrong jumbo buffer",
3409                       sc->arpcom.ac_if.if_xname);
3410         } else if (jbuf->re_inuse == 0) {
3411                 panic("%s: jumbo buffer already freed",
3412                       sc->arpcom.ac_if.if_xname);
3413         }
3414         atomic_add_int(&jbuf->re_inuse, 1);
3415 }