Do a major clean-up of the BUSDMA architecture. A large number of
[dragonfly.git] / sys / dev / netif / re / if_re.c
CommitLineData
af51229a
JS
1/*
2 * Copyright (c) 2004
3 * Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
4 *
5 * Copyright (c) 1997, 1998-2003
6 * Bill Paul <wpaul@windriver.com>. All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Bill Paul.
19 * 4. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33 * THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 * $FreeBSD: src/sys/dev/re/if_re.c,v 1.25 2004/06/09 14:34:01 naddy Exp $
1f7ab7c9 36 * $DragonFly: src/sys/dev/netif/re/if_re.c,v 1.27 2006/10/25 20:55:58 dillon Exp $
af51229a
JS
37 */
38
39/*
40 * RealTek 8139C+/8169/8169S/8110S PCI NIC driver
41 *
42 * Written by Bill Paul <wpaul@windriver.com>
43 * Senior Networking Software Engineer
44 * Wind River Systems
45 */
46
47/*
48 * This driver is designed to support RealTek's next generation of
49 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
50 * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S
51 * and the RTL8110S.
52 *
53 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
54 * with the older 8139 family, however it also supports a special
55 * C+ mode of operation that provides several new performance enhancing
56 * features. These include:
57 *
58 * o Descriptor based DMA mechanism. Each descriptor represents
59 * a single packet fragment. Data buffers may be aligned on
60 * any byte boundary.
61 *
62 * o 64-bit DMA
63 *
64 * o TCP/IP checksum offload for both RX and TX
65 *
66 * o High and normal priority transmit DMA rings
67 *
68 * o VLAN tag insertion and extraction
69 *
70 * o TCP large send (segmentation offload)
71 *
72 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
73 * programming API is fairly straightforward. The RX filtering, EEPROM
74 * access and PHY access is the same as it is on the older 8139 series
75 * chips.
76 *
77 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
78 * same programming API and feature set as the 8139C+ with the following
79 * differences and additions:
80 *
81 * o 1000Mbps mode
82 *
83 * o Jumbo frames
84 *
85 * o GMII and TBI ports/registers for interfacing with copper
86 * or fiber PHYs
87 *
88 * o RX and TX DMA rings can have up to 1024 descriptors
89 * (the 8139C+ allows a maximum of 64)
90 *
91 * o Slight differences in register layout from the 8139C+
92 *
93 * The TX start and timer interrupt registers are at different locations
94 * on the 8169 than they are on the 8139C+. Also, the status word in the
95 * RX descriptor has a slightly different bit layout. The 8169 does not
96 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
97 * copper gigE PHY.
98 *
99 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
100 * (the 'S' stands for 'single-chip'). These devices have the same
101 * programming API as the older 8169, but also have some vendor-specific
102 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
103 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
104 *
105 * This driver takes advantage of the RX and TX checksum offload and
106 * VLAN tag insertion/extraction features. It also implements TX
107 * interrupt moderation using the timer interrupt registers, which
108 * significantly reduces TX interrupt load. There is also support
109 * for jumbo frames, however the 8169/8169S/8110S can not transmit
25c5ec5f
MD
110 * jumbo frames larger than 7440, so the max MTU possible with this
111 * driver is 7422 bytes.
af51229a
JS
112 */
113
2b71c8f1
SZ
114#include "opt_polling.h"
115
af51229a
JS
116#include <sys/param.h>
117#include <sys/endian.h>
118#include <sys/systm.h>
119#include <sys/sockio.h>
120#include <sys/mbuf.h>
121#include <sys/malloc.h>
122#include <sys/module.h>
123#include <sys/kernel.h>
124#include <sys/socket.h>
78195a76 125#include <sys/serialize.h>
1f7ab7c9
MD
126#include <sys/bus.h>
127#include <sys/rman.h>
f0ee8b5b 128#include <sys/thread2.h>
af51229a
JS
129
130#include <net/if.h>
4d723e5a 131#include <net/ifq_var.h>
af51229a
JS
132#include <net/if_arp.h>
133#include <net/ethernet.h>
134#include <net/if_dl.h>
135#include <net/if_media.h>
136#include <net/if_types.h>
137#include <net/vlan/if_vlan_var.h>
138
139#include <net/bpf.h>
140
af51229a
JS
141#include <dev/netif/mii_layer/mii.h>
142#include <dev/netif/mii_layer/miivar.h>
143
3ff737e6 144#include <bus/pci/pcidevs.h>
af51229a
JS
145#include <bus/pci/pcireg.h>
146#include <bus/pci/pcivar.h>
147
148/* "controller miibus0" required. See GENERIC if you get errors here. */
149#include "miibus_if.h"
150
151#include <dev/netif/re/if_rereg.h>
152
8497ad23
MD
153/*
154 * The hardware supports checksumming but, as usual, some chipsets screw it
155 * all up and produce bogus packets, so we disable it by default.
156 */
af51229a 157#define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
8497ad23 158#define RE_DISABLE_HWCSUM
af51229a
JS
159
/*
 * Various supported device vendors/types and their names.
 *
 * Each entry pairs a PCI vendor/device ID with the hardware revision
 * (read from the RE_TXCFG register in re_probe()) that disambiguates
 * chips sharing the same PCI ID.  Terminated by an all-NULL sentinel.
 */
static struct re_type re_devs[] = {
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE528T, RE_HWREV_8169S,
		"D-Link DGE-528(T) Gigabit Ethernet Adapter" },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8139, RE_HWREV_8139CPLUS,
		"RealTek 8139C+ 10/100BaseTX" },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, RE_HWREV_8169,
		"RealTek 8169 Gigabit Ethernet" },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, RE_HWREV_8169S,
		"RealTek 8169S Single-chip Gigabit Ethernet" },
	{ PCI_VENDOR_REALTEK, PCI_PRODUCT_REALTEK_RT8169, RE_HWREV_8110S,
		"RealTek 8110S Single-chip Gigabit Ethernet" },
	{ PCI_VENDOR_COREGA, PCI_PRODUCT_COREGA_CG_LAPCIGT, RE_HWREV_8169S,
		"Corega CG-LAPCIGT Gigabit Ethernet" },
	{ PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1032, RE_HWREV_8169S,
		"Linksys EG1032 Gigabit Ethernet" },
	{ 0, 0, 0, NULL }
};
180
/*
 * Map of hardware revision codes (RE_TXCFG & RE_TXCFG_HWREV) to the
 * driver's chip class (RE_8139CPLUS vs. RE_8169) and a short name.
 * Terminated by an all-NULL sentinel; scanned in re_attach().
 */
static struct re_hwrev re_hwrevs[] = {
	{ RE_HWREV_8139CPLUS, RE_8139CPLUS, "C+"},
	{ RE_HWREV_8169, RE_8169, "8169"},
	{ RE_HWREV_8169S, RE_8169, "8169S"},
	{ RE_HWREV_8110S, RE_8169, "8110S"},
	{ 0, 0, NULL }
};
188
189static int re_probe(device_t);
190static int re_attach(device_t);
191static int re_detach(device_t);
192
8f77d350 193static int re_encap(struct re_softc *, struct mbuf **, int *, int *);
af51229a
JS
194
195static void re_dma_map_addr(void *, bus_dma_segment_t *, int, int);
196static void re_dma_map_desc(void *, bus_dma_segment_t *, int,
197 bus_size_t, int);
198static int re_allocmem(device_t, struct re_softc *);
199static int re_newbuf(struct re_softc *, int, struct mbuf *);
200static int re_rx_list_init(struct re_softc *);
201static int re_tx_list_init(struct re_softc *);
202static void re_rxeof(struct re_softc *);
203static void re_txeof(struct re_softc *);
204static void re_intr(void *);
205static void re_tick(void *);
78195a76 206static void re_tick_serialized(void *);
af51229a
JS
207static void re_start(struct ifnet *);
208static int re_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
209static void re_init(void *);
210static void re_stop(struct re_softc *);
211static void re_watchdog(struct ifnet *);
212static int re_suspend(device_t);
213static int re_resume(device_t);
214static void re_shutdown(device_t);
215static int re_ifmedia_upd(struct ifnet *);
216static void re_ifmedia_sts(struct ifnet *, struct ifmediareq *);
217
218static void re_eeprom_putbyte(struct re_softc *, int);
219static void re_eeprom_getword(struct re_softc *, int, u_int16_t *);
220static void re_read_eeprom(struct re_softc *, caddr_t, int, int, int);
221static int re_gmii_readreg(device_t, int, int);
222static int re_gmii_writereg(device_t, int, int, int);
223
224static int re_miibus_readreg(device_t, int, int);
225static int re_miibus_writereg(device_t, int, int, int);
226static void re_miibus_statchg(device_t);
227
228static void re_setmulti(struct re_softc *);
229static void re_reset(struct re_softc *);
230
231static int re_diag(struct re_softc *);
9c095379
MD
232#ifdef DEVICE_POLLING
233static void re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
234#endif
af51229a
JS
235
/*
 * Dispatch table hooking this driver into the newbus device framework
 * and the MII bus, plus the driver/module registration glue.
 */
static device_method_t re_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		re_probe),
	DEVMETHOD(device_attach,	re_attach),
	DEVMETHOD(device_detach,	re_detach),
	DEVMETHOD(device_suspend,	re_suspend),
	DEVMETHOD(device_resume,	re_resume),
	DEVMETHOD(device_shutdown,	re_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
	DEVMETHOD(miibus_statchg,	re_miibus_statchg),

	{ 0, 0 }
};

static driver_t re_driver = {
	"re",
	re_methods,
	sizeof(struct re_softc)
};

static devclass_t re_devclass;

DECLARE_DUMMY_MODULE(if_re);
/* The chip appears on both plain PCI and cardbus buses. */
DRIVER_MODULE(if_re, pci, re_driver, re_devclass, 0, 0);
DRIVER_MODULE(if_re, cardbus, re_driver, re_devclass, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);
269
/*
 * Helpers for bit-banging the EEPROM serial interface: set or clear
 * bits in the RE_EECMD register.  NB: both macros expect a local
 * variable named 'sc' (struct re_softc *) to be in scope at the
 * expansion site.
 */
#define EE_SET(x)	\
	CSR_WRITE_1(sc, RE_EECMD, CSR_READ_1(sc, RE_EECMD) | (x))

#define EE_CLR(x)	\
	CSR_WRITE_1(sc, RE_EECMD, CSR_READ_1(sc, RE_EECMD) & ~(x))
275
276/*
277 * Send a read command and address to the EEPROM, check for ACK.
278 */
279static void
280re_eeprom_putbyte(struct re_softc *sc, int addr)
281{
282 int d, i;
283
284 d = addr | sc->re_eecmd_read;
285
286 /*
287 * Feed in each bit and strobe the clock.
288 */
289 for (i = 0x400; i != 0; i >>= 1) {
290 if (d & i)
291 EE_SET(RE_EE_DATAIN);
292 else
293 EE_CLR(RE_EE_DATAIN);
294 DELAY(100);
295 EE_SET(RE_EE_CLK);
296 DELAY(150);
297 EE_CLR(RE_EE_CLK);
298 DELAY(100);
299 }
300}
301
302/*
303 * Read a word of data stored in the EEPROM at address 'addr.'
304 */
305static void
306re_eeprom_getword(struct re_softc *sc, int addr, uint16_t *dest)
307{
308 int i;
309 uint16_t word = 0;
310
311 /* Enter EEPROM access mode. */
312 CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_PROGRAM|RE_EE_SEL);
313
314 /*
315 * Send address of word we want to read.
316 */
317 re_eeprom_putbyte(sc, addr);
318
319 CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_PROGRAM|RE_EE_SEL);
320
321 /*
322 * Start reading bits from EEPROM.
323 */
324 for (i = 0x8000; i != 0; i >>= 1) {
325 EE_SET(RE_EE_CLK);
326 DELAY(100);
327 if (CSR_READ_1(sc, RE_EECMD) & RE_EE_DATAOUT)
328 word |= i;
329 EE_CLR(RE_EE_CLK);
330 DELAY(100);
331 }
332
333 /* Turn off EEPROM access mode. */
334 CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_OFF);
335
336 *dest = word;
337}
338
339/*
340 * Read a sequence of words from the EEPROM.
341 */
342static void
343re_read_eeprom(struct re_softc *sc, caddr_t dest, int off, int cnt, int swap)
344{
345 int i;
346 uint16_t word = 0, *ptr;
347
348 for (i = 0; i < cnt; i++) {
349 re_eeprom_getword(sc, off + i, &word);
350 ptr = (u_int16_t *)(dest + (i * 2));
351 if (swap)
352 *ptr = be16toh(word);
353 else
354 *ptr = word;
355 }
356}
357
358static int
359re_gmii_readreg(device_t dev, int phy, int reg)
360{
361 struct re_softc *sc = device_get_softc(dev);
362 u_int32_t rval;
363 int i;
364
365 if (phy != 1)
366 return(0);
367
368 /* Let the rgephy driver read the GMEDIASTAT register */
369
370 if (reg == RE_GMEDIASTAT)
371 return(CSR_READ_1(sc, RE_GMEDIASTAT));
372
373 CSR_WRITE_4(sc, RE_PHYAR, reg << 16);
374 DELAY(1000);
375
376 for (i = 0; i < RE_TIMEOUT; i++) {
377 rval = CSR_READ_4(sc, RE_PHYAR);
378 if (rval & RE_PHYAR_BUSY)
379 break;
380 DELAY(100);
381 }
382
383 if (i == RE_TIMEOUT) {
384 device_printf(dev, "PHY read failed\n");
385 return(0);
386 }
387
388 return(rval & RE_PHYAR_PHYDATA);
389}
390
391static int
392re_gmii_writereg(device_t dev, int phy, int reg, int data)
393{
394 struct re_softc *sc = device_get_softc(dev);
395 uint32_t rval;
396 int i;
397
398 CSR_WRITE_4(sc, RE_PHYAR,
399 (reg << 16) | (data & RE_PHYAR_PHYDATA) | RE_PHYAR_BUSY);
400 DELAY(1000);
401
402 for (i = 0; i < RE_TIMEOUT; i++) {
403 rval = CSR_READ_4(sc, RE_PHYAR);
404 if ((rval & RE_PHYAR_BUSY) == 0)
405 break;
406 DELAY(100);
407 }
408
409 if (i == RE_TIMEOUT)
410 device_printf(dev, "PHY write failed\n");
411
412 return(0);
413}
414
415static int
416re_miibus_readreg(device_t dev, int phy, int reg)
417{
418 struct re_softc *sc = device_get_softc(dev);
419 uint16_t rval = 0;
420 uint16_t re8139_reg = 0;
421
422 if (sc->re_type == RE_8169) {
423 rval = re_gmii_readreg(dev, phy, reg);
424 return(rval);
425 }
426
427 /* Pretend the internal PHY is only at address 0 */
428 if (phy)
429 return(0);
430
431 switch(reg) {
432 case MII_BMCR:
433 re8139_reg = RE_BMCR;
434 break;
435 case MII_BMSR:
436 re8139_reg = RE_BMSR;
437 break;
438 case MII_ANAR:
439 re8139_reg = RE_ANAR;
440 break;
441 case MII_ANER:
442 re8139_reg = RE_ANER;
443 break;
444 case MII_ANLPAR:
445 re8139_reg = RE_LPAR;
446 break;
447 case MII_PHYIDR1:
448 case MII_PHYIDR2:
449 return(0);
450 /*
451 * Allow the rlphy driver to read the media status
452 * register. If we have a link partner which does not
453 * support NWAY, this is the register which will tell
454 * us the results of parallel detection.
455 */
456 case RE_MEDIASTAT:
457 return(CSR_READ_1(sc, RE_MEDIASTAT));
458 default:
459 device_printf(dev, "bad phy register\n");
460 return(0);
461 }
462 rval = CSR_READ_2(sc, re8139_reg);
463 return(rval);
464}
465
466static int
467re_miibus_writereg(device_t dev, int phy, int reg, int data)
468{
469 struct re_softc *sc= device_get_softc(dev);
470 u_int16_t re8139_reg = 0;
471
472 if (sc->re_type == RE_8169)
473 return(re_gmii_writereg(dev, phy, reg, data));
474
475 /* Pretend the internal PHY is only at address 0 */
476 if (phy)
477 return(0);
478
479 switch(reg) {
480 case MII_BMCR:
481 re8139_reg = RE_BMCR;
482 break;
483 case MII_BMSR:
484 re8139_reg = RE_BMSR;
485 break;
486 case MII_ANAR:
487 re8139_reg = RE_ANAR;
488 break;
489 case MII_ANER:
490 re8139_reg = RE_ANER;
491 break;
492 case MII_ANLPAR:
493 re8139_reg = RE_LPAR;
494 break;
495 case MII_PHYIDR1:
496 case MII_PHYIDR2:
497 return(0);
498 default:
499 device_printf(dev, "bad phy register\n");
500 return(0);
501 }
502 CSR_WRITE_2(sc, re8139_reg, data);
503 return(0);
504}
505
/*
 * MII bus media status-change callback.  Intentionally empty: this
 * driver performs no MAC reprogramming when the PHY link state
 * changes, but the miibus interface requires the method to exist.
 */
static void
re_miibus_statchg(device_t dev)
{
}
510
511/*
512 * Program the 64-bit multicast hash filter.
513 */
514static void
515re_setmulti(struct re_softc *sc)
516{
517 struct ifnet *ifp = &sc->arpcom.ac_if;
518 int h = 0;
519 uint32_t hashes[2] = { 0, 0 };
520 struct ifmultiaddr *ifma;
521 uint32_t rxfilt;
522 int mcnt = 0;
523
524 rxfilt = CSR_READ_4(sc, RE_RXCFG);
525
526 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
527 rxfilt |= RE_RXCFG_RX_MULTI;
528 CSR_WRITE_4(sc, RE_RXCFG, rxfilt);
529 CSR_WRITE_4(sc, RE_MAR0, 0xFFFFFFFF);
530 CSR_WRITE_4(sc, RE_MAR4, 0xFFFFFFFF);
531 return;
532 }
533
534 /* first, zot all the existing hash bits */
535 CSR_WRITE_4(sc, RE_MAR0, 0);
536 CSR_WRITE_4(sc, RE_MAR4, 0);
537
538 /* now program new ones */
539 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
540 if (ifma->ifma_addr->sa_family != AF_LINK)
541 continue;
542 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
543 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
544 if (h < 32)
545 hashes[0] |= (1 << h);
546 else
547 hashes[1] |= (1 << (h - 32));
548 mcnt++;
549 }
550
551 if (mcnt)
552 rxfilt |= RE_RXCFG_RX_MULTI;
553 else
554 rxfilt &= ~RE_RXCFG_RX_MULTI;
555
556 CSR_WRITE_4(sc, RE_RXCFG, rxfilt);
557 CSR_WRITE_4(sc, RE_MAR0, hashes[0]);
558 CSR_WRITE_4(sc, RE_MAR4, hashes[1]);
559}
560
561static void
562re_reset(struct re_softc *sc)
563{
564 int i;
565
566 CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_RESET);
567
568 for (i = 0; i < RE_TIMEOUT; i++) {
569 DELAY(10);
570 if ((CSR_READ_1(sc, RE_COMMAND) & RE_CMD_RESET) == 0)
571 break;
572 }
573 if (i == RE_TIMEOUT)
574 if_printf(&sc->arpcom.ac_if, "reset never completed!\n");
575
576 CSR_WRITE_1(sc, 0x82, 1);
577}
578
/*
 * The following routine is designed to test for a defect on some
 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
 * lines connected to the bus, however for a 32-bit only card, they
 * should be pulled high. The result of this defect is that the
 * NIC will not work right if you plug it into a 64-bit slot: DMA
 * operations will be done with 64-bit transfers, which will fail
 * because the 64-bit data lines aren't connected.
 *
 * There's no way to work around this (short of taking a soldering
 * iron to the board), however we can detect it. The method we use
 * here is to put the NIC into digital loopback mode, set the receiver
 * to promiscuous mode, and then try to send a frame. We then compare
 * the frame data we sent to what was received. If the data matches,
 * then the NIC is working correctly, otherwise we know the user has
 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
 * slot. In the latter case, there's no way the NIC can work correctly,
 * so we print out a message on the console and abort the device attach.
 *
 * Returns 0 if the loopback frame came back intact, ENOBUFS if the
 * test mbuf could not be allocated, or EIO on any test failure.
 */

static int
re_diag(struct re_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m0;
	struct ether_header *eh;
	struct re_desc *cur_rx;
	uint16_t status;
	uint32_t rxstat;
	int total_len, i, error = 0;
	/* Recognizable dummy MAC addresses for the test frame. */
	uint8_t dst[ETHER_ADDR_LEN] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	uint8_t src[ETHER_ADDR_LEN] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */

	MGETHDR(m0, MB_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return(ENOBUFS);

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->re_testmode = 1;
	re_init(sc);
	re_stop(sc);
	DELAY(100000);
	re_init(sc);

	/* Put some data in the mbuf: a minimum-size Ethernet frame. */

	eh = mtod(m0, struct ether_header *);
	bcopy (dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy (src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 * Note: ifq_handoff() ultimately calls re_start() for us.
	 */

	CSR_WRITE_2(sc, RE_ISR, 0xFFFF);
	error = ifq_handoff(ifp, m0, NULL);
	if (error) {
		/* ifq_handoff() consumed the mbuf even on failure. */
		m0 = NULL;
		goto done;
	}
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RE_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RE_ISR);
		/* Both the timer-expired and RX-OK bits must be set. */
		if ((status & (RE_ISR_TIMEOUT_EXPIRED|RE_ISR_RX_OK)) ==
		    (RE_ISR_TIMEOUT_EXPIRED|RE_ISR_RX_OK))
			break;
		DELAY(10);
	}

	if (i == RE_TIMEOUT) {
		if_printf(ifp, "diagnostic failed to receive packet "
			  "in loopback mode\n");
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	bus_dmamap_sync(sc->re_ldata.re_rx_list_tag,
			sc->re_ldata.re_rx_list_map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->re_ldata.re_mtag, sc->re_ldata.re_rx_dmamap[0],
			BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->re_ldata.re_mtag, sc->re_ldata.re_rx_dmamap[0]);

	/* Take ownership of the received mbuf from the ring. */
	m0 = sc->re_ldata.re_rx_mbuf[0];
	sc->re_ldata.re_rx_mbuf[0] = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->re_ldata.re_rx_list[0];
	total_len = RE_RXBYTES(cur_rx);
	rxstat = le32toh(cur_rx->re_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		if_printf(ifp, "diagnostic failed, received short packet\n");
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */
	/* (src is an array, so &src and src compare the same bytes.) */

	if (bcmp(eh->ether_dhost, dst, ETHER_ADDR_LEN) ||
	    bcmp(eh->ether_shost, &src, ETHER_ADDR_LEN) ||
	    be16toh(eh->ether_type) != ETHERTYPE_IP) {
		if_printf(ifp, "WARNING, DMA FAILURE!\n");
		if_printf(ifp, "expected TX data: %6D/%6D/0x%x\n",
		    dst, ":", src, ":", ETHERTYPE_IP);
		if_printf(ifp, "received RX data: %6D/%6D/0x%x\n",
		    eh->ether_dhost, ":", eh->ether_shost, ":",
		    ntohs(eh->ether_type));
		if_printf(ifp, "You may have a defective 32-bit NIC plugged "
		    "into a 64-bit PCI slot.\n");
		if_printf(ifp, "Please re-install the NIC in a 32-bit slot "
		    "for proper operation.\n");
		if_printf(ifp, "Read the re(4) man page for more details.\n");
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->re_testmode = 0;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(sc);
	if (m0 != NULL)
		m_freem(m0);

	return (error);
}
728
729/*
730 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
731 * IDs against our list and return a device name if we find a match.
732 */
733static int
734re_probe(device_t dev)
735{
736 struct re_type *t;
737 struct re_softc *sc;
738 int rid;
739 uint32_t hwrev;
740 uint16_t vendor, product;
741
742 t = re_devs;
743
744 vendor = pci_get_vendor(dev);
745 product = pci_get_device(dev);
746
5fdf38d0
SZ
747 /*
748 * Only attach to rev.3 of the Linksys EG1032 adapter.
749 * Rev.2 is supported by sk(4).
750 */
751 if (vendor == PCI_VENDOR_LINKSYS &&
752 product == PCI_PRODUCT_LINKSYS_EG1032 &&
753 pci_get_subdevice(dev) != PCI_SUBDEVICE_LINKSYS_EG1032_REV3)
754 return ENXIO;
755
af51229a
JS
756 for (t = re_devs; t->re_name != NULL; t++) {
757 if (product == t->re_did && vendor == t->re_vid)
758 break;
759 }
760
761 /*
762 * Check if we found a RealTek device.
763 */
764 if (t->re_name == NULL)
765 return(ENXIO);
766
767 /*
768 * Temporarily map the I/O space so we can read the chip ID register.
769 */
efda3bd0 770 sc = kmalloc(sizeof(*sc), M_TEMP, M_WAITOK | M_ZERO);
ecd80f47
JS
771 rid = RE_PCI_LOIO;
772 sc->re_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
af51229a
JS
773 RF_ACTIVE);
774 if (sc->re_res == NULL) {
775 device_printf(dev, "couldn't map ports/memory\n");
efda3bd0 776 kfree(sc, M_TEMP);
af51229a
JS
777 return(ENXIO);
778 }
779
780 sc->re_btag = rman_get_bustag(sc->re_res);
781 sc->re_bhandle = rman_get_bushandle(sc->re_res);
782
783 hwrev = CSR_READ_4(sc, RE_TXCFG) & RE_TXCFG_HWREV;
ecd80f47 784 bus_release_resource(dev, SYS_RES_IOPORT, RE_PCI_LOIO, sc->re_res);
efda3bd0 785 kfree(sc, M_TEMP);
af51229a
JS
786
787 /*
788 * and continue matching for the specific chip...
789 */
790 for (; t->re_name != NULL; t++) {
791 if (product == t->re_did && vendor == t->re_vid &&
792 t->re_basetype == hwrev) {
793 device_set_desc(dev, t->re_name);
794 return(0);
795 }
796 }
797 return(ENXIO);
798}
799
/*
 * This routine takes the segment list provided as the result of
 * a bus_dma_map_load() operation and assigns the addresses/lengths
 * to RealTek DMA descriptors. This can be called either by the RX
 * code or the TX code. In the RX case, we'll probably wind up mapping
 * at most one segment. For the TX case, there could be any number of
 * segments since TX packets may span multiple mbufs. In either case,
 * if the number of segments is larger than the re_maxsegs limit
 * specified by the caller, we abort the mapping operation. Sadly,
 * whoever designed the buffer mapping API did not provide a way to
 * return an error from here, so we have to fake it a bit: failure is
 * reported back by setting ctx->re_maxsegs to 0; on success it is
 * set to the number of segments consumed and ctx->re_idx is advanced
 * to the last descriptor used.
 */

static void
re_dma_map_desc(void *arg, bus_dma_segment_t *segs, int nseg,
		bus_size_t mapsize, int error)
{
	struct re_dmaload_arg *ctx;
	struct re_desc *d = NULL;
	int i = 0, idx;
	uint32_t cmdstat;

	/* bus_dmamap_load() failed upstream; nothing to map. */
	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there's too many segments */
	if (nseg > ctx->re_maxsegs) {
		ctx->re_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it start transmission or
	 * reception.)
	 */
	idx = ctx->re_idx;
	for (;;) {
		d = &ctx->re_ring[idx];
		/* Descriptor still owned by the chip: ring is full. */
		if (le32toh(d->re_cmdstat) & RE_RDESC_STAT_OWN) {
			ctx->re_maxsegs = 0;
			return;
		}
		/* Low bits of cmdstat carry the fragment length. */
		cmdstat = segs[i].ds_len;
		d->re_bufaddr_lo = htole32(RE_ADDR_LO(segs[i].ds_addr));
		d->re_bufaddr_hi = htole32(RE_ADDR_HI(segs[i].ds_addr));
		if (i == 0)
			cmdstat |= RE_TDESC_CMD_SOF;
		else
			cmdstat |= RE_TDESC_CMD_OWN;
		/*
		 * NOTE(review): the EOR check uses RE_RX_DESC_CNT even when
		 * mapping the TX ring; this is only correct if the TX and RX
		 * rings have the same descriptor count — TODO confirm.
		 */
		if (idx == (RE_RX_DESC_CNT - 1))
			cmdstat |= RE_TDESC_CMD_EOR;
		d->re_cmdstat = htole32(cmdstat | ctx->re_flags);
		i++;
		if (i == nseg)
			break;
		RE_DESC_INC(idx);
	}

	/* Mark the last descriptor and report results to the caller. */
	d->re_cmdstat |= htole32(RE_TDESC_CMD_EOF);
	ctx->re_maxsegs = nseg;
	ctx->re_idx = idx;
}
871
872/*
873 * Map a single buffer address.
874 */
875
876static void
877re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
878{
879 uint32_t *addr;
880
881 if (error)
882 return;
883
884 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
885 addr = arg;
886 *addr = segs->ds_addr;
887}
888
889static int
890re_allocmem(device_t dev, struct re_softc *sc)
891{
892 int error, i, nseg;
893
894 /*
895 * Allocate map for RX mbufs.
896 */
897 nseg = 32;
898 error = bus_dma_tag_create(sc->re_parent_tag, ETHER_ALIGN, 0,
899 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
900 NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
901 &sc->re_ldata.re_mtag);
902 if (error) {
903 device_printf(dev, "could not allocate dma tag\n");
904 return(error);
905 }
906
907 /*
908 * Allocate map for TX descriptor list.
909 */
910 error = bus_dma_tag_create(sc->re_parent_tag, RE_RING_ALIGN,
911 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
912 NULL, RE_TX_LIST_SZ, 1, RE_TX_LIST_SZ, BUS_DMA_ALLOCNOW,
913 &sc->re_ldata.re_tx_list_tag);
914 if (error) {
915 device_printf(dev, "could not allocate dma tag\n");
916 return(error);
917 }
918
919 /* Allocate DMA'able memory for the TX ring */
920
921 error = bus_dmamem_alloc(sc->re_ldata.re_tx_list_tag,
922 (void **)&sc->re_ldata.re_tx_list, BUS_DMA_WAITOK | BUS_DMA_ZERO,
923 &sc->re_ldata.re_tx_list_map);
924 if (error) {
925 device_printf(dev, "could not allocate TX ring\n");
926 return(error);
927 }
928
929 /* Load the map for the TX ring. */
930
931 error = bus_dmamap_load(sc->re_ldata.re_tx_list_tag,
932 sc->re_ldata.re_tx_list_map, sc->re_ldata.re_tx_list,
933 RE_TX_LIST_SZ, re_dma_map_addr,
934 &sc->re_ldata.re_tx_list_addr, BUS_DMA_NOWAIT);
935 if (error) {
936 device_printf(dev, "could not get addres of TX ring\n");
937 return(error);
938 }
939
940 /* Create DMA maps for TX buffers */
941
942 for (i = 0; i < RE_TX_DESC_CNT; i++) {
943 error = bus_dmamap_create(sc->re_ldata.re_mtag, 0,
944 &sc->re_ldata.re_tx_dmamap[i]);
945 if (error) {
946 device_printf(dev, "can't create DMA map for TX\n");
947 return(error);
948 }
949 }
950
951 /*
952 * Allocate map for RX descriptor list.
953 */
954 error = bus_dma_tag_create(sc->re_parent_tag, RE_RING_ALIGN,
955 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
7b4b20cd 956 NULL, RE_RX_LIST_SZ, 1, RE_RX_LIST_SZ, BUS_DMA_ALLOCNOW,
af51229a
JS
957 &sc->re_ldata.re_rx_list_tag);
958 if (error) {
959 device_printf(dev, "could not allocate dma tag\n");
960 return(error);
961 }
962
963 /* Allocate DMA'able memory for the RX ring */
964
965 error = bus_dmamem_alloc(sc->re_ldata.re_rx_list_tag,
966 (void **)&sc->re_ldata.re_rx_list, BUS_DMA_WAITOK | BUS_DMA_ZERO,
967 &sc->re_ldata.re_rx_list_map);
968 if (error) {
969 device_printf(dev, "could not allocate RX ring\n");
970 return(error);
971 }
972
973 /* Load the map for the RX ring. */
974
975 error = bus_dmamap_load(sc->re_ldata.re_rx_list_tag,
976 sc->re_ldata.re_rx_list_map, sc->re_ldata.re_rx_list,
7b4b20cd 977 RE_RX_LIST_SZ, re_dma_map_addr,
af51229a
JS
978 &sc->re_ldata.re_rx_list_addr, BUS_DMA_NOWAIT);
979 if (error) {
980 device_printf(dev, "could not get address of RX ring\n");
981 return(error);
982 }
983
984 /* Create DMA maps for RX buffers */
985
986 for (i = 0; i < RE_RX_DESC_CNT; i++) {
987 error = bus_dmamap_create(sc->re_ldata.re_mtag, 0,
988 &sc->re_ldata.re_rx_dmamap[i]);
989 if (error) {
990 device_printf(dev, "can't create DMA map for RX\n");
991 return(ENOMEM);
992 }
993 }
994
995 return(0);
996}
997
998/*
999 * Attach the interface. Allocate softc structures, do ifmedia
1000 * setup and ethernet/BPF attach.
1001 */
1002static int
1003re_attach(device_t dev)
1004{
1005 struct re_softc *sc = device_get_softc(dev);
1006 struct ifnet *ifp;
1007 struct re_hwrev *hw_rev;
1008 uint8_t eaddr[ETHER_ADDR_LEN];
1009 int hwrev;
1010 u_int16_t re_did = 0;
1011 int error = 0, rid, i;
1012
1013 callout_init(&sc->re_timer);
1014
1015#ifndef BURN_BRIDGES
1016 /*
1017 * Handle power management nonsense.
1018 */
1019
1020 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1021 uint32_t membase, irq;
1022
1023 /* Save important PCI config data. */
1024 membase = pci_read_config(dev, RE_PCI_LOMEM, 4);
1025 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1026
1027 /* Reset the power state. */
1028 device_printf(dev, "chip is is in D%d power mode "
1029 "-- setting to D0\n", pci_get_powerstate(dev));
1030
1031 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1032
1033 /* Restore PCI config data. */
1034 pci_write_config(dev, RE_PCI_LOMEM, membase, 4);
1035 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1036 }
1037#endif
1038 /*
1039 * Map control/status registers.
1040 */
1041 pci_enable_busmaster(dev);
1042
ecd80f47
JS
1043 rid = RE_PCI_LOIO;
1044 sc->re_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
af51229a
JS
1045 RF_ACTIVE);
1046
1047 if (sc->re_res == NULL) {
1048 device_printf(dev, "couldn't map ports/memory\n");
1049 error = ENXIO;
1050 goto fail;
1051 }
1052
1053 sc->re_btag = rman_get_bustag(sc->re_res);
1054 sc->re_bhandle = rman_get_bushandle(sc->re_res);
1055
1056 /* Allocate interrupt */
1057 rid = 0;
1058 sc->re_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1059 RF_SHAREABLE | RF_ACTIVE);
1060
1061 if (sc->re_irq == NULL) {
1062 device_printf(dev, "couldn't map interrupt\n");
1063 error = ENXIO;
1064 goto fail;
1065 }
1066
1067 /* Reset the adapter. */
1068 re_reset(sc);
1069
1070 hwrev = CSR_READ_4(sc, RE_TXCFG) & RE_TXCFG_HWREV;
1071 for (hw_rev = re_hwrevs; hw_rev->re_desc != NULL; hw_rev++) {
1072 if (hw_rev->re_rev == hwrev) {
1073 sc->re_type = hw_rev->re_type;
1074 break;
1075 }
1076 }
1077
1078 if (sc->re_type == RE_8169) {
1079 /* Set RX length mask */
1080 sc->re_rxlenmask = RE_RDESC_STAT_GFRAGLEN;
1081
1082 /* Force station address autoload from the EEPROM */
1083 CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_AUTOLOAD);
1084 for (i = 0; i < RE_TIMEOUT; i++) {
1085 if ((CSR_READ_1(sc, RE_EECMD) & RE_EEMODE_AUTOLOAD) == 0)
1086 break;
1087 DELAY(100);
1088 }
1089 if (i == RE_TIMEOUT)
1090 device_printf(dev, "eeprom autoload timed out\n");
1091
1092 for (i = 0; i < ETHER_ADDR_LEN; i++)
1093 eaddr[i] = CSR_READ_1(sc, RE_IDR0 + i);
1094 } else {
1095 uint16_t as[3];
1096
1097 /* Set RX length mask */
1098 sc->re_rxlenmask = RE_RDESC_STAT_FRAGLEN;
1099
1100 sc->re_eecmd_read = RE_EECMD_READ_6BIT;
1101 re_read_eeprom(sc, (caddr_t)&re_did, 0, 1, 0);
1102 if (re_did != 0x8129)
1103 sc->re_eecmd_read = RE_EECMD_READ_8BIT;
1104
1105 /*
1106 * Get station address from the EEPROM.
1107 */
1108 re_read_eeprom(sc, (caddr_t)as, RE_EE_EADDR, 3, 0);
1109 for (i = 0; i < 3; i++) {
1110 eaddr[(i * 2) + 0] = as[i] & 0xff;
1111 eaddr[(i * 2) + 1] = as[i] >> 8;
1112 }
1113 }
1114
1115 /*
1116 * Allocate the parent bus DMA tag appropriate for PCI.
1117 */
1118#define RE_NSEG_NEW 32
1119 error = bus_dma_tag_create(NULL, /* parent */
1120 1, 0, /* alignment, boundary */
1121 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
1122 BUS_SPACE_MAXADDR, /* highaddr */
1123 NULL, NULL, /* filter, filterarg */
1124 MAXBSIZE, RE_NSEG_NEW, /* maxsize, nsegments */
1125 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
1126 BUS_DMA_ALLOCNOW, /* flags */
1127 &sc->re_parent_tag);
1128 if (error)
1129 goto fail;
1130
1131 error = re_allocmem(dev, sc);
1132
1133 if (error)
1134 goto fail;
1135
1136 /* Do MII setup */
1137 if (mii_phy_probe(dev, &sc->re_miibus,
1138 re_ifmedia_upd, re_ifmedia_sts)) {
1139 device_printf(dev, "MII without any phy!\n");
1140 error = ENXIO;
1141 goto fail;
1142 }
1143
1144 ifp = &sc->arpcom.ac_if;
1145 ifp->if_softc = sc;
1146 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1147 ifp->if_mtu = ETHERMTU;
1148 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1149 ifp->if_ioctl = re_ioctl;
af51229a
JS
1150 ifp->if_capabilities = IFCAP_VLAN_MTU;
1151 ifp->if_start = re_start;
af51229a
JS
1152 ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING;
1153#ifdef DEVICE_POLLING
9c095379 1154 ifp->if_poll = re_poll;
af51229a
JS
1155#endif
1156 ifp->if_watchdog = re_watchdog;
1157 ifp->if_init = re_init;
1158 if (sc->re_type == RE_8169)
1159 ifp->if_baudrate = 1000000000;
1160 else
1161 ifp->if_baudrate = 100000000;
000181b2
JS
1162 ifq_set_maxlen(&ifp->if_snd, RE_IFQ_MAXLEN);
1163 ifq_set_ready(&ifp->if_snd);
8497ad23
MD
1164#ifdef RE_DISABLE_HWCSUM
1165 ifp->if_capenable = ifp->if_capabilities & ~IFCAP_HWCSUM;
1166 ifp->if_hwassist = 0;
1167#else
af51229a 1168 ifp->if_capenable = ifp->if_capabilities;
8497ad23
MD
1169 ifp->if_hwassist = RE_CSUM_FEATURES;
1170#endif
af51229a 1171
af51229a
JS
1172 /*
1173 * Call MI attach routine.
1174 */
78195a76 1175 ether_ifattach(ifp, eaddr, NULL);
af51229a 1176
78195a76 1177 lwkt_serialize_enter(ifp->if_serializer);
af51229a
JS
1178 /* Perform hardware diagnostic. */
1179 error = re_diag(sc);
78195a76 1180 lwkt_serialize_exit(ifp->if_serializer);
af51229a
JS
1181
1182 if (error) {
1183 device_printf(dev, "hardware diagnostic failure\n");
1184 ether_ifdetach(ifp);
1185 goto fail;
1186 }
1187
1188 /* Hook interrupt last to avoid having to lock softc */
78195a76
MD
1189 error = bus_setup_intr(dev, sc->re_irq, INTR_NETSAFE, re_intr, sc,
1190 &sc->re_intrhand, ifp->if_serializer);
af51229a
JS
1191
1192 if (error) {
1193 device_printf(dev, "couldn't set up irq\n");
1194 ether_ifdetach(ifp);
1195 goto fail;
1196 }
1197
1198fail:
1199 if (error)
1200 re_detach(dev);
1201
1202 return (error);
1203}
1204
1205/*
1206 * Shutdown hardware and free up resources. This can be called any
1207 * time after the mutex has been initialized. It is called in both
1208 * the error case in attach and the normal detach case so it needs
1209 * to be careful about only freeing resources that have actually been
1210 * allocated.
1211 */
1212static int
1213re_detach(device_t dev)
1214{
1215 struct re_softc *sc = device_get_softc(dev);
1216 struct ifnet *ifp = &sc->arpcom.ac_if;
f0ee8b5b 1217 int i;
af51229a 1218
af51229a
JS
1219 /* These should only be active if attach succeeded */
1220 if (device_is_attached(dev)) {
cdf89432 1221 lwkt_serialize_enter(ifp->if_serializer);
af51229a 1222 re_stop(sc);
cdf89432
SZ
1223 bus_teardown_intr(dev, sc->re_irq, sc->re_intrhand);
1224 lwkt_serialize_exit(ifp->if_serializer);
1225
af51229a
JS
1226 ether_ifdetach(ifp);
1227 }
1228 if (sc->re_miibus)
1229 device_delete_child(dev, sc->re_miibus);
1230 bus_generic_detach(dev);
1231
af51229a
JS
1232 if (sc->re_irq)
1233 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->re_irq);
cdf89432 1234 if (sc->re_res) {
6ffa456d 1235 bus_release_resource(dev, SYS_RES_IOPORT, RE_PCI_LOIO,
af51229a 1236 sc->re_res);
cdf89432 1237 }
af51229a
JS
1238
1239 /* Unload and free the RX DMA ring memory and map */
1240
1241 if (sc->re_ldata.re_rx_list_tag) {
1242 bus_dmamap_unload(sc->re_ldata.re_rx_list_tag,
1243 sc->re_ldata.re_rx_list_map);
1244 bus_dmamem_free(sc->re_ldata.re_rx_list_tag,
1245 sc->re_ldata.re_rx_list,
1246 sc->re_ldata.re_rx_list_map);
1247 bus_dma_tag_destroy(sc->re_ldata.re_rx_list_tag);
1248 }
1249
1250 /* Unload and free the TX DMA ring memory and map */
1251
1252 if (sc->re_ldata.re_tx_list_tag) {
1253 bus_dmamap_unload(sc->re_ldata.re_tx_list_tag,
1254 sc->re_ldata.re_tx_list_map);
1255 bus_dmamem_free(sc->re_ldata.re_tx_list_tag,
1256 sc->re_ldata.re_tx_list,
1257 sc->re_ldata.re_tx_list_map);
1258 bus_dma_tag_destroy(sc->re_ldata.re_tx_list_tag);
1259 }
1260
1261 /* Destroy all the RX and TX buffer maps */
1262
1263 if (sc->re_ldata.re_mtag) {
1264 for (i = 0; i < RE_TX_DESC_CNT; i++)
1265 bus_dmamap_destroy(sc->re_ldata.re_mtag,
1266 sc->re_ldata.re_tx_dmamap[i]);
1267 for (i = 0; i < RE_RX_DESC_CNT; i++)
1268 bus_dmamap_destroy(sc->re_ldata.re_mtag,
1269 sc->re_ldata.re_rx_dmamap[i]);
1270 bus_dma_tag_destroy(sc->re_ldata.re_mtag);
1271 }
1272
1273 /* Unload and free the stats buffer and map */
1274
1275 if (sc->re_ldata.re_stag) {
1276 bus_dmamap_unload(sc->re_ldata.re_stag,
1277 sc->re_ldata.re_rx_list_map);
1278 bus_dmamem_free(sc->re_ldata.re_stag,
1279 sc->re_ldata.re_stats,
1280 sc->re_ldata.re_smap);
1281 bus_dma_tag_destroy(sc->re_ldata.re_stag);
1282 }
1283
1284 if (sc->re_parent_tag)
1285 bus_dma_tag_destroy(sc->re_parent_tag);
1286
af51229a
JS
1287 return(0);
1288}
1289
1290static int
1291re_newbuf(struct re_softc *sc, int idx, struct mbuf *m)
1292{
1293 struct re_dmaload_arg arg;
1294 struct mbuf *n = NULL;
1295 int error;
1296
1297 if (m == NULL) {
1298 n = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
1299 if (n == NULL)
1300 return(ENOBUFS);
1301 m = n;
1302 } else
1303 m->m_data = m->m_ext.ext_buf;
1304
1305 /*
1306 * Initialize mbuf length fields and fixup
1307 * alignment so that the frame payload is
1308 * longword aligned.
1309 */
1310 m->m_len = m->m_pkthdr.len = MCLBYTES;
1311 m_adj(m, ETHER_ALIGN);
1312
1313 arg.sc = sc;
1314 arg.re_idx = idx;
1315 arg.re_maxsegs = 1;
1316 arg.re_flags = 0;
1317 arg.re_ring = sc->re_ldata.re_rx_list;
1318
1319 error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag,
1320 sc->re_ldata.re_rx_dmamap[idx], m, re_dma_map_desc,
1321 &arg, BUS_DMA_NOWAIT);
1322 if (error || arg.re_maxsegs != 1) {
1323 if (n != NULL)
1324 m_freem(n);
1325 return (ENOMEM);
1326 }
1327
1328 sc->re_ldata.re_rx_list[idx].re_cmdstat |= htole32(RE_RDESC_CMD_OWN);
1329 sc->re_ldata.re_rx_mbuf[idx] = m;
1330
1331 bus_dmamap_sync(sc->re_ldata.re_mtag, sc->re_ldata.re_rx_dmamap[idx],
1332 BUS_DMASYNC_PREREAD);
1333
1334 return(0);
1335}
1336
1337static int
1338re_tx_list_init(struct re_softc *sc)
1339{
1340 bzero(sc->re_ldata.re_tx_list, RE_TX_LIST_SZ);
1341 bzero(&sc->re_ldata.re_tx_mbuf, RE_TX_DESC_CNT * sizeof(struct mbuf *));
1342
1343 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag,
1344 sc->re_ldata.re_tx_list_map, BUS_DMASYNC_PREWRITE);
1345 sc->re_ldata.re_tx_prodidx = 0;
1346 sc->re_ldata.re_tx_considx = 0;
1347 sc->re_ldata.re_tx_free = RE_TX_DESC_CNT;
1348
1349 return(0);
1350}
1351
1352static int
1353re_rx_list_init(struct re_softc *sc)
1354{
1355 int i, error;
1356
1357 bzero(sc->re_ldata.re_rx_list, RE_RX_LIST_SZ);
1358 bzero(&sc->re_ldata.re_rx_mbuf, RE_RX_DESC_CNT * sizeof(struct mbuf *));
1359
1360 for (i = 0; i < RE_RX_DESC_CNT; i++) {
1361 error = re_newbuf(sc, i, NULL);
1362 if (error)
1363 return(error);
1364 }
1365
1366 /* Flush the RX descriptors */
1367
1368 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag,
7b4b20cd 1369 sc->re_ldata.re_rx_list_map, BUS_DMASYNC_PREWRITE);
af51229a
JS
1370
1371 sc->re_ldata.re_rx_prodidx = 0;
1372 sc->re_head = sc->re_tail = NULL;
1373
1374 return(0);
1375}
1376
1377/*
1378 * RX handler for C+ and 8169. For the gigE chips, we support
1379 * the reception of jumbo frames that have been fragmented
1380 * across multiple 2K mbuf cluster buffers.
1381 */
1382static void
1383re_rxeof(struct re_softc *sc)
1384{
1385 struct ifnet *ifp = &sc->arpcom.ac_if;
1386 struct mbuf *m;
1387 struct re_desc *cur_rx;
af51229a
JS
1388 uint32_t rxstat, rxvlan;
1389 int i, total_len;
1390
1391 /* Invalidate the descriptor memory */
1392
1393 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag,
1394 sc->re_ldata.re_rx_list_map, BUS_DMASYNC_POSTREAD);
1395
1396 for (i = sc->re_ldata.re_rx_prodidx;
1397 RE_OWN(&sc->re_ldata.re_rx_list[i]) == 0 ; RE_DESC_INC(i)) {
1398 cur_rx = &sc->re_ldata.re_rx_list[i];
1399 m = sc->re_ldata.re_rx_mbuf[i];
1400 total_len = RE_RXBYTES(cur_rx);
1401 rxstat = le32toh(cur_rx->re_cmdstat);
1402 rxvlan = le32toh(cur_rx->re_vlanctl);
1403
1404 /* Invalidate the RX mbuf and unload its map */
1405
1406 bus_dmamap_sync(sc->re_ldata.re_mtag,
1407 sc->re_ldata.re_rx_dmamap[i],
1408 BUS_DMASYNC_POSTWRITE);
1409 bus_dmamap_unload(sc->re_ldata.re_mtag,
1410 sc->re_ldata.re_rx_dmamap[i]);
1411
1412 if ((rxstat & RE_RDESC_STAT_EOF) == 0) {
1413 m->m_len = MCLBYTES - ETHER_ALIGN;
1414 if (sc->re_head == NULL) {
1415 sc->re_head = sc->re_tail = m;
1416 } else {
af51229a
JS
1417 sc->re_tail->m_next = m;
1418 sc->re_tail = m;
1419 }
1420 re_newbuf(sc, i, NULL);
1421 continue;
1422 }
1423
1424 /*
1425 * NOTE: for the 8139C+, the frame length field
1426 * is always 12 bits in size, but for the gigE chips,
1427 * it is 13 bits (since the max RX frame length is 16K).
1428 * Unfortunately, all 32 bits in the status word
1429 * were already used, so to make room for the extra
1430 * length bit, RealTek took out the 'frame alignment
1431 * error' bit and shifted the other status bits
1432 * over one slot. The OWN, EOR, FS and LS bits are
1433 * still in the same places. We have already extracted
1434 * the frame length and checked the OWN bit, so rather
1435 * than using an alternate bit mapping, we shift the
1436 * status bits one space to the right so we can evaluate
1437 * them using the 8169 status as though it was in the
1438 * same format as that of the 8139C+.
1439 */
1440 if (sc->re_type == RE_8169)
1441 rxstat >>= 1;
1442
1443 if (rxstat & RE_RDESC_STAT_RXERRSUM) {
1444 ifp->if_ierrors++;
1445 /*
1446 * If this is part of a multi-fragment packet,
1447 * discard all the pieces.
1448 */
1449 if (sc->re_head != NULL) {
1450 m_freem(sc->re_head);
1451 sc->re_head = sc->re_tail = NULL;
1452 }
1453 re_newbuf(sc, i, m);
1454 continue;
1455 }
1456
1457 /*
1458 * If allocating a replacement mbuf fails,
1459 * reload the current one.
1460 */
1461
1462 if (re_newbuf(sc, i, NULL)) {
1463 ifp->if_ierrors++;
1464 if (sc->re_head != NULL) {
1465 m_freem(sc->re_head);
1466 sc->re_head = sc->re_tail = NULL;
1467 }
1468 re_newbuf(sc, i, m);
1469 continue;
1470 }
1471
af51229a
JS
1472 if (sc->re_head != NULL) {
1473 m->m_len = total_len % (MCLBYTES - ETHER_ALIGN);
1474 /*
1475 * Special case: if there's 4 bytes or less
1476 * in this buffer, the mbuf can be discarded:
1477 * the last 4 bytes is the CRC, which we don't
1478 * care about anyway.
1479 */
1480 if (m->m_len <= ETHER_CRC_LEN) {
1481 sc->re_tail->m_len -=
1482 (ETHER_CRC_LEN - m->m_len);
1483 m_freem(m);
1484 } else {
1485 m->m_len -= ETHER_CRC_LEN;
af51229a
JS
1486 sc->re_tail->m_next = m;
1487 }
1488 m = sc->re_head;
1489 sc->re_head = sc->re_tail = NULL;
1490 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1491 } else
1492 m->m_pkthdr.len = m->m_len =
1493 (total_len - ETHER_CRC_LEN);
1494
1495 ifp->if_ipackets++;
af51229a 1496 m->m_pkthdr.rcvif = ifp;
af51229a
JS
1497
1498 /* Do RX checksumming if enabled */
1499
1500 if (ifp->if_capenable & IFCAP_RXCSUM) {
1501
1502 /* Check IP header checksum */
1503 if (rxstat & RE_RDESC_STAT_PROTOID)
1504 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1505 if ((rxstat & RE_RDESC_STAT_IPSUMBAD) == 0)
1506 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1507
1508 /* Check TCP/UDP checksum */
1509 if ((RE_TCPPKT(rxstat) &&
1510 (rxstat & RE_RDESC_STAT_TCPSUMBAD) == 0) ||
1511 (RE_UDPPKT(rxstat) &&
1512 (rxstat & RE_RDESC_STAT_UDPSUMBAD)) == 0) {
1513 m->m_pkthdr.csum_flags |=
1514 CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1515 m->m_pkthdr.csum_data = 0xffff;
1516 }
1517 }
1518
78195a76 1519 if (rxvlan & RE_RDESC_VLANCTL_TAG) {
3013ac0e
JS
1520 VLAN_INPUT_TAG(m,
1521 be16toh((rxvlan & RE_RDESC_VLANCTL_DATA)));
78195a76
MD
1522 } else {
1523 ifp->if_input(ifp, m);
1524 }
af51229a
JS
1525 }
1526
1527 /* Flush the RX DMA ring */
1528
1529 bus_dmamap_sync(sc->re_ldata.re_rx_list_tag,
7b4b20cd 1530 sc->re_ldata.re_rx_list_map, BUS_DMASYNC_PREWRITE);
af51229a
JS
1531
1532 sc->re_ldata.re_rx_prodidx = i;
1533}
1534
1535static void
1536re_txeof(struct re_softc *sc)
1537{
1538 struct ifnet *ifp = &sc->arpcom.ac_if;
1539 uint32_t txstat;
1540 int idx;
1541
1542 /* Invalidate the TX descriptor list */
1543
1544 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag,
7b4b20cd 1545 sc->re_ldata.re_tx_list_map, BUS_DMASYNC_POSTREAD);
af51229a
JS
1546
1547 for (idx = sc->re_ldata.re_tx_considx;
1548 idx != sc->re_ldata.re_tx_prodidx; RE_DESC_INC(idx)) {
1549 txstat = le32toh(sc->re_ldata.re_tx_list[idx].re_cmdstat);
1550 if (txstat & RE_TDESC_CMD_OWN)
1551 break;
1552
1553 /*
1554 * We only stash mbufs in the last descriptor
1555 * in a fragment chain, which also happens to
1556 * be the only place where the TX status bits
1557 * are valid.
1558 */
1559 if (txstat & RE_TDESC_CMD_EOF) {
1560 m_freem(sc->re_ldata.re_tx_mbuf[idx]);
1561 sc->re_ldata.re_tx_mbuf[idx] = NULL;
1562 bus_dmamap_unload(sc->re_ldata.re_mtag,
1563 sc->re_ldata.re_tx_dmamap[idx]);
1564 if (txstat & (RE_TDESC_STAT_EXCESSCOL|
1565 RE_TDESC_STAT_COLCNT))
1566 ifp->if_collisions++;
1567 if (txstat & RE_TDESC_STAT_TXERRSUM)
1568 ifp->if_oerrors++;
1569 else
1570 ifp->if_opackets++;
1571 }
1572 sc->re_ldata.re_tx_free++;
1573 }
1574
1575 /* No changes made to the TX ring, so no flush needed */
1576 if (idx != sc->re_ldata.re_tx_considx) {
1577 sc->re_ldata.re_tx_considx = idx;
1578 ifp->if_flags &= ~IFF_OACTIVE;
1579 ifp->if_timer = 0;
1580 }
1581
1582 /*
1583 * If not all descriptors have been released reaped yet,
1584 * reload the timer so that we will eventually get another
1585 * interrupt that will cause us to re-enter this routine.
1586 * This is done in case the transmitter has gone idle.
1587 */
1588 if (sc->re_ldata.re_tx_free != RE_TX_DESC_CNT)
1589 CSR_WRITE_4(sc, RE_TIMERCNT, 1);
1590}
1591
1592static void
1593re_tick(void *xsc)
1594{
1595 struct re_softc *sc = xsc;
af51229a 1596
78195a76
MD
1597 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer);
1598 re_tick_serialized(xsc);
1599 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer);
1600}
1601
1602static void
1603re_tick_serialized(void *xsc)
1604{
1605 struct re_softc *sc = xsc;
1606 struct mii_data *mii;
af51229a
JS
1607
1608 mii = device_get_softc(sc->re_miibus);
1609 mii_tick(mii);
1610
1611 callout_reset(&sc->re_timer, hz, re_tick, sc);
af51229a
JS
1612}
1613
1614#ifdef DEVICE_POLLING
9c095379 1615
af51229a
JS
1616static void
1617re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1618{
1619 struct re_softc *sc = ifp->if_softc;
1620
9c095379
MD
1621 switch(cmd) {
1622 case POLL_REGISTER:
1623 /* disable interrupts */
1624 CSR_WRITE_2(sc, RE_IMR, 0x0000);
1625 break;
1626 case POLL_DEREGISTER:
1627 /* enable interrupts */
af51229a 1628 CSR_WRITE_2(sc, RE_IMR, RE_INTRS_CPLUS);
9c095379
MD
1629 break;
1630 default:
1631 sc->rxcycles = count;
1632 re_rxeof(sc);
1633 re_txeof(sc);
af51229a 1634
9c095379
MD
1635 if (!ifq_is_empty(&ifp->if_snd))
1636 (*ifp->if_start)(ifp);
af51229a 1637
9c095379
MD
1638 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
1639 uint16_t status;
af51229a 1640
9c095379
MD
1641 status = CSR_READ_2(sc, RE_ISR);
1642 if (status == 0xffff)
1643 return;
1644 if (status)
1645 CSR_WRITE_2(sc, RE_ISR, status);
af51229a 1646
9c095379
MD
1647 /*
1648 * XXX check behaviour on receiver stalls.
1649 */
af51229a 1650
9c095379
MD
1651 if (status & RE_ISR_SYSTEM_ERR) {
1652 re_reset(sc);
1653 re_init(sc);
1654 }
af51229a 1655 }
9c095379 1656 break;
af51229a
JS
1657 }
1658}
1659#endif /* DEVICE_POLLING */
1660
1661static void
1662re_intr(void *arg)
1663{
1664 struct re_softc *sc = arg;
1665 struct ifnet *ifp = &sc->arpcom.ac_if;
1666 uint16_t status;
af51229a
JS
1667
1668 if (sc->suspended || (ifp->if_flags & IFF_UP) == 0)
1669 return;
1670
af51229a
JS
1671 for (;;) {
1672 status = CSR_READ_2(sc, RE_ISR);
1673 /* If the card has gone away the read returns 0xffff. */
1674 if (status == 0xffff)
1675 break;
1676 if (status)
1677 CSR_WRITE_2(sc, RE_ISR, status);
1678
1679 if ((status & RE_INTRS_CPLUS) == 0)
1680 break;
1681
1682 if (status & RE_ISR_RX_OK)
1683 re_rxeof(sc);
1684
1685 if (status & RE_ISR_RX_ERR)
1686 re_rxeof(sc);
1687
1688 if ((status & RE_ISR_TIMEOUT_EXPIRED) ||
1689 (status & RE_ISR_TX_ERR) ||
1690 (status & RE_ISR_TX_DESC_UNAVAIL))
1691 re_txeof(sc);
1692
1693 if (status & RE_ISR_SYSTEM_ERR) {
1694 re_reset(sc);
1695 re_init(sc);
1696 }
1697
1698 if (status & RE_ISR_LINKCHG)
78195a76 1699 re_tick_serialized(sc);
af51229a
JS
1700 }
1701
000181b2 1702 if (!ifq_is_empty(&ifp->if_snd))
af51229a 1703 (*ifp->if_start)(ifp);
af51229a
JS
1704}
1705
1706static int
8f77d350 1707re_encap(struct re_softc *sc, struct mbuf **m_head, int *idx, int *called_defrag)
af51229a
JS
1708{
1709 struct ifnet *ifp = &sc->arpcom.ac_if;
8f77d350 1710 struct mbuf *m, *m_new = NULL;
af51229a
JS
1711 struct re_dmaload_arg arg;
1712 bus_dmamap_t map;
1713 int error;
1714
8f77d350 1715 *called_defrag = 0;
af51229a
JS
1716 if (sc->re_ldata.re_tx_free <= 4)
1717 return(EFBIG);
1718
8f77d350
JS
1719 m = *m_head;
1720
af51229a
JS
1721 /*
1722 * Set up checksum offload. Note: checksum offload bits must
1723 * appear in all descriptors of a multi-descriptor transmit
1724 * attempt. (This is according to testing done with an 8169
1725 * chip. I'm not sure if this is a requirement or a bug.)
1726 */
1727
1728 arg.re_flags = 0;
1729
8f77d350 1730 if (m->m_pkthdr.csum_flags & CSUM_IP)
af51229a 1731 arg.re_flags |= RE_TDESC_CMD_IPCSUM;
8f77d350 1732 if (m->m_pkthdr.csum_flags & CSUM_TCP)
af51229a 1733 arg.re_flags |= RE_TDESC_CMD_TCPCSUM;
8f77d350 1734 if (m->m_pkthdr.csum_flags & CSUM_UDP)
af51229a
JS
1735 arg.re_flags |= RE_TDESC_CMD_UDPCSUM;
1736
1737 arg.sc = sc;
1738 arg.re_idx = *idx;
1739 arg.re_maxsegs = sc->re_ldata.re_tx_free;
1740 if (arg.re_maxsegs > 4)
1741 arg.re_maxsegs -= 4;
1742 arg.re_ring = sc->re_ldata.re_tx_list;
1743
1744 map = sc->re_ldata.re_tx_dmamap[*idx];
1745 error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag, map,
8f77d350 1746 m, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);
af51229a
JS
1747
1748 if (error && error != EFBIG) {
1749 if_printf(ifp, "can't map mbuf (error %d)\n", error);
1750 return(ENOBUFS);
1751 }
1752
1753 /* Too many segments to map, coalesce into a single mbuf */
1754
1755 if (error || arg.re_maxsegs == 0) {
8f77d350 1756 m_new = m_defrag_nofree(m, MB_DONTWAIT);
af51229a
JS
1757 if (m_new == NULL)
1758 return(1);
8f77d350
JS
1759 else {
1760 m = m_new;
1761 *m_head = m;
1762 }
af51229a 1763
8f77d350 1764 *called_defrag = 1;
af51229a
JS
1765 arg.sc = sc;
1766 arg.re_idx = *idx;
1767 arg.re_maxsegs = sc->re_ldata.re_tx_free;
1768 arg.re_ring = sc->re_ldata.re_tx_list;
1769
1770 error = bus_dmamap_load_mbuf(sc->re_ldata.re_mtag, map,
8f77d350 1771 m, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);
af51229a 1772 if (error) {
8f77d350 1773 m_freem(m);
af51229a
JS
1774 if_printf(ifp, "can't map mbuf (error %d)\n", error);
1775 return(EFBIG);
1776 }
1777 }
1778
1779 /*
1780 * Insure that the map for this transmission
1781 * is placed at the array index of the last descriptor
1782 * in this chain.
1783 */
1784 sc->re_ldata.re_tx_dmamap[*idx] =
1785 sc->re_ldata.re_tx_dmamap[arg.re_idx];
1786 sc->re_ldata.re_tx_dmamap[arg.re_idx] = map;
1787
8f77d350 1788 sc->re_ldata.re_tx_mbuf[arg.re_idx] = m;
af51229a
JS
1789 sc->re_ldata.re_tx_free -= arg.re_maxsegs;
1790
1791 /*
1792 * Set up hardware VLAN tagging. Note: vlan tag info must
1793 * appear in the first descriptor of a multi-descriptor
1794 * transmission attempt.
1795 */
1796
8f77d350
JS
1797 if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
1798 m->m_pkthdr.rcvif != NULL &&
1799 m->m_pkthdr.rcvif->if_type == IFT_L2VLAN) {
af51229a 1800 struct ifvlan *ifv;
8f77d350 1801 ifv = m->m_pkthdr.rcvif->if_softc;
af51229a
JS
1802 if (ifv != NULL)
1803 sc->re_ldata.re_tx_list[*idx].re_vlanctl =
1804 htole32(htobe16(ifv->ifv_tag) | RE_TDESC_VLANCTL_TAG);
1805 }
1806
1807 /* Transfer ownership of packet to the chip. */
1808
1809 sc->re_ldata.re_tx_list[arg.re_idx].re_cmdstat |=
1810 htole32(RE_TDESC_CMD_OWN);
1811 if (*idx != arg.re_idx)
1812 sc->re_ldata.re_tx_list[*idx].re_cmdstat |=
1813 htole32(RE_TDESC_CMD_OWN);
1814
1815 RE_DESC_INC(arg.re_idx);
1816 *idx = arg.re_idx;
1817
1818 return(0);
1819}
1820
1821/*
1822 * Main transmit routine for C+ and gigE NICs.
1823 */
1824
1825static void
1826re_start(struct ifnet *ifp)
1827{
1828 struct re_softc *sc = ifp->if_softc;
d2c71fa0
MD
1829 struct mbuf *m_head;
1830 struct mbuf *m_head2;
2f54d1d2 1831 int called_defrag, idx, need_trans;
af51229a 1832
af51229a
JS
1833 idx = sc->re_ldata.re_tx_prodidx;
1834
2f54d1d2 1835 need_trans = 0;
af51229a 1836 while (sc->re_ldata.re_tx_mbuf[idx] == NULL) {
000181b2 1837 m_head = ifq_poll(&ifp->if_snd);
af51229a
JS
1838 if (m_head == NULL)
1839 break;
d2c71fa0
MD
1840 m_head2 = m_head;
1841 if (re_encap(sc, &m_head2, &idx, &called_defrag)) {
1842 /*
1843 * If we could not encapsulate the defragged packet,
1844 * the returned m_head2 is garbage and we must dequeue
1845 * and throw away the original packet.
1846 */
8f77d350 1847 if (called_defrag) {
d2c71fa0
MD
1848 ifq_dequeue(&ifp->if_snd, m_head);
1849 m_freem(m_head);
8f77d350 1850 }
af51229a
JS
1851 ifp->if_flags |= IFF_OACTIVE;
1852 break;
1853 }
8f77d350 1854
d2c71fa0
MD
1855 /*
1856 * Clean out the packet we encapsulated. If we defragged
1857 * the packet the m_head2 is the one that got encapsulated
1858 * and the original must be thrown away. Otherwise m_head2
1859 * *IS* the original.
1860 */
1861 ifq_dequeue(&ifp->if_snd, m_head);
8f77d350 1862 if (called_defrag)
d2c71fa0 1863 m_freem(m_head);
2f54d1d2 1864 need_trans = 1;
af51229a
JS
1865
1866 /*
1867 * If there's a BPF listener, bounce a copy of this frame
1868 * to him.
1869 */
d2c71fa0 1870 BPF_MTAP(ifp, m_head2);
af51229a
JS
1871 }
1872
2f54d1d2 1873 if (!need_trans) {
2f54d1d2
SZ
1874 return;
1875 }
1876
af51229a
JS
1877 /* Flush the TX descriptors */
1878 bus_dmamap_sync(sc->re_ldata.re_tx_list_tag,
7b4b20cd 1879 sc->re_ldata.re_tx_list_map, BUS_DMASYNC_PREWRITE);
af51229a
JS
1880
1881 sc->re_ldata.re_tx_prodidx = idx;
1882
1883 /*
1884 * RealTek put the TX poll request register in a different
1885 * location on the 8169 gigE chip. I don't know why.
1886 */
1887 if (sc->re_type == RE_8169)
1888 CSR_WRITE_2(sc, RE_GTXSTART, RE_TXSTART_START);
1889 else
1890 CSR_WRITE_2(sc, RE_TXSTART, RE_TXSTART_START);
1891
1892 /*
1893 * Use the countdown timer for interrupt moderation.
1894 * 'TX done' interrupts are disabled. Instead, we reset the
1895 * countdown timer, which will begin counting until it hits
1896 * the value in the TIMERINT register, and then trigger an
1897 * interrupt. Each time we write to the TIMERCNT register,
1898 * the timer count is reset to 0.
1899 */
1900 CSR_WRITE_4(sc, RE_TIMERCNT, 1);
1901
af51229a
JS
1902 /*
1903 * Set a timeout in case the chip goes out to lunch.
1904 */
1905 ifp->if_timer = 5;
1906}
1907
1908static void
1909re_init(void *xsc)
1910{
1911 struct re_softc *sc = xsc;
1912 struct ifnet *ifp = &sc->arpcom.ac_if;
1913 struct mii_data *mii;
1914 uint32_t rxcfg = 0;
af51229a 1915
af51229a
JS
1916 mii = device_get_softc(sc->re_miibus);
1917
1918 /*
1919 * Cancel pending I/O and free all RX/TX buffers.
1920 */
1921 re_stop(sc);
1922
1923 /*
1924 * Enable C+ RX and TX mode, as well as VLAN stripping and
1925 * RX checksum offload. We must configure the C+ register
1926 * before all others.
1927 */
1928 CSR_WRITE_2(sc, RE_CPLUS_CMD, RE_CPLUSCMD_RXENB | RE_CPLUSCMD_TXENB |
1929 RE_CPLUSCMD_PCI_MRW | RE_CPLUSCMD_VLANSTRIP |
1930 (ifp->if_capenable & IFCAP_RXCSUM ?
1931 RE_CPLUSCMD_RXCSUM_ENB : 0));
1932
1933 /*
1934 * Init our MAC address. Even though the chipset
1935 * documentation doesn't mention it, we need to enter "Config
1936 * register write enable" mode to modify the ID registers.
1937 */
1938 CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_WRITECFG);
1939 CSR_WRITE_STREAM_4(sc, RE_IDR0,
1940 *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
1941 CSR_WRITE_STREAM_4(sc, RE_IDR4,
1942 *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
1943 CSR_WRITE_1(sc, RE_EECMD, RE_EEMODE_OFF);
1944
1945 /*
1946 * For C+ mode, initialize the RX descriptors and mbufs.
1947 */
1948 re_rx_list_init(sc);
1949 re_tx_list_init(sc);
1950
1951 /*
1952 * Enable transmit and receive.
1953 */
1954 CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_TX_ENB|RE_CMD_RX_ENB);
1955
1956 /*
1957 * Set the initial TX and RX configuration.
1958 */
1959 if (sc->re_testmode) {
1960 if (sc->re_type == RE_8169)
1961 CSR_WRITE_4(sc, RE_TXCFG,
1962 RE_TXCFG_CONFIG | RE_LOOPTEST_ON);
1963 else
1964 CSR_WRITE_4(sc, RE_TXCFG,
1965 RE_TXCFG_CONFIG | RE_LOOPTEST_ON_CPLUS);
1966 } else
1967 CSR_WRITE_4(sc, RE_TXCFG, RE_TXCFG_CONFIG);
1968 CSR_WRITE_4(sc, RE_RXCFG, RE_RXCFG_CONFIG);
1969
1970 /* Set the individual bit to receive frames for this host only. */
1971 rxcfg = CSR_READ_4(sc, RE_RXCFG);
1972 rxcfg |= RE_RXCFG_RX_INDIV;
1973
1974 /* If we want promiscuous mode, set the allframes bit. */
1975 if (ifp->if_flags & IFF_PROMISC) {
1976 rxcfg |= RE_RXCFG_RX_ALLPHYS;
1977 CSR_WRITE_4(sc, RE_RXCFG, rxcfg);
1978 } else {
1979 rxcfg &= ~RE_RXCFG_RX_ALLPHYS;
1980 CSR_WRITE_4(sc, RE_RXCFG, rxcfg);
1981 }
1982
1983 /*
1984 * Set capture broadcast bit to capture broadcast frames.
1985 */
1986 if (ifp->if_flags & IFF_BROADCAST) {
1987 rxcfg |= RE_RXCFG_RX_BROAD;
1988 CSR_WRITE_4(sc, RE_RXCFG, rxcfg);
1989 } else {
1990 rxcfg &= ~RE_RXCFG_RX_BROAD;
1991 CSR_WRITE_4(sc, RE_RXCFG, rxcfg);
1992 }
1993
1994 /*
1995 * Program the multicast filter, if necessary.
1996 */
1997 re_setmulti(sc);
1998
1999#ifdef DEVICE_POLLING
2000 /*
2001 * Disable interrupts if we are polling.
2002 */
2003 if (ifp->if_flags & IFF_POLLING)
2004 CSR_WRITE_2(sc, RE_IMR, 0);
2005 else /* otherwise ... */
2006#endif /* DEVICE_POLLING */
2007 /*
2008 * Enable interrupts.
2009 */
2010 if (sc->re_testmode)
2011 CSR_WRITE_2(sc, RE_IMR, 0);
2012 else
2013 CSR_WRITE_2(sc, RE_IMR, RE_INTRS_CPLUS);
2014
2015 /* Set initial TX threshold */
2016 sc->re_txthresh = RE_TX_THRESH_INIT;
2017
2018 /* Start RX/TX process. */
2019 CSR_WRITE_4(sc, RE_MISSEDPKT, 0);
2020#ifdef notdef
2021 /* Enable receiver and transmitter. */
2022 CSR_WRITE_1(sc, RE_COMMAND, RE_CMD_TX_ENB|RE_CMD_RX_ENB);
2023#endif
2024 /*
2025 * Load the addresses of the RX and TX lists into the chip.
2026 */
2027
2028 CSR_WRITE_4(sc, RE_RXLIST_ADDR_HI,
2029 RE_ADDR_HI(sc->re_ldata.re_rx_list_addr));
2030 CSR_WRITE_4(sc, RE_RXLIST_ADDR_LO,
2031 RE_ADDR_LO(sc->re_ldata.re_rx_list_addr));
2032
2033 CSR_WRITE_4(sc, RE_TXLIST_ADDR_HI,
2034 RE_ADDR_HI(sc->re_ldata.re_tx_list_addr));
2035 CSR_WRITE_4(sc, RE_TXLIST_ADDR_LO,
2036 RE_ADDR_LO(sc->re_ldata.re_tx_list_addr));
2037
2038 CSR_WRITE_1(sc, RE_EARLY_TX_THRESH, 16);
2039
2040 /*
2041 * Initialize the timer interrupt register so that
2042 * a timer interrupt will be generated once the timer
2043 * reaches a certain number of ticks. The timer is
2044 * reloaded on each transmit. This gives us TX interrupt
2045 * moderation, which dramatically improves TX frame rate.
2046 */
2047
2048 if (sc->re_type == RE_8169)
2049 CSR_WRITE_4(sc, RE_TIMERINT_8169, 0x800);
2050 else
2051 CSR_WRITE_4(sc, RE_TIMERINT, 0x400);
2052
2053 /*
2054 * For 8169 gigE NICs, set the max allowed RX packet
2055 * size so we can receive jumbo frames.
2056 */
2057 if (sc->re_type == RE_8169)
2058 CSR_WRITE_2(sc, RE_MAXRXPKTLEN, 16383);
2059
2060 if (sc->re_testmode) {
af51229a
JS
2061 return;
2062 }
2063
2064 mii_mediachg(mii);
2065
2066 CSR_WRITE_1(sc, RE_CFG1, RE_CFG1_DRVLOAD|RE_CFG1_FULLDUPLEX);
2067
2068 ifp->if_flags |= IFF_RUNNING;
2069 ifp->if_flags &= ~IFF_OACTIVE;
2070
2071 callout_reset(&sc->re_timer, hz, re_tick, sc);
af51229a
JS
2072}
2073
2074/*
2075 * Set media options.
2076 */
2077static int
2078re_ifmedia_upd(struct ifnet *ifp)
2079{
2080 struct re_softc *sc = ifp->if_softc;
2081 struct mii_data *mii;
2082
2083 mii = device_get_softc(sc->re_miibus);
2084 mii_mediachg(mii);
2085
2086 return(0);
2087}
2088
2089/*
2090 * Report current media status.
2091 */
2092static void
2093re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2094{
2095 struct re_softc *sc = ifp->if_softc;
2096 struct mii_data *mii;
2097
2098 mii = device_get_softc(sc->re_miibus);
2099
2100 mii_pollstat(mii);
2101 ifmr->ifm_active = mii->mii_media_active;
2102 ifmr->ifm_status = mii->mii_media_status;
2103}
2104
2105static int
2106re_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2107{
2108 struct re_softc *sc = ifp->if_softc;
2109 struct ifreq *ifr = (struct ifreq *) data;
2110 struct mii_data *mii;
f0ee8b5b 2111 int error = 0;
af51229a 2112
af51229a
JS
2113 switch(command) {
2114 case SIOCSIFMTU:
2115 if (ifr->ifr_mtu > RE_JUMBO_MTU)
2116 error = EINVAL;
2117 ifp->if_mtu = ifr->ifr_mtu;
2118 break;
2119 case SIOCSIFFLAGS:
2120 if (ifp->if_flags & IFF_UP)
2121 re_init(sc);
2122 else if (ifp->if_flags & IFF_RUNNING)
2123 re_stop(sc);
2124 error = 0;
2125 break;
2126 case SIOCADDMULTI:
2127 case SIOCDELMULTI:
2128 re_setmulti(sc);
2129 error = 0;
2130 break;
2131 case SIOCGIFMEDIA:
2132 case SIOCSIFMEDIA:
2133 mii = device_get_softc(sc->re_miibus);
2134 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2135 break;
2136 case SIOCSIFCAP:
9c095379 2137 ifp->if_capenable &= ~(IFCAP_HWCSUM);
af51229a 2138 ifp->if_capenable |=
9c095379 2139 ifr->ifr_reqcap & (IFCAP_HWCSUM);
af51229a
JS
2140 if (ifp->if_capenable & IFCAP_TXCSUM)
2141 ifp->if_hwassist = RE_CSUM_FEATURES;
2142 else
2143 ifp->if_hwassist = 0;
2144 if (ifp->if_flags & IFF_RUNNING)
2145 re_init(sc);
2146 break;
2147 default:
2148 error = ether_ioctl(ifp, command, data);
2149 break;
2150 }
af51229a
JS
2151 return(error);
2152}
2153
/*
 * Watchdog timeout handler, invoked when a queued transmit has not
 * completed within the interface timeout.  Logs the event, reclaims
 * whatever descriptors did complete, and reinitializes the chip to
 * recover, then restarts transmission if packets are still queued.
 */
static void
re_watchdog(struct ifnet *ifp)
{
	struct re_softc *sc = ifp->if_softc;

	if_printf(ifp, "watchdog timeout\n");

	/* Account the hang as an output error. */
	ifp->if_oerrors++;

	/* Drain completed TX and RX work before the reset. */
	re_txeof(sc);
	re_rxeof(sc);

	/* Full reinitialization recovers the wedged hardware. */
	re_init(sc);

	/* Kick the transmitter if the send queue is non-empty. */
	if (!ifq_is_empty(&ifp->if_snd))
		ifp->if_start(ifp);
}
2171
2172/*
2173 * Stop the adapter and free any mbufs allocated to the
2174 * RX and TX lists.
2175 */
2176static void
2177re_stop(struct re_softc *sc)
2178{
2179 struct ifnet *ifp = &sc->arpcom.ac_if;
f0ee8b5b
JS
2180 int i;
2181
af51229a
JS
2182 ifp->if_timer = 0;
2183 callout_stop(&sc->re_timer);
2184
2185 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
af51229a
JS
2186
2187 CSR_WRITE_1(sc, RE_COMMAND, 0x00);
2188 CSR_WRITE_2(sc, RE_IMR, 0x0000);
2189
2190 if (sc->re_head != NULL) {
2191 m_freem(sc->re_head);
2192 sc->re_head = sc->re_tail = NULL;
2193 }
2194
2195 /* Free the TX list buffers. */
2196 for (i = 0; i < RE_TX_DESC_CNT; i++) {
2197 if (sc->re_ldata.re_tx_mbuf[i] != NULL) {
2198 bus_dmamap_unload(sc->re_ldata.re_mtag,
2199 sc->re_ldata.re_tx_dmamap[i]);
2200 m_freem(sc->re_ldata.re_tx_mbuf[i]);
2201 sc->re_ldata.re_tx_mbuf[i] = NULL;
2202 }
2203 }
2204
2205 /* Free the RX list buffers. */
2206 for (i = 0; i < RE_RX_DESC_CNT; i++) {
2207 if (sc->re_ldata.re_rx_mbuf[i] != NULL) {
2208 bus_dmamap_unload(sc->re_ldata.re_mtag,
2209 sc->re_ldata.re_rx_dmamap[i]);
2210 m_freem(sc->re_ldata.re_rx_mbuf[i]);
2211 sc->re_ldata.re_rx_mbuf[i] = NULL;
2212 }
2213 }
af51229a
JS
2214}
2215
2216/*
2217 * Device suspend routine. Stop the interface and save some PCI
2218 * settings in case the BIOS doesn't restore them properly on
2219 * resume.
2220 */
2221static int
2222re_suspend(device_t dev)
2223{
2224#ifndef BURN_BRIDGES
2225 int i;
2226#endif
2227 struct re_softc *sc = device_get_softc(dev);
2228
2229 re_stop(sc);
2230
2231#ifndef BURN_BRIDGES
2232 for (i = 0; i < 5; i++)
2233 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2234 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2235 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2236 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2237 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2238#endif
2239
2240 sc->suspended = 1;
2241
2242 return (0);
2243}
2244
2245/*
2246 * Device resume routine. Restore some PCI settings in case the BIOS
2247 * doesn't, re-enable busmastering, and restart the interface if
2248 * appropriate.
2249 */
2250static int
2251re_resume(device_t dev)
2252{
2253 struct re_softc *sc = device_get_softc(dev);
2254 struct ifnet *ifp = &sc->arpcom.ac_if;
2255#ifndef BURN_BRIDGES
2256 int i;
2257#endif
2258
2259#ifndef BURN_BRIDGES
2260 /* better way to do this? */
2261 for (i = 0; i < 5; i++)
2262 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
2263 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
2264 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
2265 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
2266 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
2267
2268 /* reenable busmastering */
2269 pci_enable_busmaster(dev);
ecd80f47 2270 pci_enable_io(dev, SYS_RES_IOPORT);
af51229a
JS
2271#endif
2272
2273 /* reinitialize interface if necessary */
2274 if (ifp->if_flags & IFF_UP)
2275 re_init(sc);
2276
2277 sc->suspended = 0;
2278
2279 return (0);
2280}
2281
2282/*
2283 * Stop all chip I/O so that the kernel's probe routines don't
2284 * get confused by errant DMAs when rebooting.
2285 */
2286static void
2287re_shutdown(device_t dev)
2288{
2289 struct re_softc *sc = device_get_softc(dev);
2290
2291 re_stop(sc);
2292}