if: Move IFF_OACTIVE bit into ifaltq; prepare multiple TX queues support
[dragonfly.git] / sys / dev / netif / vge / if_vge.c
1/*
2 * Copyright (c) 2004
3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD: src/sys/dev/vge/if_vge.c,v 1.24 2006/02/14 12:44:56 glebius Exp $
33 */
34
35/*
36 * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
37 *
38 * Written by Bill Paul <wpaul@windriver.com>
39 * Senior Networking Software Engineer
40 * Wind River Systems
41 */
42
43/*
44 * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that
45 * combines a tri-speed ethernet MAC and PHY, with the following
46 * features:
47 *
48 * o Jumbo frame support up to 16K
49 * o Transmit and receive flow control
50 * o IPv4 checksum offload
51 * o VLAN tag insertion and stripping
52 * o TCP large send
53 * o 64-bit multicast hash table filter
54 * o 64 entry CAM filter
55 * o 16K RX FIFO and 48K TX FIFO memory
56 * o Interrupt moderation
57 *
58 * The VT6122 supports up to four transmit DMA queues. The descriptors
59 * in the transmit ring can address up to 7 data fragments; frames which
60 * span more than 7 data buffers must be coalesced, but in general the
61 * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
62 * long. The receive descriptors address only a single buffer.
63 *
64 * There are two peculiar design issues with the VT6122. One is that
65 * receive data buffers must be aligned on a 32-bit boundary. This is
66 * not a problem where the VT6122 is used as a LOM device in x86-based
67 * systems, but on architectures that generate unaligned access traps, we
68 * have to do some copying.
69 *
70 * The other issue has to do with the way 64-bit addresses are handled.
71 * The DMA descriptors only allow you to specify 48 bits of addressing
72 * information. The remaining 16 bits are specified using one of the
73 * I/O registers. If you only have a 32-bit system, then this isn't
74 * an issue, but if you have a 64-bit system and more than 4GB of
 75 * memory, you have to make sure your network data buffers reside
76 * in the same 48-bit 'segment.'
77 *
78 * Special thanks to Ryan Fu at VIA Networking for providing documentation
79 * and sample NICs for testing.
80 */
81
 82#include "opt_ifpoll.h"
83
84#include <sys/param.h>
85#include <sys/endian.h>
86#include <sys/systm.h>
87#include <sys/sockio.h>
88#include <sys/mbuf.h>
89#include <sys/malloc.h>
90#include <sys/module.h>
91#include <sys/kernel.h>
92#include <sys/socket.h>
93#include <sys/serialize.h>
94#include <sys/proc.h>
95#include <sys/bus.h>
96#include <sys/rman.h>
 97#include <sys/interrupt.h>
98
99#include <net/if.h>
100#include <net/if_arp.h>
101#include <net/ethernet.h>
102#include <net/if_dl.h>
103#include <net/if_media.h>
 104#include <net/if_poll.h>
105#include <net/ifq_var.h>
106#include <net/if_types.h>
107#include <net/vlan/if_vlan_var.h>
 108#include <net/vlan/if_vlan_ether.h>
109
110#include <net/bpf.h>
111
112#include <dev/netif/mii_layer/mii.h>
113#include <dev/netif/mii_layer/miivar.h>
114
115#include <bus/pci/pcireg.h>
116#include <bus/pci/pcivar.h>
117#include <bus/pci/pcidevs.h>
118
119#include "miibus_if.h"
120
121#include <dev/netif/vge/if_vgereg.h>
122#include <dev/netif/vge/if_vgevar.h>
123
124#define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
125
126/*
127 * Various supported device vendors/types and their names.
128 */
129static const struct vge_type vge_devs[] = {
130 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT612X,
131 "VIA Networking Gigabit Ethernet" },
132 { 0, 0, NULL }
133};
134
135static int vge_probe (device_t);
136static int vge_attach (device_t);
137static int vge_detach (device_t);
138
139static int vge_encap (struct vge_softc *, struct mbuf *, int);
140
141static void vge_dma_map_addr (void *, bus_dma_segment_t *, int, int);
142static void vge_dma_map_rx_desc (void *, bus_dma_segment_t *, int,
143 bus_size_t, int);
144static void vge_dma_map_tx_desc (void *, bus_dma_segment_t *, int,
145 bus_size_t, int);
146static int vge_dma_alloc (device_t);
147static void vge_dma_free (struct vge_softc *);
148static int vge_newbuf (struct vge_softc *, int, struct mbuf *);
149static int vge_rx_list_init (struct vge_softc *);
150static int vge_tx_list_init (struct vge_softc *);
151#ifdef VGE_FIXUP_RX
152static __inline void vge_fixup_rx
153 (struct mbuf *);
154#endif
155static void vge_rxeof (struct vge_softc *, int);
156static void vge_txeof (struct vge_softc *);
157static void vge_intr (void *);
158static void vge_tick (struct vge_softc *);
159static void vge_start (struct ifnet *);
160static int vge_ioctl (struct ifnet *, u_long, caddr_t,
161 struct ucred *);
162static void vge_init (void *);
163static void vge_stop (struct vge_softc *);
164static void vge_watchdog (struct ifnet *);
165static int vge_suspend (device_t);
166static int vge_resume (device_t);
167static void vge_shutdown (device_t);
168static int vge_ifmedia_upd (struct ifnet *);
169static void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *);
170
171#ifdef VGE_EEPROM
172static void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *);
173#endif
174static void vge_read_eeprom (struct vge_softc *, uint8_t *, int, int, int);
175
176static void vge_miipoll_start (struct vge_softc *);
177static void vge_miipoll_stop (struct vge_softc *);
178static int vge_miibus_readreg (device_t, int, int);
179static int vge_miibus_writereg (device_t, int, int, int);
180static void vge_miibus_statchg (device_t);
181
182static void vge_cam_clear (struct vge_softc *);
183static int vge_cam_set (struct vge_softc *, uint8_t *);
184static void vge_setmulti (struct vge_softc *);
185static void vge_reset (struct vge_softc *);
186
187#ifdef IFPOLL_ENABLE
188static void vge_npoll(struct ifnet *, struct ifpoll_info *);
189static void vge_npoll_compat(struct ifnet *, void *, int);
190static void vge_disable_intr(struct vge_softc *);
191#endif
192static void vge_enable_intr(struct vge_softc *, uint32_t);
193
194#define VGE_PCI_LOIO 0x10
195#define VGE_PCI_LOMEM 0x14
196
197static device_method_t vge_methods[] = {
198 /* Device interface */
199 DEVMETHOD(device_probe, vge_probe),
200 DEVMETHOD(device_attach, vge_attach),
201 DEVMETHOD(device_detach, vge_detach),
202 DEVMETHOD(device_suspend, vge_suspend),
203 DEVMETHOD(device_resume, vge_resume),
204 DEVMETHOD(device_shutdown, vge_shutdown),
205
206 /* bus interface */
207 DEVMETHOD(bus_print_child, bus_generic_print_child),
208 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
209
210 /* MII interface */
211 DEVMETHOD(miibus_readreg, vge_miibus_readreg),
212 DEVMETHOD(miibus_writereg, vge_miibus_writereg),
213 DEVMETHOD(miibus_statchg, vge_miibus_statchg),
214
215 { 0, 0 }
216};
217
218static driver_t vge_driver = {
219 "vge",
220 vge_methods,
221 sizeof(struct vge_softc)
222};
223
224static devclass_t vge_devclass;
225
226DECLARE_DUMMY_MODULE(if_vge);
227MODULE_DEPEND(if_vge, miibus, 1, 1, 1);
228DRIVER_MODULE(if_vge, pci, vge_driver, vge_devclass, NULL, NULL);
229DRIVER_MODULE(if_vge, cardbus, vge_driver, vge_devclass, NULL, NULL);
230DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, NULL, NULL);
231
232#ifdef VGE_EEPROM
233/*
234 * Read a word of data stored in the EEPROM at address 'addr.'
235 */
236static void
 237vge_eeprom_getword(struct vge_softc *sc, int addr, uint16_t *dest)
238{
239 uint16_t word = 0;
240 int i;
241
242 /*
243 * Enter EEPROM embedded programming mode. In order to
244 * access the EEPROM at all, we first have to set the
245 * EELOAD bit in the CHIPCFG2 register.
246 */
247 CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
248 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
249
250 /* Select the address of the word we want to read */
251 CSR_WRITE_1(sc, VGE_EEADDR, addr);
252
253 /* Issue read command */
254 CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
255
256 /* Wait for the done bit to be set. */
257 for (i = 0; i < VGE_TIMEOUT; i++) {
258 if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
259 break;
260 }
261 if (i == VGE_TIMEOUT) {
262 device_printf(sc->vge_dev, "EEPROM read timed out\n");
263 *dest = 0;
264 return;
265 }
266
267 /* Read the result */
268 word = CSR_READ_2(sc, VGE_EERDDAT);
269
270 /* Turn off EEPROM access mode. */
271 CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
272 CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
273
274 *dest = word;
275}
276#endif
277
278/*
279 * Read a sequence of words from the EEPROM.
280 */
281static void
282vge_read_eeprom(struct vge_softc *sc, uint8_t *dest, int off, int cnt, int swap)
283{
284 int i;
285#ifdef VGE_EEPROM
286 uint16_t word = 0, *ptr;
287
288 for (i = 0; i < cnt; i++) {
289 vge_eeprom_getword(sc, off + i, &word);
290 ptr = (uint16_t *)(dest + (i * 2));
291 if (swap)
292 *ptr = ntohs(word);
293 else
294 *ptr = word;
295 }
296#else
297 for (i = 0; i < ETHER_ADDR_LEN; i++)
298 dest[i] = CSR_READ_1(sc, VGE_PAR0 + i);
299#endif
300}
301
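/*
 * Stop the MII auto-poll unit and wait for it to go idle, so the
 * MII management registers can be accessed directly.
 */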
302static void
303vge_miipoll_stop(struct vge_softc *sc)
304{
305 int i;
306
307 CSR_WRITE_1(sc, VGE_MIICMD, 0);
308
309 for (i = 0; i < VGE_TIMEOUT; i++) {
310 DELAY(1);
311 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
312 break;
313 }
314 if (i == VGE_TIMEOUT)
315 if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
316}
317
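/*
 * Re-enable MII auto-polling once direct register access is finished.
 */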
318static void
319vge_miipoll_start(struct vge_softc *sc)
320{
321 int i;
322
323 /* First, make sure we're idle. */
324 CSR_WRITE_1(sc, VGE_MIICMD, 0);
325 CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
326
327 for (i = 0; i < VGE_TIMEOUT; i++) {
328 DELAY(1);
329 if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
330 break;
331 }
332 if (i == VGE_TIMEOUT) {
333 if_printf(&sc->arpcom.ac_if, "failed to idle MII autopoll\n");
334 return;
335 }
336
337 /* Now enable auto poll mode. */
338 CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
339
340 /* And make sure it started. */
341 for (i = 0; i < VGE_TIMEOUT; i++) {
342 DELAY(1);
343 if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
344 break;
345 }
346 if (i == VGE_TIMEOUT)
347 if_printf(&sc->arpcom.ac_if, "failed to start MII autopoll\n");
348}
349
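/*
 * Read a PHY register through the MII management interface.
 */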
350static int
351vge_miibus_readreg(device_t dev, int phy, int reg)
352{
353 struct vge_softc *sc;
354 int i;
355 uint16_t rval = 0;
356
357 sc = device_get_softc(dev);
358
359 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
360 return(0);
361
362 vge_miipoll_stop(sc);
363
364 /* Specify the register we want to read. */
365 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
366
367 /* Issue read command. */
368 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
369
370 /* Wait for the read command bit to self-clear. */
371 for (i = 0; i < VGE_TIMEOUT; i++) {
372 DELAY(1);
373 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
374 break;
375 }
376 if (i == VGE_TIMEOUT)
377 if_printf(&sc->arpcom.ac_if, "MII read timed out\n");
378 else
379 rval = CSR_READ_2(sc, VGE_MIIDATA);
380
381 vge_miipoll_start(sc);
382
383 return (rval);
384}
385
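/*
 * Write a PHY register through the MII management interface.
 */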
386static int
387vge_miibus_writereg(device_t dev, int phy, int reg, int data)
388{
389 struct vge_softc *sc;
390 int i, rval = 0;
391
392 sc = device_get_softc(dev);
393
394 if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
395 return(0);
396
397 vge_miipoll_stop(sc);
398
399 /* Specify the register we want to write. */
400 CSR_WRITE_1(sc, VGE_MIIADDR, reg);
401
402 /* Specify the data we want to write. */
403 CSR_WRITE_2(sc, VGE_MIIDATA, data);
404
405 /* Issue write command. */
406 CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
407
408 /* Wait for the write command bit to self-clear. */
409 for (i = 0; i < VGE_TIMEOUT; i++) {
410 DELAY(1);
411 if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
412 break;
413 }
414 if (i == VGE_TIMEOUT) {
415 if_printf(&sc->arpcom.ac_if, "MII write timed out\n");
416 rval = EIO;
417 }
418
419 vge_miipoll_start(sc);
420
421 return (rval);
422}
423
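/*
 * Invalidate all entries in the CAM perfect filter (and the VLAN CAM),
 * then switch the CAM page back to the multicast hash registers.
 */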
424static void
425vge_cam_clear(struct vge_softc *sc)
426{
427 int i;
428
429 /*
430 * Turn off all the mask bits. This tells the chip
431 * that none of the entries in the CAM filter are valid.
432 * desired entries will be enabled as we fill the filter in.
433 */
434 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
435 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
436 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
437 for (i = 0; i < 8; i++)
438 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
439
440 /* Clear the VLAN filter too. */
441 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
442 for (i = 0; i < 8; i++)
443 CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
444
445 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
446 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
447 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
448
449 sc->vge_camidx = 0;
450}
451
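/*
 * Program one station address into the next free CAM filter entry.
 */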
452static int
453vge_cam_set(struct vge_softc *sc, uint8_t *addr)
454{
455 int i, error = 0;
456
457 if (sc->vge_camidx == VGE_CAM_MAXADDRS)
458 return(ENOSPC);
459
460 /* Select the CAM data page. */
461 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
462 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
463
464 /* Set the filter entry we want to update and enable writing. */
465 CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx);
466
467 /* Write the address to the CAM registers */
468 for (i = 0; i < ETHER_ADDR_LEN; i++)
469 CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
470
471 /* Issue a write command. */
472 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
473
 474 /* Wait for it to clear. */
475 for (i = 0; i < VGE_TIMEOUT; i++) {
476 DELAY(1);
477 if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
478 break;
479 }
480 if (i == VGE_TIMEOUT) {
481 if_printf(&sc->arpcom.ac_if, "setting CAM filter failed\n");
482 error = EIO;
483 goto fail;
484 }
485
486 /* Select the CAM mask page. */
487 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
488 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
489
490 /* Set the mask bit that enables this filter. */
491 CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8),
492 1<<(sc->vge_camidx & 7));
493
494 sc->vge_camidx++;
495
496fail:
497 /* Turn off access to CAM. */
498 CSR_WRITE_1(sc, VGE_CAMADDR, 0);
499 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
500 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
501
502 return (error);
503}
504
505/*
506 * Program the multicast filter. We use the 64-entry CAM filter
507 * for perfect filtering. If there's more than 64 multicast addresses,
 508 * we use the hash filter instead.
509 */
510static void
511vge_setmulti(struct vge_softc *sc)
512{
513 struct ifnet *ifp = &sc->arpcom.ac_if;
514 int error = 0;
515 struct ifmultiaddr *ifma;
516 uint32_t h, hashes[2] = { 0, 0 };
517
518 /* First, zot all the multicast entries. */
519 vge_cam_clear(sc);
520 CSR_WRITE_4(sc, VGE_MAR0, 0);
521 CSR_WRITE_4(sc, VGE_MAR1, 0);
522
523 /*
524 * If the user wants allmulti or promisc mode, enable reception
525 * of all multicast frames.
526 */
527 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
528 CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
529 CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
530 return;
531 }
532
533 /* Now program new ones */
 534 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
535 if (ifma->ifma_addr->sa_family != AF_LINK)
536 continue;
537 error = vge_cam_set(sc,
538 LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
539 if (error)
540 break;
541 }
542
543 /* If there were too many addresses, use the hash filter. */
544 if (error) {
545 vge_cam_clear(sc);
546
 547 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
548 if (ifma->ifma_addr->sa_family != AF_LINK)
549 continue;
550 h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
551 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
552 if (h < 32)
553 hashes[0] |= (1 << h);
554 else
555 hashes[1] |= (1 << (h - 32));
556 }
557
558 CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
559 CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
560 }
561}
562
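/*
 * Soft-reset the chip and force a reload of its configuration
 * from the EEPROM.
 */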
563static void
564vge_reset(struct vge_softc *sc)
565{
566 int i;
567
568 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
569
570 for (i = 0; i < VGE_TIMEOUT; i++) {
571 DELAY(5);
572 if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
573 break;
574 }
575
576 if (i == VGE_TIMEOUT) {
 577 if_printf(&sc->arpcom.ac_if, "soft reset timed out\n");
578 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
579 DELAY(2000);
580 }
581
582 DELAY(5000);
583
584 CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
585
586 for (i = 0; i < VGE_TIMEOUT; i++) {
587 DELAY(5);
588 if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
589 break;
590 }
591 if (i == VGE_TIMEOUT) {
592 if_printf(&sc->arpcom.ac_if, "EEPROM reload timed out\n");
593 return;
594 }
595
596 CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
597}
598
599/*
600 * Probe for a VIA gigabit chip. Check the PCI vendor and device
601 * IDs against our list and return a device name if we find a match.
602 */
603static int
604vge_probe(device_t dev)
605{
606 const struct vge_type *t;
607 uint16_t did, vid;
608
609 did = pci_get_device(dev);
610 vid = pci_get_vendor(dev);
611 for (t = vge_devs; t->vge_name != NULL; ++t) {
612 if (vid == t->vge_vid && did == t->vge_did) {
613 device_set_desc(dev, t->vge_name);
614 return 0;
615 }
616 }
617 return (ENXIO);
618}
619
620static void
621vge_dma_map_rx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
622 bus_size_t mapsize, int error)
623{
624
625 struct vge_dmaload_arg *ctx;
626 struct vge_rx_desc *d = NULL;
627
628 if (error)
629 return;
630
631 ctx = arg;
632
 633 /* Signal error to caller if there are too many segments */
634 if (nseg > ctx->vge_maxsegs) {
635 ctx->vge_maxsegs = 0;
636 return;
637 }
638
639 /*
640 * Map the segment array into descriptors.
641 */
642 d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];
643
644 /* If this descriptor is still owned by the chip, bail. */
645 if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
646 if_printf(&ctx->sc->arpcom.ac_if,
647 "tried to map busy descriptor\n");
648 ctx->vge_maxsegs = 0;
649 return;
650 }
651
652 d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
653 d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
654 d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
655 d->vge_sts = 0;
656 d->vge_ctl = 0;
657
658 ctx->vge_maxsegs = 1;
659}
660
661static void
662vge_dma_map_tx_desc(void *arg, bus_dma_segment_t *segs, int nseg,
663 bus_size_t mapsize, int error)
664{
665 struct vge_dmaload_arg *ctx;
666 struct vge_tx_desc *d = NULL;
667 struct vge_tx_frag *f;
668 int i = 0;
669
670 if (error)
671 return;
672
673 ctx = arg;
674
 675 /* Signal error to caller if there are too many segments */
676 if (nseg > ctx->vge_maxsegs) {
677 ctx->vge_maxsegs = 0;
678 return;
679 }
680
681 /* Map the segment array into descriptors. */
682 d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];
683
684 /* If this descriptor is still owned by the chip, bail. */
685 if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) {
686 ctx->vge_maxsegs = 0;
687 return;
688 }
689
690 for (i = 0; i < nseg; i++) {
691 f = &d->vge_frag[i];
692 f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len));
693 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr));
694 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF);
695 }
696
697 /* Argh. This chip does not autopad short frames */
698 if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) {
699 f = &d->vge_frag[i];
700 f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN -
701 ctx->vge_m0->m_pkthdr.len));
702 f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
703 f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
704 ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN;
705 i++;
706 }
707
708 /*
709 * When telling the chip how many segments there are, we
710 * must use nsegs + 1 instead of just nsegs. Darned if I
711 * know why.
712 */
713 i++;
714
715 d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16;
716 d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM;
717
718 if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN)
719 d->vge_ctl |= VGE_TDCTL_JUMBO;
720
721 ctx->vge_maxsegs = nseg;
722}
723
724/*
725 * Map a single buffer address.
726 */
727
728static void
729vge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
730{
731 if (error)
732 return;
733
734 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
735 *((bus_addr_t *)arg) = segs->ds_addr;
736}
737
738static int
739vge_dma_alloc(device_t dev)
740{
741 struct vge_softc *sc = device_get_softc(dev);
742 int error, nseg, i, tx_pos = 0, rx_pos = 0;
743
744 /*
745 * Allocate the parent bus DMA tag appropriate for PCI.
746 */
747#define VGE_NSEG_NEW 32
748 error = bus_dma_tag_create(NULL, /* parent */
749 1, 0, /* alignment, boundary */
750 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
751 BUS_SPACE_MAXADDR, /* highaddr */
752 NULL, NULL, /* filter, filterarg */
753 MAXBSIZE, VGE_NSEG_NEW, /* maxsize, nsegments */
754 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
755 BUS_DMA_ALLOCNOW, /* flags */
756 &sc->vge_parent_tag);
757 if (error) {
758 device_printf(dev, "can't create parent dma tag\n");
759 return error;
760 }
761
762 /*
763 * Allocate map for RX mbufs.
764 */
765 nseg = 32;
766 error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0,
767 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
768 NULL, NULL,
769 MCLBYTES * nseg, nseg, MCLBYTES,
770 BUS_DMA_ALLOCNOW, &sc->vge_ldata.vge_mtag);
771 if (error) {
772 device_printf(dev, "could not allocate mbuf dma tag\n");
773 return error;
774 }
775
776 /*
777 * Allocate map for TX descriptor list.
778 */
779 error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
780 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
781 NULL, NULL,
782 VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ,
783 BUS_DMA_ALLOCNOW,
784 &sc->vge_ldata.vge_tx_list_tag);
785 if (error) {
786 device_printf(dev, "could not allocate tx list dma tag\n");
787 return error;
788 }
789
790 /* Allocate DMA'able memory for the TX ring */
791 error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag,
792 (void **)&sc->vge_ldata.vge_tx_list,
793 BUS_DMA_WAITOK | BUS_DMA_ZERO,
794 &sc->vge_ldata.vge_tx_list_map);
795 if (error) {
796 device_printf(dev, "could not allocate tx list dma memory\n");
797 return error;
798 }
799
800 /* Load the map for the TX ring. */
801 error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag,
802 sc->vge_ldata.vge_tx_list_map,
803 sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ,
804 vge_dma_map_addr,
805 &sc->vge_ldata.vge_tx_list_addr,
806 BUS_DMA_WAITOK);
807 if (error) {
808 device_printf(dev, "could not load tx list\n");
809 bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
810 sc->vge_ldata.vge_tx_list,
811 sc->vge_ldata.vge_tx_list_map);
812 sc->vge_ldata.vge_tx_list = NULL;
813 return error;
814 }
815
816 /* Create DMA maps for TX buffers */
817 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
818 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
819 &sc->vge_ldata.vge_tx_dmamap[i]);
820 if (error) {
821 device_printf(dev, "can't create DMA map for TX\n");
822 tx_pos = i;
823 goto map_fail;
824 }
825 }
826 tx_pos = VGE_TX_DESC_CNT;
827
828 /*
829 * Allocate map for RX descriptor list.
830 */
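 /*
  * XXX The RX ring tag and map load below reuse VGE_TX_LIST_SZ;
  * VGE_RX_LIST_SZ would match the actual RX descriptor ring size.
  */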
831 error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0,
832 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
833 NULL, NULL,
834 VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ,
835 BUS_DMA_ALLOCNOW,
836 &sc->vge_ldata.vge_rx_list_tag);
837 if (error) {
838 device_printf(dev, "could not allocate rx list dma tag\n");
839 return error;
840 }
841
842 /* Allocate DMA'able memory for the RX ring */
843 error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag,
844 (void **)&sc->vge_ldata.vge_rx_list,
845 BUS_DMA_WAITOK | BUS_DMA_ZERO,
846 &sc->vge_ldata.vge_rx_list_map);
847 if (error) {
848 device_printf(dev, "could not allocate rx list dma memory\n");
849 return error;
850 }
851
852 /* Load the map for the RX ring. */
853 error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag,
854 sc->vge_ldata.vge_rx_list_map,
855 sc->vge_ldata.vge_rx_list, VGE_TX_LIST_SZ,
856 vge_dma_map_addr,
857 &sc->vge_ldata.vge_rx_list_addr,
858 BUS_DMA_WAITOK);
859 if (error) {
860 device_printf(dev, "could not load rx list\n");
861 bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
862 sc->vge_ldata.vge_rx_list,
863 sc->vge_ldata.vge_rx_list_map);
864 sc->vge_ldata.vge_rx_list = NULL;
865 return error;
866 }
867
868 /* Create DMA maps for RX buffers */
869 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
870 error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0,
871 &sc->vge_ldata.vge_rx_dmamap[i]);
872 if (error) {
873 device_printf(dev, "can't create DMA map for RX\n");
874 rx_pos = i;
875 goto map_fail;
876 }
877 }
878 return (0);
879
880map_fail:
881 for (i = 0; i < tx_pos; ++i) {
 882 bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
883 sc->vge_ldata.vge_tx_dmamap[i]);
884 }
885 for (i = 0; i < rx_pos; ++i) {
 886 bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
887 sc->vge_ldata.vge_rx_dmamap[i]);
888 }
889 bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
890 sc->vge_ldata.vge_mtag = NULL;
891
892 return error;
893}
894
895static void
896vge_dma_free(struct vge_softc *sc)
897{
898 /* Unload and free the RX DMA ring memory and map */
899 if (sc->vge_ldata.vge_rx_list_tag) {
900 bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag,
901 sc->vge_ldata.vge_rx_list_map);
902 bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag,
903 sc->vge_ldata.vge_rx_list,
904 sc->vge_ldata.vge_rx_list_map);
905 }
906
907 if (sc->vge_ldata.vge_rx_list_tag)
908 bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag);
909
910 /* Unload and free the TX DMA ring memory and map */
911 if (sc->vge_ldata.vge_tx_list_tag) {
912 bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag,
913 sc->vge_ldata.vge_tx_list_map);
914 bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag,
915 sc->vge_ldata.vge_tx_list,
916 sc->vge_ldata.vge_tx_list_map);
917 }
918
919 if (sc->vge_ldata.vge_tx_list_tag)
920 bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag);
921
922 /* Destroy all the RX and TX buffer maps */
923 if (sc->vge_ldata.vge_mtag) {
924 int i;
925
926 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
927 bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
928 sc->vge_ldata.vge_tx_dmamap[i]);
929 }
930 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
931 bus_dmamap_destroy(sc->vge_ldata.vge_mtag,
932 sc->vge_ldata.vge_rx_dmamap[i]);
933 }
934 bus_dma_tag_destroy(sc->vge_ldata.vge_mtag);
935 }
936
937 if (sc->vge_parent_tag)
938 bus_dma_tag_destroy(sc->vge_parent_tag);
939}
940
941/*
942 * Attach the interface. Allocate softc structures, do ifmedia
943 * setup and ethernet/BPF attach.
944 */
945static int
946vge_attach(device_t dev)
947{
948 uint8_t eaddr[ETHER_ADDR_LEN];
949 struct vge_softc *sc;
950 struct ifnet *ifp;
951 int error = 0;
952
953 sc = device_get_softc(dev);
954 ifp = &sc->arpcom.ac_if;
955
956 /* Initialize if_xname early, so if_printf() can be used */
957 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
958
959 /*
960 * Map control/status registers.
961 */
962 pci_enable_busmaster(dev);
963
964 sc->vge_res_rid = VGE_PCI_LOMEM;
965 sc->vge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
966 &sc->vge_res_rid, RF_ACTIVE);
967 if (sc->vge_res == NULL) {
968 device_printf(dev, "couldn't map ports/memory\n");
969 return ENXIO;
970 }
971
972 sc->vge_btag = rman_get_bustag(sc->vge_res);
973 sc->vge_bhandle = rman_get_bushandle(sc->vge_res);
974
975 /* Allocate interrupt */
976 sc->vge_irq_rid = 0;
977 sc->vge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->vge_irq_rid,
978 RF_SHAREABLE | RF_ACTIVE);
979 if (sc->vge_irq == NULL) {
980 device_printf(dev, "couldn't map interrupt\n");
981 error = ENXIO;
982 goto fail;
983 }
984
985 /* Reset the adapter. */
986 vge_reset(sc);
987
988 /*
989 * Get station address from the EEPROM.
990 */
991 vge_read_eeprom(sc, eaddr, VGE_EE_EADDR, 3, 0);
992
993 /* Allocate DMA related stuffs */
994 error = vge_dma_alloc(dev);
995 if (error)
996 goto fail;
997
998 /* Do MII setup */
999 error = mii_phy_probe(dev, &sc->vge_miibus, vge_ifmedia_upd,
1000 vge_ifmedia_sts);
1001 if (error) {
1002 device_printf(dev, "MII without any phy!\n");
1003 goto fail;
1004 }
1005
1006 ifp->if_softc = sc;
1007 ifp->if_mtu = ETHERMTU;
1008 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1009 ifp->if_init = vge_init;
1010 ifp->if_start = vge_start;
1011 ifp->if_watchdog = vge_watchdog;
1012 ifp->if_ioctl = vge_ioctl;
1013#ifdef IFPOLL_ENABLE
1014 ifp->if_npoll = vge_npoll;
1015#endif
1016 ifp->if_hwassist = VGE_CSUM_FEATURES;
1017 ifp->if_capabilities = IFCAP_VLAN_MTU |
1018 IFCAP_HWCSUM |
1019 IFCAP_VLAN_HWTAGGING;
1020 ifp->if_capenable = ifp->if_capabilities;
1021 ifq_set_maxlen(&ifp->if_snd, VGE_IFQ_MAXLEN);
1022 ifq_set_ready(&ifp->if_snd);
1023
1024 /*
1025 * Call MI attach routine.
1026 */
1027 ether_ifattach(ifp, eaddr, NULL);
1028
1029#ifdef IFPOLL_ENABLE
1030 ifpoll_compat_setup(&sc->vge_npoll, NULL, NULL, device_get_unit(dev),
1031 ifp->if_serializer);
1032#endif
1033
1034 /* Hook interrupt last to avoid having to lock softc */
1035 error = bus_setup_intr(dev, sc->vge_irq, INTR_MPSAFE, vge_intr, sc,
1036 &sc->vge_intrhand, ifp->if_serializer);
1037 if (error) {
1038 device_printf(dev, "couldn't set up irq\n");
1039 ether_ifdetach(ifp);
1040 goto fail;
1041 }
1042
 1043 ifp->if_cpuid = rman_get_cpuid(sc->vge_irq);
1044 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
1045
1046 return 0;
1047fail:
1048 vge_detach(dev);
1049 return error;
1050}
1051
1052/*
1053 * Shutdown hardware and free up resources. This can be called any
1054 * time after the mutex has been initialized. It is called in both
1055 * the error case in attach and the normal detach case so it needs
1056 * to be careful about only freeing resources that have actually been
1057 * allocated.
1058 */
1059static int
1060vge_detach(device_t dev)
1061{
1062 struct vge_softc *sc = device_get_softc(dev);
1063 struct ifnet *ifp = &sc->arpcom.ac_if;
1064
1065 /* These should only be active if attach succeeded */
1066 if (device_is_attached(dev)) {
1067 lwkt_serialize_enter(ifp->if_serializer);
1068
1069 vge_stop(sc);
1070 bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand);
1071 /*
1072 * Force off the IFF_UP flag here, in case someone
1073 * still had a BPF descriptor attached to this
 1074 * interface. If they do, ether_ifdetach() will cause
1075 * the BPF code to try and clear the promisc mode
1076 * flag, which will bubble down to vge_ioctl(),
1077 * which will try to call vge_init() again. This will
1078 * turn the NIC back on and restart the MII ticker,
1079 * which will panic the system when the kernel tries
1080 * to invoke the vge_tick() function that isn't there
1081 * anymore.
1082 */
1083 ifp->if_flags &= ~IFF_UP;
1084
1085 lwkt_serialize_exit(ifp->if_serializer);
1086
1087 ether_ifdetach(ifp);
1088 }
1089
1090 if (sc->vge_miibus)
1091 device_delete_child(dev, sc->vge_miibus);
1092 bus_generic_detach(dev);
1093
1094 if (sc->vge_irq) {
1095 bus_release_resource(dev, SYS_RES_IRQ, sc->vge_irq_rid,
1096 sc->vge_irq);
1097 }
1098
1099 if (sc->vge_res) {
1100 bus_release_resource(dev, SYS_RES_MEMORY, sc->vge_res_rid,
1101 sc->vge_res);
1102 }
1103
1104 vge_dma_free(sc);
1105 return (0);
1106}
1107
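/*
 * Attach a new (or recycled) mbuf cluster to RX descriptor 'idx' and
 * map it for DMA.
 */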
1108static int
1109vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
1110{
1111 struct vge_dmaload_arg arg;
1112 struct mbuf *n = NULL;
1113 int i, error;
1114
1115 if (m == NULL) {
1116 n = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
1117 if (n == NULL)
1118 return (ENOBUFS);
1119 m = n;
1120 } else {
1121 m->m_data = m->m_ext.ext_buf;
1122 }
1123
1124
1125#ifdef VGE_FIXUP_RX
1126 /*
1127 * This is part of an evil trick to deal with non-x86 platforms.
1128 * The VIA chip requires RX buffers to be aligned on 32-bit
1129 * boundaries, but that will hose non-x86 machines. To get around
1130 * this, we leave some empty space at the start of each buffer
1131 * and for non-x86 hosts, we copy the buffer back two bytes
1132 * to achieve word alignment. This is slightly more efficient
1133 * than allocating a new buffer, copying the contents, and
1134 * discarding the old buffer.
1135 */
1136 m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN;
1137 m_adj(m, VGE_ETHER_ALIGN);
1138#else
1139 m->m_len = m->m_pkthdr.len = MCLBYTES;
1140#endif
1141
1142 arg.sc = sc;
1143 arg.vge_idx = idx;
1144 arg.vge_maxsegs = 1;
1145 arg.vge_flags = 0;
1146
1147 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag,
1148 sc->vge_ldata.vge_rx_dmamap[idx], m,
1149 vge_dma_map_rx_desc, &arg, BUS_DMA_NOWAIT);
1150 if (error || arg.vge_maxsegs != 1) {
1151 if (n != NULL)
1152 m_freem(n);
1153 return (ENOMEM);
1154 }
1155
1156 /*
1157 * Note: the manual fails to document the fact that for
 1158 * proper operation, the driver needs to replenish the RX
1159 * DMA ring 4 descriptors at a time (rather than one at a
1160 * time, like most chips). We can allocate the new buffers
1161 * but we should not set the OWN bits until we're ready
1162 * to hand back 4 of them in one shot.
1163 */
1164
1165#define VGE_RXCHUNK 4
1166 sc->vge_rx_consumed++;
1167 if (sc->vge_rx_consumed == VGE_RXCHUNK) {
1168 for (i = idx; i != idx - sc->vge_rx_consumed; i--) {
1169 sc->vge_ldata.vge_rx_list[i].vge_sts |=
1170 htole32(VGE_RDSTS_OWN);
1171 }
1172 sc->vge_rx_consumed = 0;
1173 }
1174
1175 sc->vge_ldata.vge_rx_mbuf[idx] = m;
1176
1177 bus_dmamap_sync(sc->vge_ldata.vge_mtag,
1178 sc->vge_ldata.vge_rx_dmamap[idx], BUS_DMASYNC_PREREAD);
1179
1180 return (0);
1181}
1182
1183static int
 1184vge_tx_list_init(struct vge_softc *sc)
1185{
1186 bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
1187 bzero ((char *)&sc->vge_ldata.vge_tx_mbuf,
1188 (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));
1189
1190 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1191 sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
1192 sc->vge_ldata.vge_tx_prodidx = 0;
1193 sc->vge_ldata.vge_tx_considx = 0;
1194 sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;
1195
1196 return (0);
1197}
1198
1199static int
1200vge_rx_list_init(struct vge_softc *sc)
1201{
1202 int i;
1203
1204 bzero(sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
1205 bzero(&sc->vge_ldata.vge_rx_mbuf,
1206 VGE_RX_DESC_CNT * sizeof(struct mbuf *));
1207
1208 sc->vge_rx_consumed = 0;
1209
1210 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
1211 if (vge_newbuf(sc, i, NULL) == ENOBUFS)
1212 return (ENOBUFS);
1213 }
1214
1215 /* Flush the RX descriptors */
1216 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1217 sc->vge_ldata.vge_rx_list_map,
1218 BUS_DMASYNC_PREWRITE);
1219
1220 sc->vge_ldata.vge_rx_prodidx = 0;
1221 sc->vge_rx_consumed = 0;
1222 sc->vge_head = sc->vge_tail = NULL;
1223 return (0);
1224}
1225
1226#ifdef VGE_FIXUP_RX
1227static __inline void
1228vge_fixup_rx(struct mbuf *m)
1229{
1230 uint16_t *src, *dst;
1231 int i;
1232
1233 src = mtod(m, uint16_t *);
1234 dst = src - 1;
1235
1236 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1237 *dst++ = *src++;
1238
1239 m->m_data -= ETHER_ALIGN;
1240}
1241#endif
1242
1243/*
1244 * RX handler. We support the reception of jumbo frames that have
1245 * been fragmented across multiple 2K mbuf cluster buffers.
1246 */
1247static void
1248vge_rxeof(struct vge_softc *sc, int count)
1249{
1250 struct ifnet *ifp = &sc->arpcom.ac_if;
1251 struct mbuf *m;
1252 int i, total_len, lim = 0;
1253 struct vge_rx_desc *cur_rx;
1254 uint32_t rxstat, rxctl;
1255
1256 ASSERT_SERIALIZED(ifp->if_serializer);
1257
1258 i = sc->vge_ldata.vge_rx_prodidx;
1259
1260 /* Invalidate the descriptor memory */
1261
1262 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1263 sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_POSTREAD);
1264
1265 while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) {
 1266#ifdef IFPOLL_ENABLE
1267 if (count >= 0 && count-- == 0)
1268 break;
1269#endif
1270
1271 cur_rx = &sc->vge_ldata.vge_rx_list[i];
1272 m = sc->vge_ldata.vge_rx_mbuf[i];
1273 total_len = VGE_RXBYTES(cur_rx);
1274 rxstat = le32toh(cur_rx->vge_sts);
1275 rxctl = le32toh(cur_rx->vge_ctl);
1276
1277 /* Invalidate the RX mbuf and unload its map */
1278 bus_dmamap_sync(sc->vge_ldata.vge_mtag,
1279 sc->vge_ldata.vge_rx_dmamap[i],
1280 BUS_DMASYNC_POSTWRITE);
1281 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
1282 sc->vge_ldata.vge_rx_dmamap[i]);
1283
1284 /*
1285 * If the 'start of frame' bit is set, this indicates
1286 * either the first fragment in a multi-fragment receive,
1287 * or an intermediate fragment. Either way, we want to
1288 * accumulate the buffers.
1289 */
1290 if (rxstat & VGE_RXPKT_SOF) {
1291 m->m_len = MCLBYTES - VGE_ETHER_ALIGN;
1292 if (sc->vge_head == NULL) {
1293 sc->vge_head = sc->vge_tail = m;
1294 } else {
1295 m->m_flags &= ~M_PKTHDR;
1296 sc->vge_tail->m_next = m;
1297 sc->vge_tail = m;
1298 }
1299 vge_newbuf(sc, i, NULL);
1300 VGE_RX_DESC_INC(i);
1301 continue;
1302 }
1303
1304 /*
1305 * Bad/error frames will have the RXOK bit cleared.
1306 * However, there's one error case we want to allow:
1307 * if a VLAN tagged frame arrives and the chip can't
1308 * match it against the CAM filter, it considers this
1309 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1310 * We don't want to drop the frame though: our VLAN
1311 * filtering is done in software.
1312 */
1313 if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM) &&
1314 !(rxstat & VGE_RDSTS_CSUMERR)) {
1315 ifp->if_ierrors++;
1316 /*
1317 * If this is part of a multi-fragment packet,
1318 * discard all the pieces.
1319 */
1320 if (sc->vge_head != NULL) {
1321 m_freem(sc->vge_head);
1322 sc->vge_head = sc->vge_tail = NULL;
1323 }
1324 vge_newbuf(sc, i, m);
1325 VGE_RX_DESC_INC(i);
1326 continue;
1327 }
1328
1329 /*
1330 * If allocating a replacement mbuf fails,
1331 * reload the current one.
1332 */
1333 if (vge_newbuf(sc, i, NULL)) {
1334 ifp->if_ierrors++;
1335 if (sc->vge_head != NULL) {
1336 m_freem(sc->vge_head);
1337 sc->vge_head = sc->vge_tail = NULL;
1338 }
1339 vge_newbuf(sc, i, m);
1340 VGE_RX_DESC_INC(i);
1341 continue;
1342 }
1343
1344 VGE_RX_DESC_INC(i);
1345
1346 if (sc->vge_head != NULL) {
1347 m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN);
1348 /*
1349 * Special case: if there's 4 bytes or less
1350 * in this buffer, the mbuf can be discarded:
1351 * the last 4 bytes is the CRC, which we don't
1352 * care about anyway.
1353 */
1354 if (m->m_len <= ETHER_CRC_LEN) {
1355 sc->vge_tail->m_len -=
1356 (ETHER_CRC_LEN - m->m_len);
1357 m_freem(m);
1358 } else {
1359 m->m_len -= ETHER_CRC_LEN;
1360 m->m_flags &= ~M_PKTHDR;
1361 sc->vge_tail->m_next = m;
1362 }
1363 m = sc->vge_head;
1364 sc->vge_head = sc->vge_tail = NULL;
1365 m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1366 } else {
1367 m->m_pkthdr.len = m->m_len =
1368 (total_len - ETHER_CRC_LEN);
1369 }
1370
1371#ifdef VGE_FIXUP_RX
1372 vge_fixup_rx(m);
1373#endif
1374 ifp->if_ipackets++;
1375 m->m_pkthdr.rcvif = ifp;
1376
1377 /* Do RX checksumming if enabled */
1378 if (ifp->if_capenable & IFCAP_RXCSUM) {
1379 /* Check IP header checksum */
1380 if (rxctl & VGE_RDCTL_IPPKT)
1381 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
1382 if (rxctl & VGE_RDCTL_IPCSUMOK)
1383 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
1384
1385 /* Check TCP/UDP checksum */
1386 if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) &&
1387 rxctl & VGE_RDCTL_PROTOCSUMOK) {
1388 m->m_pkthdr.csum_flags |=
1389 CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
1390 CSUM_FRAG_NOT_CHECKED;
1391 m->m_pkthdr.csum_data = 0xffff;
1392 }
1393 }
1394
1395 if (rxstat & VGE_RDSTS_VTAG) {
1396 m->m_flags |= M_VLANTAG;
1397 m->m_pkthdr.ether_vlantag =
1398 ntohs((rxctl & VGE_RDCTL_VLANID));
1399 }
1400 ifp->if_input(ifp, m);
1401
1402 lim++;
1403 if (lim == VGE_RX_DESC_CNT)
1404 break;
1405 }
1406
1407 /* Flush the RX DMA ring */
1408 bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
1409 sc->vge_ldata.vge_rx_list_map,
1410 BUS_DMASYNC_PREWRITE);
1411
1412 sc->vge_ldata.vge_rx_prodidx = i;
1413 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1414}
1415
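/*
 * Reclaim completed transmit descriptors: free their mbufs, unload the
 * DMA maps and update the TX ring bookkeeping.
 */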
1416static void
1417vge_txeof(struct vge_softc *sc)
1418{
1419 struct ifnet *ifp = &sc->arpcom.ac_if;
1420 uint32_t txstat;
1421 int idx;
1422
1423 idx = sc->vge_ldata.vge_tx_considx;
1424
1425 /* Invalidate the TX descriptor list */
1426
1427 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1428 sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_POSTREAD);
1429
1430 while (idx != sc->vge_ldata.vge_tx_prodidx) {
1431
1432 txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts);
1433 if (txstat & VGE_TDSTS_OWN)
1434 break;
1435
1436 m_freem(sc->vge_ldata.vge_tx_mbuf[idx]);
1437 sc->vge_ldata.vge_tx_mbuf[idx] = NULL;
1438 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
1439 sc->vge_ldata.vge_tx_dmamap[idx]);
1440 if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
1441 ifp->if_collisions++;
1442 if (txstat & VGE_TDSTS_TXERR)
1443 ifp->if_oerrors++;
1444 else
1445 ifp->if_opackets++;
1446
1447 sc->vge_ldata.vge_tx_free++;
1448 VGE_TX_DESC_INC(idx);
1449 }
1450
1451 /* No changes made to the TX ring, so no flush needed */
1452 if (idx != sc->vge_ldata.vge_tx_considx) {
1453 sc->vge_ldata.vge_tx_considx = idx;
 1454 ifq_clr_oactive(&ifp->if_snd);
1455 ifp->if_timer = 0;
1456 }
1457
1458 /*
 1459 * If not all descriptors have been reaped yet,
1460 * reload the timer so that we will eventually get another
1461 * interrupt that will cause us to re-enter this routine.
1462 * This is done in case the transmitter has gone idle.
1463 */
1464 if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT)
1465 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1466}
1467
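/*
 * Periodic link-state check, driven by the MII tick. When the link
 * comes back up, kick the transmit queue.
 */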
1468static void
1469vge_tick(struct vge_softc *sc)
1470{
1471 struct ifnet *ifp = &sc->arpcom.ac_if;
1472 struct mii_data *mii;
1473
1474 mii = device_get_softc(sc->vge_miibus);
1475
1476 mii_tick(mii);
1477 if (sc->vge_link) {
 1478 if (!(mii->mii_media_status & IFM_ACTIVE))
 1479 sc->vge_link = 0;
1480 } else {
1481 if (mii->mii_media_status & IFM_ACTIVE &&
1482 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1483 sc->vge_link = 1;
 1484 if (!ifq_is_empty(&ifp->if_snd))
 1485 if_devstart(ifp);
1486 }
1487 }
1488}
1489
1490#ifdef IFPOLL_ENABLE
1491
 1492static void
 1493vge_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
1494{
1495 struct vge_softc *sc = ifp->if_softc;
1496
 1497 ASSERT_SERIALIZED(ifp->if_serializer);
 1498
1499 vge_rxeof(sc, count);
1500 vge_txeof(sc);
1501
1502 if (!ifq_is_empty(&ifp->if_snd))
1503 if_devstart(ifp);
 1504
1505 /* XXX copy & paste from vge_intr */
1506 if (sc->vge_npoll.ifpc_stcount-- == 0) {
1507 uint32_t status;
 1508
 1509 sc->vge_npoll.ifpc_stcount = sc->vge_npoll.ifpc_stfrac;
 1510
1511 status = CSR_READ_4(sc, VGE_ISR);
1512 if (status == 0xffffffff)
1513 return;
 1514
1515 if (status)
1516 CSR_WRITE_4(sc, VGE_ISR, status);
 1517
1518 if (status & (VGE_ISR_TXDMA_STALL |
1519 VGE_ISR_RXDMA_STALL))
1520 vge_init(sc);
 1521
1522 if (status & (VGE_ISR_RXOFLOW | VGE_ISR_RXNODESC)) {
1523 ifp->if_ierrors++;
1524 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1525 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
 1526 }
 1527 }
1528}
1529
1530static void
1531vge_npoll(struct ifnet *ifp, struct ifpoll_info *info)
1532{
1533 struct vge_softc *sc = ifp->if_softc;
1534
1535 ASSERT_SERIALIZED(ifp->if_serializer);
1536
1537 if (info != NULL) {
1538 int cpuid = sc->vge_npoll.ifpc_cpuid;
1539
1540 info->ifpi_rx[cpuid].poll_func = vge_npoll_compat;
1541 info->ifpi_rx[cpuid].arg = NULL;
1542 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
 1543
1544 if (ifp->if_flags & IFF_RUNNING)
1545 vge_disable_intr(sc);
1546 ifp->if_npoll_cpuid = cpuid;
1547 } else {
1548 if (ifp->if_flags & IFF_RUNNING)
1549 vge_enable_intr(sc, 0xffffffff);
1550 ifp->if_npoll_cpuid = -1;
1551 }
 1552}
1553
1554#endif /* IFPOLL_ENABLE */
1555
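/*
 * Interrupt handler: service RX/TX completions, error conditions and
 * link-state changes.
 */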
1556static void
1557vge_intr(void *arg)
1558{
1559 struct vge_softc *sc = arg;
1560 struct ifnet *ifp = &sc->arpcom.ac_if;
1561 uint32_t status;
1562
1563 if (sc->suspended || !(ifp->if_flags & IFF_UP))
1564 return;
1565
1566 /* Disable interrupts */
1567 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1568
1569 for (;;) {
1570 status = CSR_READ_4(sc, VGE_ISR);
1571 /* If the card has gone away the read returns 0xffff. */
1572 if (status == 0xFFFFFFFF)
1573 break;
1574
1575 if (status)
1576 CSR_WRITE_4(sc, VGE_ISR, status);
1577
1578 if ((status & VGE_INTRS) == 0)
1579 break;
1580
1581 if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1582 vge_rxeof(sc, -1);
1583
1584 if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1585 vge_rxeof(sc, -1);
1586 ifp->if_ierrors++;
1587 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1588 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1589 }
1590
1591 if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1592 vge_txeof(sc);
1593
1594 if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
1595 vge_init(sc);
1596
1597 if (status & VGE_ISR_LINKSTS)
1598 vge_tick(sc);
1599 }
1600
1601 /* Re-enable interrupts */
1602 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1603
1604 if (!ifq_is_empty(&ifp->if_snd))
 1605 if_devstart(ifp);
1606}
1607
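/*
 * Encapsulate one frame: DMA-map the mbuf chain and fill in TX
 * descriptor 'idx'. The mbuf is freed on failure.
 */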
1608static int
1609vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
1610{
1611 struct vge_dmaload_arg arg;
1612 bus_dmamap_t map;
1613 int error;
1614
1615 arg.vge_flags = 0;
1616
1617 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
1618 arg.vge_flags |= VGE_TDCTL_IPCSUM;
1619 if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
1620 arg.vge_flags |= VGE_TDCTL_TCPCSUM;
1621 if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
1622 arg.vge_flags |= VGE_TDCTL_UDPCSUM;
1623
1624 arg.sc = sc;
1625 arg.vge_idx = idx;
1626 arg.vge_m0 = m_head;
1627 arg.vge_maxsegs = VGE_TX_FRAGS;
1628
1629 map = sc->vge_ldata.vge_tx_dmamap[idx];
1630 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, m_head,
1631 vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
1632 if (error && error != EFBIG) {
1633 if_printf(&sc->arpcom.ac_if, "can't map mbuf (error %d)\n",
1634 error);
1635 goto fail;
1636 }
1637
1638 /* Too many segments to map, coalesce into a single mbuf */
1639 if (error || arg.vge_maxsegs == 0) {
1640 struct mbuf *m_new;
1641
1642 m_new = m_defrag(m_head, MB_DONTWAIT);
1643 if (m_new == NULL) {
1644 error = ENOBUFS;
1645 goto fail;
1646 } else {
1647 m_head = m_new;
1648 }
1649
1650 arg.sc = sc;
1651 arg.vge_m0 = m_head;
1652 arg.vge_idx = idx;
1653 arg.vge_maxsegs = 1;
1654
1655 error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map,
1656 m_head, vge_dma_map_tx_desc, &arg,
1657 BUS_DMA_NOWAIT);
1658 if (error) {
1659 if_printf(&sc->arpcom.ac_if,
1660 "can't map mbuf (error %d)\n", error);
1661 goto fail;
1662 }
1663 }
1664
1665 sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
1666 sc->vge_ldata.vge_tx_free--;
1667
1668 /*
1669 * Set up hardware VLAN tagging.
1670 */
1671 if (m_head->m_flags & M_VLANTAG) {
1672 sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
1673 htole32(htons(m_head->m_pkthdr.ether_vlantag) |
1674 VGE_TDCTL_VTAG);
1675 }
1676
1677 sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);
1678 return (0);
1679
1680fail:
1681 m_freem(m_head);
1682 return error;
1683}
1684
1685/*
1686 * Main transmit routine.
1687 */
1688
1689static void
1690vge_start(struct ifnet *ifp)
1691{
1692 struct vge_softc *sc = ifp->if_softc;
1693 struct mbuf *m_head = NULL;
1694 int idx, pidx = 0;
1695
1696 ASSERT_SERIALIZED(ifp->if_serializer);
1697
1698 if (!sc->vge_link) {
1699 ifq_purge(&ifp->if_snd);
 1700 return;
 1701 }
 1702
 1703 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
1704 return;
1705
1706 idx = sc->vge_ldata.vge_tx_prodidx;
1707
1708 pidx = idx - 1;
1709 if (pidx < 0)
1710 pidx = VGE_TX_DESC_CNT - 1;
1711
1712 while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
 1713 if (sc->vge_ldata.vge_tx_free <= 2) {
 1714 ifq_set_oactive(&ifp->if_snd);
1715 break;
1716 }
1717
1718 m_head = ifq_dequeue(&ifp->if_snd, NULL);
1719 if (m_head == NULL)
1720 break;
1721
1722 if (vge_encap(sc, m_head, idx)) {
1723 /* If vge_encap() failed, it will free m_head for us */
 1724 ifq_set_oactive(&ifp->if_snd);
1725 break;
1726 }
1727
1728 sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
1729 htole16(VGE_TXDESC_Q);
1730
1731 pidx = idx;
1732 VGE_TX_DESC_INC(idx);
1733
1734 /*
1735 * If there's a BPF listener, bounce a copy of this frame
1736 * to him.
1737 */
 1738 ETHER_BPF_MTAP(ifp, m_head);
1739 }
1740
1741 if (idx == sc->vge_ldata.vge_tx_prodidx)
1742 return;
1743
1744 /* Flush the TX descriptors */
1745 bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
1746 sc->vge_ldata.vge_tx_list_map,
1747 BUS_DMASYNC_PREWRITE);
1748
1749 /* Issue a transmit command. */
1750 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1751
1752 sc->vge_ldata.vge_tx_prodidx = idx;
1753
1754 /*
1755 * Use the countdown timer for interrupt moderation.
1756 * 'TX done' interrupts are disabled. Instead, we reset the
1757 * countdown timer, which will begin counting until it hits
1758 * the value in the SSTIMER register, and then trigger an
1759 * interrupt. Each time we set the TIMER0_ENABLE bit, the
 1760 * timer count is reloaded. Only when the transmitter
1761 * is idle will the timer hit 0 and an interrupt fire.
1762 */
1763 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1764
1765 /*
1766 * Set a timeout in case the chip goes out to lunch.
1767 */
1768 ifp->if_timer = 5;
1769}
1770
1771static void
1772vge_init(void *xsc)
1773{
1774 struct vge_softc *sc = xsc;
1775 struct ifnet *ifp = &sc->arpcom.ac_if;
1776 struct mii_data *mii;
1777 int i;
1778
1779 ASSERT_SERIALIZED(ifp->if_serializer);
1780
1781 mii = device_get_softc(sc->vge_miibus);
1782
1783 /*
1784 * Cancel pending I/O and free all RX/TX buffers.
1785 */
1786 vge_stop(sc);
1787 vge_reset(sc);
1788
1789 /*
1790 * Initialize the RX and TX descriptors and mbufs.
1791 */
1792 vge_rx_list_init(sc);
1793 vge_tx_list_init(sc);
1794
1795 /* Set our station address */
1796 for (i = 0; i < ETHER_ADDR_LEN; i++)
1797 CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(ifp)[i]);
1798
1799 /*
1800 * Set receive FIFO threshold. Also allow transmission and
1801 * reception of VLAN tagged frames.
1802 */
1803 CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
1804 CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
1805
1806 /* Set DMA burst length */
1807 CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1808 CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1809
1810 CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
1811
1812 /* Set collision backoff algorithm */
1813 CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
1814 VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
1815 CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1816
1817 /* Disable LPSEL field in priority resolution */
1818 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1819
1820 /*
1821 * Load the addresses of the DMA queues into the chip.
1822 * Note that we only use one transmit queue.
1823 */
1824 CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
1825 VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
1826 CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);
1827
1828 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
1829 VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
1830 CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
1831 CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);
1832
1833 /* Enable and wake up the RX descriptor queue */
1834 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1835 CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1836
1837 /* Enable the TX descriptor queue */
1838 CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1839
1840 /* Set up the receive filter -- allow large frames for VLANs. */
1841 CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
1842
1843 /* If we want promiscuous mode, set the allframes bit. */
1844 if (ifp->if_flags & IFF_PROMISC)
1845 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1846
1847 /* Set capture broadcast bit to capture broadcast frames. */
1848 if (ifp->if_flags & IFF_BROADCAST)
1849 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1850
1851 /* Set multicast bit to capture multicast frames. */
1852 if (ifp->if_flags & IFF_MULTICAST)
1853 CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1854
1855 /* Init the cam filter. */
1856 vge_cam_clear(sc);
1857
1858 /* Init the multicast filter. */
1859 vge_setmulti(sc);
1860
1861 /* Enable flow control */
1862
1863 CSR_WRITE_1(sc, VGE_CRS2, 0x8B);
1864
1865 /* Enable jumbo frame reception (if desired) */
1866
1867 /* Start the MAC. */
1868 CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1869 CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1870 CSR_WRITE_1(sc, VGE_CRS0,
1871 VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1872
1873 /*
1874 * Configure one-shot timer for microsecond
 1875 * resolution and load it for 500 usecs.
1876 */
1877 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
1878 CSR_WRITE_2(sc, VGE_SSTIMER, 400);
1879
1880 /*
1881 * Configure interrupt moderation for receive. Enable
1882 * the holdoff counter and load it, and set the RX
1883 * suppression count to the number of descriptors we
1884 * want to allow before triggering an interrupt.
1885 * The holdoff timer is in units of 20 usecs.
1886 */
1887
1888#ifdef notyet
1889 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
1890 /* Select the interrupt holdoff timer page. */
1891 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1892 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
1893 CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
1894
1895 /* Enable use of the holdoff timer. */
1896 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
1897 CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
1898
1899 /* Select the RX suppression threshold page. */
1900 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1901 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
1902 CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
1903
1904 /* Restore the page select bits. */
1905 CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1906 CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
1907#endif
1908
 1909#ifdef IFPOLL_ENABLE
 1910 /* Disable intr if polling(4) is enabled */
 1911 if (ifp->if_flags & IFF_NPOLLING)
1912 vge_disable_intr(sc);
1913 else
1914#endif
1915 vge_enable_intr(sc, 0);
1916
1917 mii_mediachg(mii);
1918
1919 ifp->if_flags |= IFF_RUNNING;
 1920 ifq_clr_oactive(&ifp->if_snd);
1921
1922 sc->vge_if_flags = 0;
1923 sc->vge_link = 0;
1924}
1925
1926/*
1927 * Set media options.
1928 */
1929static int
1930vge_ifmedia_upd(struct ifnet *ifp)
1931{
1932 struct vge_softc *sc = ifp->if_softc;
1933 struct mii_data *mii = device_get_softc(sc->vge_miibus);
1934
1935 mii_mediachg(mii);
1936
1937 return (0);
1938}
1939
1940/*
1941 * Report current media status.
1942 */
1943static void
1944vge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1945{
1946 struct vge_softc *sc = ifp->if_softc;
1947 struct mii_data *mii = device_get_softc(sc->vge_miibus);
1948
1949 mii_pollstat(mii);
1950 ifmr->ifm_active = mii->mii_media_active;
1951 ifmr->ifm_status = mii->mii_media_status;
1952}
1953
1954static void
1955vge_miibus_statchg(device_t dev)
1956{
1957 struct vge_softc *sc;
1958 struct mii_data *mii;
1959 struct ifmedia_entry *ife;
1960
1961 sc = device_get_softc(dev);
1962 mii = device_get_softc(sc->vge_miibus);
1963 ife = mii->mii_media.ifm_cur;
1964
1965 /*
1966 * If the user manually selects a media mode, we need to turn
1967 * on the forced MAC mode bit in the DIAGCTL register. If the
1968 * user happens to choose a full duplex mode, we also need to
1969 * set the 'force full duplex' bit. This applies only to
1970 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
1971 * mode is disabled, and in 1000baseT mode, full duplex is
1972 * always implied, so we turn on the forced mode bit but leave
1973 * the FDX bit cleared.
1974 */
1975
1976 switch (IFM_SUBTYPE(ife->ifm_media)) {
1977 case IFM_AUTO:
1978 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1979 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1980 break;
1981 case IFM_1000_T:
1982 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1983 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1984 break;
1985 case IFM_100_TX:
1986 case IFM_10_T:
1987 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1988 if ((ife->ifm_media & IFM_GMASK) == IFM_FDX)
1989 CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1990 else
1991 CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1992 break;
1993 default:
1994 device_printf(dev, "unknown media type: %x\n",
1995 IFM_SUBTYPE(ife->ifm_media));
1996 break;
1997 }
1998}
1999
2000static int
2001vge_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
2002{
2003 struct vge_softc *sc = ifp->if_softc;
2004 struct ifreq *ifr = (struct ifreq *)data;
2005 struct mii_data *mii;
2006 int error = 0;
2007
2008 switch (command) {
2009 case SIOCSIFMTU:
 2010		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
 2011			error = EINVAL;
 2012		else
			ifp->if_mtu = ifr->ifr_mtu;
2013 break;
2014 case SIOCSIFFLAGS:
2015 if (ifp->if_flags & IFF_UP) {
2016 if ((ifp->if_flags & IFF_RUNNING) &&
2017 (ifp->if_flags & IFF_PROMISC) &&
2018 !(sc->vge_if_flags & IFF_PROMISC)) {
2019 CSR_SETBIT_1(sc, VGE_RXCTL,
2020 VGE_RXCTL_RX_PROMISC);
2021 vge_setmulti(sc);
2022 } else if ((ifp->if_flags & IFF_RUNNING) &&
2023 !(ifp->if_flags & IFF_PROMISC) &&
2024 (sc->vge_if_flags & IFF_PROMISC)) {
2025 CSR_CLRBIT_1(sc, VGE_RXCTL,
2026 VGE_RXCTL_RX_PROMISC);
2027 vge_setmulti(sc);
2028 } else {
2029 vge_init(sc);
2030 }
2031 } else {
2032 if (ifp->if_flags & IFF_RUNNING)
2033 vge_stop(sc);
2034 }
2035 sc->vge_if_flags = ifp->if_flags;
2036 break;
2037 case SIOCADDMULTI:
2038 case SIOCDELMULTI:
2039 vge_setmulti(sc);
2040 break;
2041 case SIOCGIFMEDIA:
2042 case SIOCSIFMEDIA:
2043 mii = device_get_softc(sc->vge_miibus);
2044 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
2045 break;
2046 case SIOCSIFCAP:
2047 {
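		/*
		 * 'mask' holds the capability bits the caller wants to
		 * toggle relative to the current configuration.
		 */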
2048 uint32_t mask = ifr->ifr_reqcap ^ ifp->if_capenable;
2049
2050 if (mask & IFCAP_HWCSUM) {
 2051			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
2052 if (ifp->if_capenable & IFCAP_TXCSUM)
2053 ifp->if_hwassist = VGE_CSUM_FEATURES;
2054 else
2055 ifp->if_hwassist = 0;
2056 if (ifp->if_flags & IFF_RUNNING)
2057 vge_init(sc);
2058 }
2059 }
2060 break;
2061 default:
2062 error = ether_ioctl(ifp, command, data);
2063 break;
2064 }
2065 return (error);
2066}
2067
2068static void
2069vge_watchdog(struct ifnet *ifp)
2070{
2071 struct vge_softc *sc = ifp->if_softc;
2072
2073 if_printf(ifp, "watchdog timeout\n");
2074 ifp->if_oerrors++;
2075
2076 vge_txeof(sc);
2077 vge_rxeof(sc, -1);
2078
2079 vge_init(sc);
2080}
2081
2082/*
2083 * Stop the adapter and free any mbufs allocated to the
2084 * RX and TX lists.
2085 */
2086static void
2087vge_stop(struct vge_softc *sc)
2088{
2089 struct ifnet *ifp = &sc->arpcom.ac_if;
2090 int i;
2091
2092 ASSERT_SERIALIZED(ifp->if_serializer);
2093
2094 ifp->if_timer = 0;
2095
2096 ifp->if_flags &= ~IFF_RUNNING;
2097 ifq_clr_oactive(&ifp->if_snd);
2098
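	/*
	 * Mask the global interrupt, stop the MAC, ack any pending
	 * interrupt status and shut down the TX and RX descriptor queues.
	 */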
2099 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2100 CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2101 CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2102 CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2103 CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2104 CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2105
2106 if (sc->vge_head != NULL) {
2107 m_freem(sc->vge_head);
2108 sc->vge_head = sc->vge_tail = NULL;
2109 }
2110
2111 /* Free the TX list buffers. */
2112 for (i = 0; i < VGE_TX_DESC_CNT; i++) {
2113 if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
2114 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
2115 sc->vge_ldata.vge_tx_dmamap[i]);
2116 m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
2117 sc->vge_ldata.vge_tx_mbuf[i] = NULL;
2118 }
2119 }
2120
2121 /* Free the RX list buffers. */
2122 for (i = 0; i < VGE_RX_DESC_CNT; i++) {
2123 if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
2124 bus_dmamap_unload(sc->vge_ldata.vge_mtag,
2125 sc->vge_ldata.vge_rx_dmamap[i]);
2126 m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
2127 sc->vge_ldata.vge_rx_mbuf[i] = NULL;
2128 }
2129 }
2130}
2131
2132/*
 2133 * Device suspend routine.  Stop the interface and mark the driver
 2134 * as suspended; the interface is reinitialized in vge_resume() if
 2135 * it is still marked up.
2136 */
2137static int
2138vge_suspend(device_t dev)
2139{
2140 struct vge_softc *sc = device_get_softc(dev);
2141 struct ifnet *ifp = &sc->arpcom.ac_if;
2142
2143 lwkt_serialize_enter(ifp->if_serializer);
2144 vge_stop(sc);
2145 sc->suspended = 1;
2146 lwkt_serialize_exit(ifp->if_serializer);
2147
2148 return (0);
2149}
2150
2151/*
2152 * Device resume routine. Restore some PCI settings in case the BIOS
2153 * doesn't, re-enable busmastering, and restart the interface if
2154 * appropriate.
2155 */
2156static int
2157vge_resume(device_t dev)
2158{
2159 struct vge_softc *sc = device_get_softc(dev);
2160 struct ifnet *ifp = &sc->arpcom.ac_if;
2161
 2162	/* reenable busmastering and memory space access */
2163 pci_enable_busmaster(dev);
2164 pci_enable_io(dev, SYS_RES_MEMORY);
2165
2166 lwkt_serialize_enter(ifp->if_serializer);
2167 /* reinitialize interface if necessary */
2168 if (ifp->if_flags & IFF_UP)
2169 vge_init(sc);
2170
2171 sc->suspended = 0;
2172 lwkt_serialize_exit(ifp->if_serializer);
2173
2174 return (0);
2175}
2176
2177/*
2178 * Stop all chip I/O so that the kernel's probe routines don't
2179 * get confused by errant DMAs when rebooting.
2180 */
2181static void
2182vge_shutdown(device_t dev)
2183{
2184 struct vge_softc *sc = device_get_softc(dev);
2185 struct ifnet *ifp = &sc->arpcom.ac_if;
2186
2187 lwkt_serialize_enter(ifp->if_serializer);
2188 vge_stop(sc);
2189 lwkt_serialize_exit(ifp->if_serializer);
2190}
2191
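/*
 * Unmask the interrupts the driver uses and reopen the global interrupt
 * gate; a nonzero 'isr' acks interrupt status bits that are already
 * pending before interrupts are re-enabled.
 */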
2192static void
2193vge_enable_intr(struct vge_softc *sc, uint32_t isr)
2194{
2195 CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
2196 CSR_WRITE_4(sc, VGE_ISR, isr);
2197 CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
2198}
2199
2200#ifdef IFPOLL_ENABLE
2201
2202static void
2203vge_disable_intr(struct vge_softc *sc)
2204{
2205 CSR_WRITE_4(sc, VGE_IMR, 0);
2206 CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
 2207	sc->vge_npoll.ifpc_stcount = 0;
 2208}
2209
2210#endif /* IFPOLL_ENABLE */