Allow nfe and xl to compile without DEVICE_POLLING.
[dragonfly.git] / sys / dev / netif / xl / if_xl.c
CommitLineData
984263bc
MD
1/*
2 * Copyright (c) 1997, 1998, 1999
3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
a3a1d2d2 32 * $FreeBSD: src/sys/pci/if_xl.c,v 1.72.2.28 2003/10/08 06:01:57 murray Exp $
a75a1559 33 * $DragonFly: src/sys/dev/netif/xl/if_xl.c,v 1.56 2008/09/17 08:51:29 sephe Exp $
984263bc
MD
34 */
35
36/*
37 * 3Com 3c90x Etherlink XL PCI NIC driver
38 *
39 * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI
40 * bus-master chips (3c90x cards and embedded controllers) including
41 * the following:
42 *
43 * 3Com 3c900-TPO 10Mbps/RJ-45
44 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC
45 * 3Com 3c905-TX 10/100Mbps/RJ-45
46 * 3Com 3c905-T4 10/100Mbps/RJ-45
47 * 3Com 3c900B-TPO 10Mbps/RJ-45
48 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC
49 * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC
50 * 3Com 3c900B-FL 10Mbps/Fiber-optic
51 * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC
52 * 3Com 3c905B-TX 10/100Mbps/RJ-45
53 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic
54 * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC)
55 * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC)
56 * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC)
57 * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC)
58 * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC)
59 * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane)
60 * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
61 * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC)
62 * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
63 * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
64 * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
65 * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
66 * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC)
67 * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC)
68 * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45
69 * Dell on-board 3c920 10/100Mbps/RJ-45
70 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45
71 * Dell Latitude laptop docking station embedded 3c905-TX
72 *
73 * Written by Bill Paul <wpaul@ctr.columbia.edu>
74 * Electrical Engineering Department
75 * Columbia University, New York City
76 */
77
78/*
79 * The 3c90x series chips use a bus-master DMA interface for transfering
80 * packets to and from the controller chip. Some of the "vortex" cards
81 * (3c59x) also supported a bus master mode, however for those chips
82 * you could only DMA packets to/from a contiguous memory buffer. For
83 * transmission this would mean copying the contents of the queued mbuf
a3a1d2d2 84 * chain into an mbuf cluster and then DMAing the cluster. This extra
984263bc
MD
85 * copy would sort of defeat the purpose of the bus master support for
86 * any packet that doesn't fit into a single mbuf.
87 *
88 * By contrast, the 3c90x cards support a fragment-based bus master
89 * mode where mbuf chains can be encapsulated using TX descriptors.
90 * This is similar to other PCI chips such as the Texas Instruments
91 * ThunderLAN and the Intel 82557/82558.
92 *
93 * The "vortex" driver (if_vx.c) happens to work for the "boomerang"
94 * bus master chips because they maintain the old PIO interface for
95 * backwards compatibility, but starting with the 3c905B and the
96 * "cyclone" chips, the compatibility interface has been dropped.
97 * Since using bus master DMA is a big win, we use this driver to
98 * support the PCI "boomerang" chips even though they work with the
99 * "vortex" driver in order to obtain better performance.
984263bc
MD
100 */
101
2b71c8f1
SZ
102#include "opt_polling.h"
103
984263bc
MD
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/sockio.h>
a3a1d2d2 107#include <sys/endian.h>
984263bc 108#include <sys/mbuf.h>
984263bc
MD
109#include <sys/kernel.h>
110#include <sys/socket.h>
78195a76 111#include <sys/serialize.h>
1f7ab7c9
MD
112#include <sys/bus.h>
113#include <sys/rman.h>
4ec4a72b 114#include <sys/thread2.h>
9db4b353 115#include <sys/interrupt.h>
984263bc
MD
116
117#include <net/if.h>
b4e1aa10 118#include <net/ifq_var.h>
984263bc
MD
119#include <net/if_arp.h>
120#include <net/ethernet.h>
121#include <net/if_dl.h>
122#include <net/if_media.h>
a3a1d2d2 123#include <net/vlan/if_vlan_var.h>
984263bc
MD
124
125#include <net/bpf.h>
126
1f2de5d4
MD
127#include "../mii_layer/mii.h"
128#include "../mii_layer/miivar.h"
984263bc 129
1f2de5d4
MD
130#include <bus/pci/pcireg.h>
131#include <bus/pci/pcivar.h>
984263bc 132
984263bc
MD
133/* "controller miibus0" required. See GENERIC if you get errors here. */
134#include "miibus_if.h"
135
1f2de5d4 136#include "if_xlreg.h"
984263bc 137
984263bc
MD
138#define XL905B_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
139
140/*
141 * Various supported device vendors/types and their names.
142 */
143static struct xl_type xl_devs[] = {
144 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT,
145 "3Com 3c900-TPO Etherlink XL" },
146 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO,
147 "3Com 3c900-COMBO Etherlink XL" },
148 { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT,
149 "3Com 3c905-TX Fast Etherlink XL" },
150 { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4,
151 "3Com 3c905-T4 Fast Etherlink XL" },
152 { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT,
153 "3Com 3c900B-TPO Etherlink XL" },
154 { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO,
155 "3Com 3c900B-COMBO Etherlink XL" },
156 { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC,
157 "3Com 3c900B-TPC Etherlink XL" },
158 { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL,
159 "3Com 3c900B-FL Etherlink XL" },
160 { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT,
161 "3Com 3c905B-TX Fast Etherlink XL" },
162 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4,
163 "3Com 3c905B-T4 Fast Etherlink XL" },
164 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX,
165 "3Com 3c905B-FX/SC Fast Etherlink XL" },
166 { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO,
167 "3Com 3c905B-COMBO Fast Etherlink XL" },
168 { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT,
169 "3Com 3c905C-TX Fast Etherlink XL" },
170 { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B,
171 "3Com 3c920B-EMB Integrated Fast Etherlink XL" },
172 { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV,
173 "3Com 3c980 Fast Etherlink XL" },
174 { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV,
175 "3Com 3c980C Fast Etherlink XL" },
176 { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX,
177 "3Com 3cSOHO100-TX OfficeConnect" },
178 { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT,
179 "3Com 3c450-TX HomeConnect" },
180 { TC_VENDORID, TC_DEVICEID_HURRICANE_555,
181 "3Com 3c555 Fast Etherlink XL" },
182 { TC_VENDORID, TC_DEVICEID_HURRICANE_556,
183 "3Com 3c556 Fast Etherlink XL" },
184 { TC_VENDORID, TC_DEVICEID_HURRICANE_556B,
185 "3Com 3c556B Fast Etherlink XL" },
186 { TC_VENDORID, TC_DEVICEID_HURRICANE_575A,
187 "3Com 3c575TX Fast Etherlink XL" },
188 { TC_VENDORID, TC_DEVICEID_HURRICANE_575B,
189 "3Com 3c575B Fast Etherlink XL" },
190 { TC_VENDORID, TC_DEVICEID_HURRICANE_575C,
191 "3Com 3c575C Fast Etherlink XL" },
192 { TC_VENDORID, TC_DEVICEID_HURRICANE_656,
193 "3Com 3c656 Fast Etherlink XL" },
194 { TC_VENDORID, TC_DEVICEID_HURRICANE_656B,
195 "3Com 3c656B Fast Etherlink XL" },
196 { TC_VENDORID, TC_DEVICEID_TORNADO_656C,
197 "3Com 3c656C Fast Etherlink XL" },
198 { 0, 0, NULL }
199};
200
201static int xl_probe (device_t);
202static int xl_attach (device_t);
203static int xl_detach (device_t);
56170638
SZ
204static void xl_shutdown (device_t);
205static int xl_suspend (device_t);
206static int xl_resume (device_t);
984263bc 207
e4345646
SZ
208static int xl_newbuf (struct xl_softc *, struct xl_chain_onefrag *,
209 int);
984263bc 210static void xl_stats_update (void *);
78195a76 211static void xl_stats_update_serialized(void *);
984263bc
MD
212static int xl_encap (struct xl_softc *, struct xl_chain *,
213 struct mbuf *);
a0e7467e 214static void xl_rxeof (struct xl_softc *, int);
984263bc
MD
215static int xl_rx_resync (struct xl_softc *);
216static void xl_txeof (struct xl_softc *);
217static void xl_txeof_90xB (struct xl_softc *);
218static void xl_txeoc (struct xl_softc *);
219static void xl_intr (void *);
a0e7467e 220static void xl_start_body (struct ifnet *, int);
984263bc
MD
221static void xl_start (struct ifnet *);
222static void xl_start_90xB (struct ifnet *);
bd4539cc
JH
223static int xl_ioctl (struct ifnet *, u_long, caddr_t,
224 struct ucred *);
984263bc
MD
225static void xl_init (void *);
226static void xl_stop (struct xl_softc *);
227static void xl_watchdog (struct ifnet *);
a0e7467e 228#ifdef DEVICE_POLLING
92a4a5bb 229static void xl_start_poll (struct ifnet *);
a0e7467e
SZ
230static void xl_poll (struct ifnet *, enum poll_cmd, int);
231#endif
232static void xl_enable_intrs (struct xl_softc *, uint16_t);
984263bc
MD
233
234static int xl_ifmedia_upd (struct ifnet *);
235static void xl_ifmedia_sts (struct ifnet *, struct ifmediareq *);
236
237static int xl_eeprom_wait (struct xl_softc *);
238static int xl_read_eeprom (struct xl_softc *, caddr_t, int, int, int);
239static void xl_mii_sync (struct xl_softc *);
240static void xl_mii_send (struct xl_softc *, u_int32_t, int);
241static int xl_mii_readreg (struct xl_softc *, struct xl_mii_frame *);
242static int xl_mii_writereg (struct xl_softc *, struct xl_mii_frame *);
243
244static void xl_setcfg (struct xl_softc *);
245static void xl_setmode (struct xl_softc *, int);
984263bc
MD
246static void xl_setmulti (struct xl_softc *);
247static void xl_setmulti_hash (struct xl_softc *);
248static void xl_reset (struct xl_softc *);
249static int xl_list_rx_init (struct xl_softc *);
5001d436
SZ
250static void xl_list_tx_init (struct xl_softc *);
251static void xl_list_tx_init_90xB(struct xl_softc *);
984263bc
MD
252static void xl_wait (struct xl_softc *);
253static void xl_mediacheck (struct xl_softc *);
254static void xl_choose_xcvr (struct xl_softc *, int);
5001d436
SZ
255
256static int xl_dma_alloc (device_t);
257static void xl_dma_free (device_t);
258
984263bc
MD
259#ifdef notdef
260static void xl_testpacket (struct xl_softc *);
261#endif
262
263static int xl_miibus_readreg (device_t, int, int);
264static int xl_miibus_writereg (device_t, int, int, int);
265static void xl_miibus_statchg (device_t);
266static void xl_miibus_mediainit (device_t);
267
984263bc
MD
268static device_method_t xl_methods[] = {
269 /* Device interface */
270 DEVMETHOD(device_probe, xl_probe),
271 DEVMETHOD(device_attach, xl_attach),
272 DEVMETHOD(device_detach, xl_detach),
273 DEVMETHOD(device_shutdown, xl_shutdown),
274 DEVMETHOD(device_suspend, xl_suspend),
275 DEVMETHOD(device_resume, xl_resume),
276
277 /* bus interface */
278 DEVMETHOD(bus_print_child, bus_generic_print_child),
279 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
280
281 /* MII interface */
282 DEVMETHOD(miibus_readreg, xl_miibus_readreg),
283 DEVMETHOD(miibus_writereg, xl_miibus_writereg),
284 DEVMETHOD(miibus_statchg, xl_miibus_statchg),
285 DEVMETHOD(miibus_mediainit, xl_miibus_mediainit),
286
287 { 0, 0 }
288};
289
290static driver_t xl_driver = {
291 "xl",
292 xl_methods,
293 sizeof(struct xl_softc)
294};
295
296static devclass_t xl_devclass;
297
32832096
MD
298DECLARE_DUMMY_MODULE(if_xl);
299MODULE_DEPEND(if_xl, miibus, 1, 1, 1);
984263bc 300DRIVER_MODULE(if_xl, pci, xl_driver, xl_devclass, 0, 0);
29bcac8b 301DRIVER_MODULE(if_xl, cardbus, xl_driver, xl_devclass, 0, 0);
984263bc
MD
302DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0);
303
a3a1d2d2 304static void
a0e7467e
SZ
305xl_enable_intrs(struct xl_softc *sc, uint16_t intrs)
306{
307 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK | 0xFF);
308 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB | intrs);
309 if (sc->xl_flags & XL_FLAG_FUNCREG)
310 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
311}
312
984263bc
MD
313/*
314 * Murphy's law says that it's possible the chip can wedge and
315 * the 'command in progress' bit may never clear. Hence, we wait
316 * only a finite amount of time to avoid getting caught in an
317 * infinite loop. Normally this delay routine would be a macro,
318 * but it isn't called during normal operation so we can afford
319 * to make it a function.
320 */
321static void
96d3e330 322xl_wait(struct xl_softc *sc)
984263bc 323{
3d0f5f54 324 int i;
984263bc
MD
325
326 for (i = 0; i < XL_TIMEOUT; i++) {
327 if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
328 break;
329 }
330
331 if (i == XL_TIMEOUT)
ed2832e7 332 if_printf(&sc->arpcom.ac_if, "command never completed!");
984263bc
MD
333
334 return;
335}
336
337/*
338 * MII access routines are provided for adapters with external
339 * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in
340 * autoneg logic that's faked up to look like a PHY (3c905B-TX).
341 * Note: if you don't perform the MDIO operations just right,
342 * it's possible to end up with code that works correctly with
343 * some chips/CPUs/processor speeds/bus speeds/etc but not
344 * with others.
345 */
346#define MII_SET(x) \
347 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
a3a1d2d2 348 CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))
984263bc
MD
349
350#define MII_CLR(x) \
351 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \
a3a1d2d2 352 CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))
984263bc
MD
353
354/*
355 * Sync the PHYs by setting data bit and strobing the clock 32 times.
356 */
357static void
96d3e330 358xl_mii_sync(struct xl_softc *sc)
984263bc 359{
3d0f5f54 360 int i;
984263bc
MD
361
362 XL_SEL_WIN(4);
363 MII_SET(XL_MII_DIR|XL_MII_DATA);
364
365 for (i = 0; i < 32; i++) {
366 MII_SET(XL_MII_CLK);
a3a1d2d2
MD
367 MII_SET(XL_MII_DATA);
368 MII_SET(XL_MII_DATA);
984263bc 369 MII_CLR(XL_MII_CLK);
a3a1d2d2
MD
370 MII_SET(XL_MII_DATA);
371 MII_SET(XL_MII_DATA);
984263bc
MD
372 }
373
374 return;
375}
376
377/*
378 * Clock a series of bits through the MII.
379 */
380static void
96d3e330 381xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
984263bc
MD
382{
383 int i;
384
385 XL_SEL_WIN(4);
386 MII_CLR(XL_MII_CLK);
387
388 for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
389 if (bits & i) {
390 MII_SET(XL_MII_DATA);
391 } else {
392 MII_CLR(XL_MII_DATA);
393 }
984263bc 394 MII_CLR(XL_MII_CLK);
984263bc
MD
395 MII_SET(XL_MII_CLK);
396 }
397}
398
399/*
400 * Read an PHY register through the MII.
401 */
402static int
96d3e330 403xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
984263bc 404{
4ec4a72b 405 int i, ack;
984263bc 406
984263bc
MD
407 /*
408 * Set up frame for RX.
409 */
410 frame->mii_stdelim = XL_MII_STARTDELIM;
411 frame->mii_opcode = XL_MII_READOP;
412 frame->mii_turnaround = 0;
413 frame->mii_data = 0;
414
415 /*
416 * Select register window 4.
417 */
418
419 XL_SEL_WIN(4);
420
421 CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);
422 /*
423 * Turn on data xmit.
424 */
425 MII_SET(XL_MII_DIR);
426
427 xl_mii_sync(sc);
428
429 /*
430 * Send command/address info.
431 */
432 xl_mii_send(sc, frame->mii_stdelim, 2);
433 xl_mii_send(sc, frame->mii_opcode, 2);
434 xl_mii_send(sc, frame->mii_phyaddr, 5);
435 xl_mii_send(sc, frame->mii_regaddr, 5);
436
437 /* Idle bit */
438 MII_CLR((XL_MII_CLK|XL_MII_DATA));
984263bc 439 MII_SET(XL_MII_CLK);
984263bc
MD
440
441 /* Turn off xmit. */
442 MII_CLR(XL_MII_DIR);
443
444 /* Check for ack */
445 MII_CLR(XL_MII_CLK);
984263bc
MD
446 ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
447 MII_SET(XL_MII_CLK);
984263bc
MD
448
449 /*
450 * Now try reading data bits. If the ack failed, we still
451 * need to clock through 16 cycles to keep the PHY(s) in sync.
452 */
453 if (ack) {
454 for(i = 0; i < 16; i++) {
455 MII_CLR(XL_MII_CLK);
984263bc 456 MII_SET(XL_MII_CLK);
984263bc
MD
457 }
458 goto fail;
459 }
460
461 for (i = 0x8000; i; i >>= 1) {
462 MII_CLR(XL_MII_CLK);
984263bc
MD
463 if (!ack) {
464 if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
465 frame->mii_data |= i;
984263bc
MD
466 }
467 MII_SET(XL_MII_CLK);
984263bc
MD
468 }
469
470fail:
471
472 MII_CLR(XL_MII_CLK);
984263bc 473 MII_SET(XL_MII_CLK);
984263bc 474
984263bc
MD
475 if (ack)
476 return(1);
477 return(0);
478}
479
480/*
481 * Write to a PHY register through the MII.
482 */
483static int
96d3e330 484xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
984263bc 485{
984263bc
MD
486 /*
487 * Set up frame for TX.
488 */
489
490 frame->mii_stdelim = XL_MII_STARTDELIM;
491 frame->mii_opcode = XL_MII_WRITEOP;
492 frame->mii_turnaround = XL_MII_TURNAROUND;
493
494 /*
495 * Select the window 4.
496 */
497 XL_SEL_WIN(4);
498
499 /*
500 * Turn on data output.
501 */
502 MII_SET(XL_MII_DIR);
503
504 xl_mii_sync(sc);
505
506 xl_mii_send(sc, frame->mii_stdelim, 2);
507 xl_mii_send(sc, frame->mii_opcode, 2);
508 xl_mii_send(sc, frame->mii_phyaddr, 5);
509 xl_mii_send(sc, frame->mii_regaddr, 5);
510 xl_mii_send(sc, frame->mii_turnaround, 2);
511 xl_mii_send(sc, frame->mii_data, 16);
512
513 /* Idle bit. */
514 MII_SET(XL_MII_CLK);
984263bc 515 MII_CLR(XL_MII_CLK);
984263bc
MD
516
517 /*
518 * Turn off xmit.
519 */
520 MII_CLR(XL_MII_DIR);
521
984263bc
MD
522 return(0);
523}
524
525static int
96d3e330 526xl_miibus_readreg(device_t dev, int phy, int reg)
984263bc
MD
527{
528 struct xl_softc *sc;
529 struct xl_mii_frame frame;
530
531 sc = device_get_softc(dev);
532
533 /*
534 * Pretend that PHYs are only available at MII address 24.
535 * This is to guard against problems with certain 3Com ASIC
536 * revisions that incorrectly map the internal transceiver
537 * control registers at all MII addresses. This can cause
538 * the miibus code to attach the same PHY several times over.
539 */
540 if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24)
541 return(0);
542
543 bzero((char *)&frame, sizeof(frame));
544
545 frame.mii_phyaddr = phy;
546 frame.mii_regaddr = reg;
547 xl_mii_readreg(sc, &frame);
548
549 return(frame.mii_data);
550}
551
552static int
96d3e330 553xl_miibus_writereg(device_t dev, int phy, int reg, int data)
984263bc
MD
554{
555 struct xl_softc *sc;
556 struct xl_mii_frame frame;
557
558 sc = device_get_softc(dev);
559
560 if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24)
561 return(0);
562
563 bzero((char *)&frame, sizeof(frame));
564
565 frame.mii_phyaddr = phy;
566 frame.mii_regaddr = reg;
567 frame.mii_data = data;
568
569 xl_mii_writereg(sc, &frame);
570
571 return(0);
572}
573
574static void
96d3e330 575xl_miibus_statchg(device_t dev)
984263bc
MD
576{
577 struct xl_softc *sc;
578 struct mii_data *mii;
579
984263bc
MD
580 sc = device_get_softc(dev);
581 mii = device_get_softc(sc->xl_miibus);
582
56170638
SZ
583 ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer);
584
984263bc
MD
585 xl_setcfg(sc);
586
587 /* Set ASIC's duplex mode to match the PHY. */
588 XL_SEL_WIN(3);
589 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
590 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
591 else
592 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
593 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
984263bc
MD
594}
595
596/*
597 * Special support for the 3c905B-COMBO. This card has 10/100 support
598 * plus BNC and AUI ports. This means we will have both an miibus attached
599 * plus some non-MII media settings. In order to allow this, we have to
600 * add the extra media to the miibus's ifmedia struct, but we can't do
601 * that during xl_attach() because the miibus hasn't been attached yet.
602 * So instead, we wait until the miibus probe/attach is done, at which
603 * point we will get a callback telling is that it's safe to add our
604 * extra media.
605 */
606static void
96d3e330 607xl_miibus_mediainit(device_t dev)
984263bc
MD
608{
609 struct xl_softc *sc;
610 struct mii_data *mii;
611 struct ifmedia *ifm;
612
613 sc = device_get_softc(dev);
614 mii = device_get_softc(sc->xl_miibus);
615 ifm = &mii->mii_media;
616
617 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
618 /*
619 * Check for a 10baseFL board in disguise.
620 */
621 if (sc->xl_type == XL_TYPE_905B &&
622 sc->xl_media == XL_MEDIAOPT_10FL) {
623 if (bootverbose)
ed2832e7 624 device_printf(dev, "found 10baseFL\n");
984263bc
MD
625 ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL);
626 ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX, 0, NULL);
627 if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
628 ifmedia_add(ifm,
629 IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
630 } else {
631 if (bootverbose)
ed2832e7 632 device_printf(dev, "found AUI\n");
984263bc
MD
633 ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL);
634 }
635 }
636
637 if (sc->xl_media & XL_MEDIAOPT_BNC) {
638 if (bootverbose)
ed2832e7 639 device_printf(dev, "found BNC\n");
984263bc
MD
640 ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL);
641 }
642
643 return;
644}
645
646/*
647 * The EEPROM is slow: give it time to come ready after issuing
648 * it a command.
649 */
650static int
96d3e330 651xl_eeprom_wait(struct xl_softc *sc)
984263bc
MD
652{
653 int i;
654
655 for (i = 0; i < 100; i++) {
656 if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
657 DELAY(162);
658 else
659 break;
660 }
661
662 if (i == 100) {
ed2832e7 663 if_printf(&sc->arpcom.ac_if, "eeprom failed to come ready\n");
984263bc
MD
664 return(1);
665 }
666
667 return(0);
668}
669
670/*
671 * Read a sequence of words from the EEPROM. Note that ethernet address
672 * data is stored in the EEPROM in network byte order.
673 */
674static int
96d3e330 675xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
984263bc
MD
676{
677 int err = 0, i;
678 u_int16_t word = 0, *ptr;
679#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
680#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)
681 /* WARNING! DANGER!
682 * It's easy to accidentally overwrite the rom content!
683 * Note: the 3c575 uses 8bit EEPROM offsets.
684 */
685 XL_SEL_WIN(0);
686
687 if (xl_eeprom_wait(sc))
688 return(1);
689
690 if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
691 off += 0x30;
692
693 for (i = 0; i < cnt; i++) {
694 if (sc->xl_flags & XL_FLAG_8BITROM)
695 CSR_WRITE_2(sc, XL_W0_EE_CMD,
696 XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
697 else
698 CSR_WRITE_2(sc, XL_W0_EE_CMD,
699 XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
700 err = xl_eeprom_wait(sc);
701 if (err)
702 break;
703 word = CSR_READ_2(sc, XL_W0_EE_DATA);
704 ptr = (u_int16_t *)(dest + (i * 2));
705 if (swap)
706 *ptr = ntohs(word);
707 else
708 *ptr = word;
709 }
710
711 return(err ? 1 : 0);
712}
713
714/*
984263bc
MD
715 * NICs older than the 3c905B have only one multicast option, which
716 * is to enable reception of all multicast frames.
717 */
718static void
96d3e330 719xl_setmulti(struct xl_softc *sc)
984263bc
MD
720{
721 struct ifnet *ifp;
722 struct ifmultiaddr *ifma;
723 u_int8_t rxfilt;
724 int mcnt = 0;
725
726 ifp = &sc->arpcom.ac_if;
727
728 XL_SEL_WIN(5);
729 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
730
731 if (ifp->if_flags & IFF_ALLMULTI) {
732 rxfilt |= XL_RXFILTER_ALLMULTI;
733 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
734 return;
735 }
736
03df8a20 737 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
984263bc
MD
738 mcnt++;
739
740 if (mcnt)
741 rxfilt |= XL_RXFILTER_ALLMULTI;
742 else
743 rxfilt &= ~XL_RXFILTER_ALLMULTI;
744
745 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
746
747 return;
748}
749
750/*
751 * 3c905B adapters have a hash filter that we can program.
752 */
753static void
96d3e330 754xl_setmulti_hash(struct xl_softc *sc)
984263bc
MD
755{
756 struct ifnet *ifp;
757 int h = 0, i;
758 struct ifmultiaddr *ifma;
759 u_int8_t rxfilt;
760 int mcnt = 0;
761
762 ifp = &sc->arpcom.ac_if;
763
764 XL_SEL_WIN(5);
765 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
766
767 if (ifp->if_flags & IFF_ALLMULTI) {
768 rxfilt |= XL_RXFILTER_ALLMULTI;
769 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
770 return;
771 } else
772 rxfilt &= ~XL_RXFILTER_ALLMULTI;
773
774
775 /* first, zot all the existing hash bits */
776 for (i = 0; i < XL_HASHFILT_SIZE; i++)
777 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i);
778
779 /* now program new ones */
03df8a20 780 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
984263bc
MD
781 if (ifma->ifma_addr->sa_family != AF_LINK)
782 continue;
20bc1bbb
JS
783
784 /*
785 * Note: the 3c905B currently only supports a 64-bit
786 * hash table, which means we really only need 6 bits,
787 * but the manual indicates that future chip revisions
788 * will have a 256-bit hash table, hence the routine is
789 * set up to calculate 8 bits of position info in case
790 * we need it some day.
791 * Note II, The Sequel: _CURRENT_ versions of the 3c905B
792 * have a 256 bit hash table. This means we have to use
793 * all 8 bits regardless. On older cards, the upper 2
794 * bits will be ignored. Grrrr....
795 */
796 h = ether_crc32_be(
797 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
798 ETHER_ADDR_LEN) & 0xff;
984263bc
MD
799 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|XL_HASH_SET|h);
800 mcnt++;
801 }
802
803 if (mcnt)
804 rxfilt |= XL_RXFILTER_MULTIHASH;
805 else
806 rxfilt &= ~XL_RXFILTER_MULTIHASH;
807
808 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
809
810 return;
811}
812
#ifdef notdef
/*
 * Debug helper (compiled out): queue a minimal self-addressed test
 * frame and kick the transmitter.
 */
static void
xl_testpacket(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ether_header *eh;
	struct mbuf *m;

	MGETHDR(m, MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return;

	/* Source and destination are both our own station address. */
	eh = mtod(m, struct ether_header *);
	bcopy(&sc->arpcom.ac_enaddr, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy(&sc->arpcom.ac_enaddr, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(3);
	mtod(m, unsigned char *)[14] = 0;
	mtod(m, unsigned char *)[15] = 0;
	mtod(m, unsigned char *)[16] = 0xE3;
	m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3;
	IF_ENQUEUE(&ifp->if_snd, m);
	xl_start(ifp);
}
#endif
842
843static void
96d3e330 844xl_setcfg(struct xl_softc *sc)
984263bc
MD
845{
846 u_int32_t icfg;
847
848 XL_SEL_WIN(3);
849 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
850 icfg &= ~XL_ICFG_CONNECTOR_MASK;
851 if (sc->xl_media & XL_MEDIAOPT_MII ||
852 sc->xl_media & XL_MEDIAOPT_BT4)
853 icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS);
854 if (sc->xl_media & XL_MEDIAOPT_BTX)
855 icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS);
856
857 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
858 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
859
860 return;
861}
862
863static void
96d3e330 864xl_setmode(struct xl_softc *sc, int media)
984263bc 865{
ed2832e7 866 struct ifnet *ifp = &sc->arpcom.ac_if;
984263bc
MD
867 u_int32_t icfg;
868 u_int16_t mediastat;
869
ed2832e7 870 if_printf(ifp, "selecting ");
984263bc
MD
871
872 XL_SEL_WIN(4);
873 mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
874 XL_SEL_WIN(3);
875 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG);
876
877 if (sc->xl_media & XL_MEDIAOPT_BT) {
878 if (IFM_SUBTYPE(media) == IFM_10_T) {
e3869ec7 879 kprintf("10baseT transceiver, ");
984263bc
MD
880 sc->xl_xcvr = XL_XCVR_10BT;
881 icfg &= ~XL_ICFG_CONNECTOR_MASK;
882 icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS);
883 mediastat |= XL_MEDIASTAT_LINKBEAT|
884 XL_MEDIASTAT_JABGUARD;
885 mediastat &= ~XL_MEDIASTAT_SQEENB;
886 }
887 }
888
889 if (sc->xl_media & XL_MEDIAOPT_BFX) {
890 if (IFM_SUBTYPE(media) == IFM_100_FX) {
e3869ec7 891 kprintf("100baseFX port, ");
984263bc
MD
892 sc->xl_xcvr = XL_XCVR_100BFX;
893 icfg &= ~XL_ICFG_CONNECTOR_MASK;
894 icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS);
895 mediastat |= XL_MEDIASTAT_LINKBEAT;
896 mediastat &= ~XL_MEDIASTAT_SQEENB;
897 }
898 }
899
900 if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
901 if (IFM_SUBTYPE(media) == IFM_10_5) {
e3869ec7 902 kprintf("AUI port, ");
984263bc
MD
903 sc->xl_xcvr = XL_XCVR_AUI;
904 icfg &= ~XL_ICFG_CONNECTOR_MASK;
905 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
906 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
907 XL_MEDIASTAT_JABGUARD);
908 mediastat |= ~XL_MEDIASTAT_SQEENB;
909 }
910 if (IFM_SUBTYPE(media) == IFM_10_FL) {
e3869ec7 911 kprintf("10baseFL transceiver, ");
984263bc
MD
912 sc->xl_xcvr = XL_XCVR_AUI;
913 icfg &= ~XL_ICFG_CONNECTOR_MASK;
914 icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS);
915 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
916 XL_MEDIASTAT_JABGUARD);
917 mediastat |= ~XL_MEDIASTAT_SQEENB;
918 }
919 }
920
921 if (sc->xl_media & XL_MEDIAOPT_BNC) {
922 if (IFM_SUBTYPE(media) == IFM_10_2) {
e3869ec7 923 kprintf("BNC port, ");
984263bc
MD
924 sc->xl_xcvr = XL_XCVR_COAX;
925 icfg &= ~XL_ICFG_CONNECTOR_MASK;
926 icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS);
927 mediastat &= ~(XL_MEDIASTAT_LINKBEAT|
928 XL_MEDIASTAT_JABGUARD|
929 XL_MEDIASTAT_SQEENB);
930 }
931 }
932
933 if ((media & IFM_GMASK) == IFM_FDX ||
934 IFM_SUBTYPE(media) == IFM_100_FX) {
e3869ec7 935 kprintf("full duplex\n");
984263bc
MD
936 XL_SEL_WIN(3);
937 CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
938 } else {
e3869ec7 939 kprintf("half duplex\n");
984263bc
MD
940 XL_SEL_WIN(3);
941 CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
942 (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
943 }
944
945 if (IFM_SUBTYPE(media) == IFM_10_2)
946 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
947 else
948 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
949 CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg);
950 XL_SEL_WIN(4);
951 CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat);
952 DELAY(800);
953 XL_SEL_WIN(7);
984263bc
MD
954}
955
/*
 * Issue a global reset command to the chip, then reset the RX and TX
 * engines individually.  Polls (bounded by XL_TIMEOUT iterations) for
 * the command-busy bit to clear, and re-writes the LED/MII power
 * inversion reset options for cards flagged as needing them.
 */
static void
xl_reset(struct xl_softc *sc)
{
	int i;

	XL_SEL_WIN(0);
	/*
	 * Cards with the WEIRDRESET quirk fold "disable FDX
	 * advertisement" into the reset command itself.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET |
		    ((sc->xl_flags & XL_FLAG_WEIRDRESET) ?
		     XL_RESETOPT_DISADVFD:0));

	/*
	 * If we're using memory mapped register mode, pause briefly
	 * after issuing the reset command before trying to access any
	 * other registers. With my 3c575C cardbus card, failing to do
	 * this results in the system locking up while trying to poll
	 * the command busy bit in the status register.
	 */
	if (sc->xl_flags & XL_FLAG_USE_MMIO)
		DELAY(100000);

	/* Poll until the chip reports the reset command has finished. */
	for (i = 0; i < XL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY))
			break;
	}

	if (i == XL_TIMEOUT)
		if_printf(&sc->arpcom.ac_if, "reset didn't complete\n");

	/* Reset TX and RX. */
	/* Note: the RX reset takes an absurd amount of time
	 * on newer versions of the Tornado chips such as those
	 * on the 3c905CX and newer 3c908C cards. We wait an
	 * extra amount of time so that xl_wait() doesn't complain
	 * and annoy the users.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	DELAY(100000);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);

	/*
	 * Re-write the inverted LED/MII power option bits; presumably
	 * the reset clears them — NOTE(review): confirm against the
	 * 3c90x reset-options register description.
	 */
	if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR ||
	    sc->xl_flags & XL_FLAG_INVERT_MII_PWR) {
		XL_SEL_WIN(2);
		CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc,
		    XL_W2_RESET_OPTIONS)
		    | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0)
		    | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0)
		    );
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(100000);
	return;
}
1012
1013/*
1014 * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device
1015 * IDs against our list and return a device name if we find a match.
1016 */
1017static int
10a3d3c7 1018xl_probe(device_t dev)
984263bc 1019{
10a3d3c7
JS
1020 struct xl_type *t;
1021 uint16_t vid, did;
984263bc 1022
10a3d3c7
JS
1023 vid = pci_get_vendor(dev);
1024 did = pci_get_device(dev);
1025 for (t = xl_devs; t->xl_name != NULL; t++) {
1026 if (vid == t->xl_vid && did == t->xl_did) {
984263bc
MD
1027 device_set_desc(dev, t->xl_name);
1028 return(0);
1029 }
984263bc 1030 }
984263bc
MD
1031 return(ENXIO);
1032}
1033
1034/*
1035 * This routine is a kludge to work around possible hardware faults
1036 * or manufacturing defects that can cause the media options register
1037 * (or reset options register, as it's called for the first generation
1038 * 3c90x adapters) to return an incorrect result. I have encountered
1039 * one Dell Latitude laptop docking station with an integrated 3c905-TX
1040 * which doesn't have any of the 'mediaopt' bits set. This screws up
1041 * the attach routine pretty badly because it doesn't know what media
1042 * to look for. If we find ourselves in this predicament, this routine
1043 * will try to guess the media options values and warn the user of a
1044 * possible manufacturing defect with his adapter/system/whatever.
1045 */
static void
xl_mediacheck(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * If some of the media options bits are set, assume they are
	 * correct. If not, try to figure it out down below.
	 * XXX I should check for 10baseFL, but I don't have an adapter
	 * to test with.
	 */
	if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) {
		/*
		 * Check the XCVR value. If it's not in the normal range
		 * of values, we need to fake it up here.
		 */
		if (sc->xl_xcvr <= XL_XCVR_AUTO)
			return;
		else {
			if_printf(ifp, "bogus xcvr value in EEPROM (%x)\n",
			    sc->xl_xcvr);
			if_printf(ifp,
			    "choosing new default based on card type\n");
		}
	} else {
		/* A 905B reporting only 10baseFL is a known-good case. */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media & XL_MEDIAOPT_10FL)
			return;
		if_printf(ifp, "WARNING: no media options bits set in "
		    "the media options register!!\n");
		if_printf(ifp, "this could be a manufacturing defect in "
		    "your adapter or system\n");
		if_printf(ifp, "attempting to guess media type; you "
		    "should probably consult your vendor\n");
	}

	/* Fall back to guessing media options from the PCI device ID. */
	xl_choose_xcvr(sc, 1);
}
1084
/*
 * Choose media options and a default transceiver type based on the
 * PCI device ID stored in the EEPROM.  Used when the media options
 * register cannot be trusted (see xl_mediacheck()) or when the
 * transceiver is set to "auto" on a card without a 10/100 interface.
 * With 'verbose' set, the guess is reported on the console.
 */
static void
xl_choose_xcvr(struct xl_softc *sc, int verbose)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	u_int16_t devid;

	/*
	 * Read the device ID from the EEPROM.
	 * This is what's loaded into the PCI device ID register, so it has
	 * to be correct otherwise we wouldn't have gotten this far.
	 */
	xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0);

	switch(devid) {
	case TC_DEVICEID_BOOMERANG_10BT:	/* 3c900-TPO */
	case TC_DEVICEID_KRAKATOA_10BT:		/* 3c900B-TPO */
		sc->xl_media = XL_MEDIAOPT_BT;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			if_printf(ifp, "guessing 10BaseT transceiver\n");
		break;
	case TC_DEVICEID_BOOMERANG_10BT_COMBO:	/* 3c900-COMBO */
	case TC_DEVICEID_KRAKATOA_10BT_COMBO:	/* 3c900B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			if_printf(ifp, "guessing COMBO (AUI/BNC/TP)\n");
		break;
	case TC_DEVICEID_KRAKATOA_10BT_TPC:	/* 3c900B-TPC */
		sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC;
		sc->xl_xcvr = XL_XCVR_10BT;
		if (verbose)
			if_printf(ifp, "guessing TPC (BNC/TP)\n");
		break;
	case TC_DEVICEID_CYCLONE_10FL:		/* 3c900B-FL */
		sc->xl_media = XL_MEDIAOPT_10FL;
		sc->xl_xcvr = XL_XCVR_AUI;
		if (verbose)
			if_printf(ifp, "guessing 10baseFL\n");
		break;
	case TC_DEVICEID_BOOMERANG_10_100BT:	/* 3c905-TX */
	case TC_DEVICEID_HURRICANE_555:		/* 3c555 */
	case TC_DEVICEID_HURRICANE_556:		/* 3c556 */
	case TC_DEVICEID_HURRICANE_556B:	/* 3c556B */
	case TC_DEVICEID_HURRICANE_575A:	/* 3c575TX */
	case TC_DEVICEID_HURRICANE_575B:	/* 3c575B */
	case TC_DEVICEID_HURRICANE_575C:	/* 3c575C */
	case TC_DEVICEID_HURRICANE_656:		/* 3c656 */
	case TC_DEVICEID_HURRICANE_656B:	/* 3c656B */
	case TC_DEVICEID_TORNADO_656C:		/* 3c656C */
	case TC_DEVICEID_TORNADO_10_100BT_920B:	/* 3c920B-EMB */
		sc->xl_media = XL_MEDIAOPT_MII;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			if_printf(ifp, "guessing MII\n");
		break;
	case TC_DEVICEID_BOOMERANG_100BT4:	/* 3c905-T4 */
	case TC_DEVICEID_CYCLONE_10_100BT4:	/* 3c905B-T4 */
		sc->xl_media = XL_MEDIAOPT_BT4;
		sc->xl_xcvr = XL_XCVR_MII;
		if (verbose)
			if_printf(ifp, "guessing 100BaseT4/MII\n");
		break;
	case TC_DEVICEID_HURRICANE_10_100BT:	/* 3c905B-TX */
	case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */
	case TC_DEVICEID_TORNADO_10_100BT_SERV:	/* 3c980C-TX */
	case TC_DEVICEID_HURRICANE_SOHO100TX:	/* 3cSOHO100-TX */
	case TC_DEVICEID_TORNADO_10_100BT:	/* 3c905C-TX */
	case TC_DEVICEID_TORNADO_HOMECONNECT:	/* 3c450-TX */
		sc->xl_media = XL_MEDIAOPT_BTX;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			if_printf(ifp, "guessing 10/100 internal\n");
		break;
	case TC_DEVICEID_CYCLONE_10_100_COMBO:	/* 3c905B-COMBO */
		sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI;
		sc->xl_xcvr = XL_XCVR_AUTO;
		if (verbose)
			if_printf(ifp, "guessing 10/100 plus BNC/AUI\n");
		break;
	default:
		/* NOTE: sc->xl_xcvr is left unchanged for unknown IDs. */
		if_printf(ifp,
		    "unknown device ID: %x -- defaulting to 10baseT\n", devid);
		sc->xl_media = XL_MEDIAOPT_BT;
		break;
	}

	return;
}
1174
1175/*
1176 * Attach the interface. Allocate softc structures, do ifmedia
1177 * setup and ethernet/BPF attach.
1178 */
/*
 * Attach the interface: set per-device quirk flags, map the control
 * registers, read the station address from the EEPROM, allocate DMA
 * resources, probe media/PHY, and register with the network stack.
 * Returns 0 on success or an errno; on failure all partially-acquired
 * resources are released via xl_detach().
 */
static int
xl_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	u_int16_t xcvr[2];
	struct xl_softc *sc;
	struct ifnet *ifp;
	int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	int error = 0, rid, res;
	uint16_t did;

	sc = device_get_softc(dev);

	ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);

	did = pci_get_device(dev);

	/*
	 * Accumulate per-model quirk flags keyed off the PCI device ID:
	 * EEPROM layout, function-register window, reset weirdness and
	 * LED/MII power-bit inversion.
	 */
	sc->xl_flags = 0;
	if (did == TC_DEVICEID_HURRICANE_555)
		sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
	if (did == TC_DEVICEID_HURRICANE_556 ||
	    did == TC_DEVICEID_HURRICANE_556B)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
		    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
		    XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
	if (did == TC_DEVICEID_HURRICANE_555 ||
	    did == TC_DEVICEID_HURRICANE_556)
		sc->xl_flags |= XL_FLAG_8BITROM;
	if (did == TC_DEVICEID_HURRICANE_556B)
		sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;
	if (did == TC_DEVICEID_HURRICANE_575B ||
	    did == TC_DEVICEID_HURRICANE_575C ||
	    did == TC_DEVICEID_HURRICANE_656B ||
	    did == TC_DEVICEID_TORNADO_656C)
		sc->xl_flags |= XL_FLAG_FUNCREG;
	if (did == TC_DEVICEID_HURRICANE_575A ||
	    did == TC_DEVICEID_HURRICANE_575B ||
	    did == TC_DEVICEID_HURRICANE_575C ||
	    did == TC_DEVICEID_HURRICANE_656B ||
	    did == TC_DEVICEID_TORNADO_656C)
		sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 |
		    XL_FLAG_8BITROM;
	if (did == TC_DEVICEID_HURRICANE_656)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
	if (did == TC_DEVICEID_HURRICANE_575B)
		sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
	if (did == TC_DEVICEID_HURRICANE_575C)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
	if (did == TC_DEVICEID_TORNADO_656C)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
	if (did == TC_DEVICEID_HURRICANE_656 ||
	    did == TC_DEVICEID_HURRICANE_656B)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
		    XL_FLAG_INVERT_LED_PWR;
	if (did == TC_DEVICEID_TORNADO_10_100BT_920B)
		sc->xl_flags |= XL_FLAG_PHYOK;
#ifndef BURN_BRIDGES
	/*
	 * If this is a 3c905B, we have to check one extra thing.
	 * The 905B supports power management and may be placed in
	 * a low-power mode (D3 mode), typically by certain operating
	 * systems which shall not be named. The PCI BIOS is supposed
	 * to reset the NIC and bring it out of low-power mode, but
	 * some do not. Consequently, we have to see if this chip
	 * supports power management, and if so, make sure it's not
	 * in low-power mode. If power management is available, the
	 * capid byte will be 0x01.
	 *
	 * I _think_ that what actually happens is that the chip
	 * loses its PCI configuration during the transition from
	 * D3 back to D0; this means that it should be possible for
	 * us to save the PCI iobase, membase and IRQ, put the chip
	 * back in the D0 state, then restore the PCI config ourselves.
	 */

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, XL_PCI_LOIO, 4);
		membase = pci_read_config(dev, XL_PCI_LOMEM, 4);
		irq = pci_read_config(dev, XL_PCI_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, XL_PCI_LOIO, iobase, 4);
		pci_write_config(dev, XL_PCI_LOMEM, membase, 4);
		pci_write_config(dev, XL_PCI_INTLINE, irq, 4);
	}
#endif
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = XL_PCI_LOMEM;
	res = SYS_RES_MEMORY;

#if 0
	sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
#endif

	/*
	 * NOTE(review): the MMIO allocation above is disabled (#if 0),
	 * so sc->xl_res is expected to still be NULL here (zeroed
	 * softc) and the port-I/O branch below is taken -- confirm.
	 */
	if (sc->xl_res != NULL) {
		sc->xl_flags |= XL_FLAG_USE_MMIO;
		if (bootverbose)
			device_printf(dev, "using memory mapped I/O\n");
	} else {
		rid = XL_PCI_LOIO;
		res = SYS_RES_IOPORT;
		sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
		if (sc->xl_res == NULL) {
			device_printf(dev, "couldn't map ports/memory\n");
			error = ENXIO;
			goto fail;
		}
		if (bootverbose)
			device_printf(dev, "using port I/O\n");
	}

	sc->xl_btag = rman_get_bustag(sc->xl_res);
	sc->xl_bhandle = rman_get_bushandle(sc->xl_res);

	/* Cards with a function-register window need a second mapping. */
	if (sc->xl_flags & XL_FLAG_FUNCREG) {
		rid = XL_PCI_FUNCMEM;
		sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    RF_ACTIVE);

		if (sc->xl_fres == NULL) {
			device_printf(dev, "couldn't map funcreg memory\n");
			error = ENXIO;
			goto fail;
		}

		sc->xl_ftag = rman_get_bustag(sc->xl_fres);
		sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
	}

	/* Allocate interrupt */
	rid = 0;
	sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->xl_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* Reset the adapter. */
	xl_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	callout_init(&sc->xl_stat_timer);

	error = xl_dma_alloc(dev);
	if (error)
		goto fail;

	/*
	 * Figure out the card type. 3c905B adapters have the
	 * 'supportsNoTxLength' bit set in the capabilities
	 * word in the EEPROM.
	 * Note: my 3c575C cardbus card lies. It returns a value
	 * of 0x1578 for its capabilities word, which is somewhat
	 * nonsensical. Another way to distinguish a 3c90x chip
	 * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
	 * bit. This will only be set for 3c90x boomerage chips.
	 */
	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
	    !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
		sc->xl_type = XL_TYPE_905B;
	else
		sc->xl_type = XL_TYPE_90X;
	if (bootverbose) {
		device_printf(dev, "type %s\n",
		    sc->xl_type == XL_TYPE_905B ? "90XB" : "90X");
	}

	/* Fill in the interface structure; 905B gets the DMA-ring
	 * start routine and checksum/VLAN capabilities. */
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xl_ioctl;
	if (sc->xl_type == XL_TYPE_905B) {
		ifp->if_start = xl_start_90xB;
		ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_VLAN_MTU;
	} else {
		ifp->if_start = xl_start;
	}
	ifp->if_watchdog = xl_watchdog;
	ifp->if_init = xl_init;
#ifdef DEVICE_POLLING
	ifp->if_poll = xl_poll;
#endif
	ifp->if_baudrate = 10000000;
	ifq_set_maxlen(&ifp->if_snd, XL_TX_LIST_CNT - 1);
	ifq_set_ready(&ifp->if_snd);
	/*
	 * NOTE: Hardware checksum features disabled by default.
	 * This seems to corrupt tx packet data one out of a
	 * million packets or so and then generates a good checksum
	 * so the receiver doesn't know the packet is bad
	 */
	ifp->if_capenable = ifp->if_capabilities & ~IFCAP_HWCSUM;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = XL905B_CSUM_FEATURES;

	/*
	 * Now we have to see what sort of media we have.
	 * This includes probing for an MII interace and a
	 * possible PHY.
	 */
	XL_SEL_WIN(3);
	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
	if (bootverbose)
		if_printf(ifp, "media options word: %x\n", sc->xl_media);

	xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
	sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;

	xl_mediacheck(sc);

	/* MII-based media: hand media selection over to miibus. */
	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
	    || sc->xl_media & XL_MEDIAOPT_BT4) {
		if (bootverbose)
			if_printf(ifp, "found MII/AUTO\n");
		xl_setcfg(sc);

		error = mii_phy_probe(dev, &sc->xl_miibus,
		    xl_ifmedia_upd, xl_ifmedia_sts);
		if (error) {
			if_printf(ifp, "no PHY found!\n");
			goto fail;
		}

		goto done;
	}

	/*
	 * Sanity check. If the user has selected "auto" and this isn't
	 * a 10/100 card of some kind, we need to force the transceiver
	 * type to something sane.
	 */
	if (sc->xl_xcvr == XL_XCVR_AUTO)
		xl_choose_xcvr(sc, bootverbose);

	/*
	 * Do ifmedia setup.
	 */
	if (sc->xl_media & XL_MEDIAOPT_BT) {
		if (bootverbose)
			if_printf(ifp, "found 10baseT\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
			ifmedia_add(&sc->ifmedia,
			    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			if (bootverbose)
				if_printf(ifp, "found 10baseFL\n");
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL);
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
			    0, NULL);
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(&sc->ifmedia,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			if (bootverbose)
				if_printf(ifp, "found AUI\n");
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (bootverbose)
			if_printf(ifp, "found BNC\n");
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		if (bootverbose)
			if_printf(ifp, "found 100baseFX\n");
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
	}

	/* Choose a default media. */
	switch(sc->xl_xcvr) {
	case XL_XCVR_10BT:
		media = IFM_ETHER|IFM_10_T;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			media = IFM_ETHER|IFM_10_FL;
			xl_setmode(sc, media);
		} else {
			media = IFM_ETHER|IFM_10_5;
			xl_setmode(sc, media);
		}
		break;
	case XL_XCVR_COAX:
		media = IFM_ETHER|IFM_10_2;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUTO:
	case XL_XCVR_100BTX:
	case XL_XCVR_MII:
		/* Chosen by miibus */
		break;
	case XL_XCVR_100BFX:
		media = IFM_ETHER|IFM_100_FX;
		break;
	default:
		if_printf(ifp, "unknown XCVR type: %d\n", sc->xl_xcvr);
		/*
		 * This will probably be wrong, but it prevents
		 * the ifmedia code from panicking.
		 */
		media = IFM_ETHER|IFM_10_T;
		break;
	}

	if (sc->xl_miibus == NULL)
		ifmedia_set(&sc->ifmedia, media);

done:

	if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
		XL_SEL_WIN(0);
		CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->xl_irq, INTR_MPSAFE,
			       xl_intr, sc, &sc->xl_intrhand,
			       ifp->if_serializer);
	if (error) {
		if_printf(ifp, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->xl_irq));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;

fail:
	/* xl_detach() frees whatever was allocated before the failure. */
	xl_detach(dev);
	return error;
}
1566
a3a1d2d2
MD
1567/*
1568 * Shutdown hardware and free up resources. This can be called any
1569 * time after the mutex has been initialized. It is called in both
1570 * the error case in attach and the normal detach case so it needs
1571 * to be careful about only freeing resources that have actually been
1572 * allocated.
1573 */
984263bc 1574static int
96d3e330 1575xl_detach(device_t dev)
984263bc
MD
1576{
1577 struct xl_softc *sc;
1578 struct ifnet *ifp;
a3a1d2d2 1579 int rid, res;
984263bc
MD
1580
1581 sc = device_get_softc(dev);
1582 ifp = &sc->arpcom.ac_if;
1583
a3a1d2d2 1584 if (sc->xl_flags & XL_FLAG_USE_MMIO) {
c4b81dd9 1585 rid = XL_PCI_LOMEM;
a3a1d2d2
MD
1586 res = SYS_RES_MEMORY;
1587 } else {
1588 rid = XL_PCI_LOIO;
1589 res = SYS_RES_IOPORT;
1590 }
1591
c4b81dd9 1592 if (device_is_attached(dev)) {
cdf89432 1593 lwkt_serialize_enter(ifp->if_serializer);
069f2034
MD
1594 xl_reset(sc);
1595 xl_stop(sc);
cdf89432
SZ
1596 bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
1597 lwkt_serialize_exit(ifp->if_serializer);
1598
069f2034
MD
1599 ether_ifdetach(ifp);
1600 }
c4b81dd9 1601
a3a1d2d2 1602 if (sc->xl_miibus)
984263bc 1603 device_delete_child(dev, sc->xl_miibus);
a3a1d2d2
MD
1604 bus_generic_detach(dev);
1605 ifmedia_removeall(&sc->ifmedia);
984263bc 1606
a3a1d2d2
MD
1607 if (sc->xl_irq)
1608 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
984263bc
MD
1609 if (sc->xl_fres != NULL)
1610 bus_release_resource(dev, SYS_RES_MEMORY,
1611 XL_PCI_FUNCMEM, sc->xl_fres);
a3a1d2d2
MD
1612 if (sc->xl_res)
1613 bus_release_resource(dev, res, rid, sc->xl_res);
984263bc 1614
5001d436
SZ
1615 xl_dma_free(dev);
1616
1617 return(0);
1618}
1619
/*
 * Allocate all DMA resources: the parent tag, coherent memory for the
 * RX and TX descriptor rings, mbuf DMA tags, per-descriptor maps, and
 * a spare map used by xl_newbuf() for safe RX buffer replacement.
 * Returns 0 or an errno; on error, partially-created resources are
 * released by xl_dma_free() via the caller's failure path.
 */
static int
xl_dma_alloc(device_t dev)
{
	struct xl_softc *sc;
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	bus_dmamem_t dmem;
	int i, error;

	sc = device_get_softc(dev);
	cd = &sc->xl_cdata;
	ld = &sc->xl_ldata;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   BUS_SPACE_MAXSIZE_32BIT, 0,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &sc->xl_parent_tag);
	if (error) {
		device_printf(dev, "could not allocate parent dma tag\n");
		return error;
	}

	/*
	 * Now allocate a tag for the DMA descriptor lists and a chunk
	 * of DMA-able memory based on the tag. Also obtain the DMA
	 * addresses of the RX and TX ring, which we'll need later.
	 * All of our lists are allocated as a contiguous block
	 * of memory.
	 */
	error = bus_dmamem_coherent(sc->xl_parent_tag, XL_LIST_ALIGN, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    XL_RX_LIST_SZ, BUS_DMA_WAITOK, &dmem);
	if (error) {
		device_printf(dev, "failed to allocate rx list\n");
		return error;
	}
	ld->xl_rx_tag = dmem.dmem_tag;
	ld->xl_rx_dmamap = dmem.dmem_map;
	ld->xl_rx_list = dmem.dmem_addr;
	ld->xl_rx_dmaaddr = dmem.dmem_busaddr;

	error = bus_dmamem_coherent(sc->xl_parent_tag, XL_LIST_ALIGN, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    XL_TX_LIST_SZ, BUS_DMA_WAITOK, &dmem);
	if (error) {
		device_printf(dev, "failed to allocate tx list\n");
		return error;
	}
	ld->xl_tx_tag = dmem.dmem_tag;
	ld->xl_tx_dmamap = dmem.dmem_map;
	ld->xl_tx_list = dmem.dmem_addr;
	ld->xl_tx_dmaaddr = dmem.dmem_busaddr;

	/*
	 * Allocate a DMA tag for the mapping of mbufs.
	 */
	error = bus_dma_tag_create(sc->xl_parent_tag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
				   &sc->xl_rx_mtag);
	if (error) {
		device_printf(dev, "failed to allocate RX mbuf dma tag\n");
		return error;
	}

	/*
	 * Allocate a spare DMA map for the RX ring.
	 */
	error = bus_dmamap_create(sc->xl_rx_mtag, BUS_DMA_WAITOK,
				  &sc->xl_tmpmap);
	if (error) {
		device_printf(dev, "failed to create RX mbuf tmp dma map\n");
		bus_dma_tag_destroy(sc->xl_rx_mtag);
		sc->xl_rx_mtag = NULL;
		return error;
	}

	/* One map per RX descriptor; xl_ptr links it to its ring slot. */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->xl_rx_mtag, BUS_DMA_WAITOK,
					  &cd->xl_rx_chain[i].xl_map);
		if (error) {
			device_printf(dev, "failed to create %dth "
			    "rx descriptor dma map!\n", i);
			return error;
		}
		cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
	}

	/* The TX mbuf tag allows up to XL_MAXFRAGS segments per packet. */
	error = bus_dma_tag_create(sc->xl_parent_tag, 1, 0,
				   BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, XL_MAXFRAGS, MCLBYTES,
				   BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
				   &sc->xl_tx_mtag);
	if (error) {
		device_printf(dev, "failed to allocate TX mbuf dma tag\n");
		return error;
	}

	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		error = bus_dmamap_create(sc->xl_tx_mtag, BUS_DMA_WAITOK,
					  &cd->xl_tx_chain[i].xl_map);
		if (error) {
			device_printf(dev, "failed to create %dth "
			    "tx descriptor dma map!\n", i);
			return error;
		}
		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
	}
	return 0;
}
1738
/*
 * Release everything xl_dma_alloc() created, in reverse order: loaded
 * mbufs and their maps, the descriptor-ring memory and tags, the mbuf
 * tags (plus spare map), and finally the parent tag.  Every teardown
 * is guarded so this is safe after a partial xl_dma_alloc() failure.
 */
static void
xl_dma_free(device_t dev)
{
	struct xl_softc *sc;
	struct xl_chain_data *cd;
	struct xl_list_data *ld;
	int i;

	sc = device_get_softc(dev);
	cd = &sc->xl_cdata;
	ld = &sc->xl_ldata;

	for (i = 0; i < XL_RX_LIST_CNT; ++i) {
		/* xl_ptr is set only after the map was created. */
		if (cd->xl_rx_chain[i].xl_ptr != NULL) {
			if (cd->xl_rx_chain[i].xl_mbuf != NULL) {
				bus_dmamap_unload(sc->xl_rx_mtag,
						  cd->xl_rx_chain[i].xl_map);
				m_freem(cd->xl_rx_chain[i].xl_mbuf);
			}
			bus_dmamap_destroy(sc->xl_rx_mtag,
					   cd->xl_rx_chain[i].xl_map);
		}
	}

	for (i = 0; i < XL_TX_LIST_CNT; ++i) {
		if (cd->xl_tx_chain[i].xl_ptr != NULL) {
			if (cd->xl_tx_chain[i].xl_mbuf != NULL) {
				bus_dmamap_unload(sc->xl_tx_mtag,
						  cd->xl_tx_chain[i].xl_map);
				m_freem(cd->xl_tx_chain[i].xl_mbuf);
			}
			bus_dmamap_destroy(sc->xl_tx_mtag,
					   cd->xl_tx_chain[i].xl_map);
		}
	}

	/* Descriptor-ring memory (allocated with bus_dmamem_coherent). */
	if (ld->xl_rx_tag) {
		bus_dmamap_unload(ld->xl_rx_tag, ld->xl_rx_dmamap);
		bus_dmamem_free(ld->xl_rx_tag, ld->xl_rx_list,
				ld->xl_rx_dmamap);
		bus_dma_tag_destroy(ld->xl_rx_tag);
	}

	if (ld->xl_tx_tag) {
		bus_dmamap_unload(ld->xl_tx_tag, ld->xl_tx_dmamap);
		bus_dmamem_free(ld->xl_tx_tag, ld->xl_tx_list,
				ld->xl_tx_dmamap);
		bus_dma_tag_destroy(ld->xl_tx_tag);
	}

	if (sc->xl_rx_mtag) {
		bus_dmamap_destroy(sc->xl_rx_mtag, sc->xl_tmpmap);
		bus_dma_tag_destroy(sc->xl_rx_mtag);
	}
	if (sc->xl_tx_mtag)
		bus_dma_tag_destroy(sc->xl_tx_mtag);

	if (sc->xl_parent_tag)
		bus_dma_tag_destroy(sc->xl_parent_tag);
}
1799
1800/*
1801 * Initialize the transmit descriptors.
1802 */
5001d436
SZ
1803static void
1804xl_list_tx_init(struct xl_softc *sc)
984263bc
MD
1805{
1806 struct xl_chain_data *cd;
1807 struct xl_list_data *ld;
5001d436 1808 int i;
984263bc
MD
1809
1810 cd = &sc->xl_cdata;
a3a1d2d2 1811 ld = &sc->xl_ldata;
984263bc 1812 for (i = 0; i < XL_TX_LIST_CNT; i++) {
a3a1d2d2
MD
1813 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
1814 i * sizeof(struct xl_list);
984263bc
MD
1815 if (i == (XL_TX_LIST_CNT - 1))
1816 cd->xl_tx_chain[i].xl_next = NULL;
1817 else
1818 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1819 }
1820
1821 cd->xl_tx_free = &cd->xl_tx_chain[0];
1822 cd->xl_tx_tail = cd->xl_tx_head = NULL;
984263bc
MD
1823}
1824
1825/*
1826 * Initialize the transmit descriptors.
1827 */
5001d436
SZ
1828static void
1829xl_list_tx_init_90xB(struct xl_softc *sc)
984263bc
MD
1830{
1831 struct xl_chain_data *cd;
1832 struct xl_list_data *ld;
5001d436 1833 int i;
984263bc
MD
1834
1835 cd = &sc->xl_cdata;
a3a1d2d2 1836 ld = &sc->xl_ldata;
984263bc 1837 for (i = 0; i < XL_TX_LIST_CNT; i++) {
a3a1d2d2
MD
1838 cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
1839 i * sizeof(struct xl_list);
984263bc
MD
1840 if (i == (XL_TX_LIST_CNT - 1))
1841 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
1842 else
1843 cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
1b6f9882 1844 if (i == 0) {
984263bc
MD
1845 cd->xl_tx_chain[i].xl_prev =
1846 &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
1b6f9882 1847 } else {
984263bc
MD
1848 cd->xl_tx_chain[i].xl_prev =
1849 &cd->xl_tx_chain[i - 1];
1b6f9882 1850 }
984263bc
MD
1851 }
1852
a3a1d2d2 1853 ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);
984263bc
MD
1854
1855 cd->xl_tx_prod = 1;
1856 cd->xl_tx_cons = 1;
1857 cd->xl_tx_cnt = 0;
984263bc
MD
1858}
1859
1860/*
1861 * Initialize the RX descriptors and allocate mbufs for them. Note that
1862 * we arrange the descriptors in a closed ring, so that the last descriptor
1863 * points back to the first.
1864 */
1865static int
96d3e330 1866xl_list_rx_init(struct xl_softc *sc)
984263bc
MD
1867{
1868 struct xl_chain_data *cd;
1869 struct xl_list_data *ld;
a3a1d2d2
MD
1870 int error, i, next;
1871 u_int32_t nextptr;
984263bc
MD
1872
1873 cd = &sc->xl_cdata;
a3a1d2d2 1874 ld = &sc->xl_ldata;
984263bc
MD
1875
1876 for (i = 0; i < XL_RX_LIST_CNT; i++) {
e4345646 1877 error = xl_newbuf(sc, &cd->xl_rx_chain[i], 1);
a3a1d2d2
MD
1878 if (error)
1879 return(error);
1880 if (i == (XL_RX_LIST_CNT - 1))
1881 next = 0;
1882 else
1883 next = i + 1;
1884 nextptr = ld->xl_rx_dmaaddr +
1885 next * sizeof(struct xl_list_onefrag);
1886 cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
1887 ld->xl_rx_list[i].xl_next = htole32(nextptr);
984263bc
MD
1888 }
1889
1890 cd->xl_rx_head = &cd->xl_rx_chain[0];
1891
1892 return(0);
1893}
1894
1895/*
1896 * Initialize an RX descriptor and attach an MBUF cluster.
a3a1d2d2
MD
1897 * If we fail to do so, we need to leave the old mbuf and
1898 * the old DMA map untouched so that it can be reused.
984263bc
MD
1899 */
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * If we fail to do so, we need to leave the old mbuf and
 * the old DMA map untouched so that it can be reused.
 *
 * 'init' selects blocking allocation and console diagnostics (ring
 * initialization path) versus best-effort non-blocking (RX path).
 */
static int
xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c, int init)
{
	struct mbuf *m_new;
	bus_dmamap_t map;
	int error, nsegs;
	bus_dma_segment_t seg;

	/* May sleep for a cluster only during initialization. */
	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return(ENOBUFS);

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/* Force longword alignment for packet payload. */
	m_adj(m_new, ETHER_ALIGN);

	/*
	 * Load the new mbuf into the spare map first: if this fails,
	 * the descriptor's current mbuf/map have not been disturbed.
	 */
	error = bus_dmamap_load_mbuf_segment(sc->xl_rx_mtag, sc->xl_tmpmap,
			m_new, &seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "can't map mbuf (error %d)\n", error);
		}
		return(error);
	}

	/* Tear down the mapping of the mbuf being replaced, if any. */
	if (c->xl_mbuf != NULL) {
		bus_dmamap_sync(sc->xl_rx_mtag, c->xl_map,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->xl_rx_mtag, c->xl_map);
	}

	/*
	 * Swap the loaded spare map into the descriptor; the old map
	 * becomes the new spare for the next replacement.
	 */
	map = c->xl_map;
	c->xl_map = sc->xl_tmpmap;
	sc->xl_tmpmap = map;
	c->xl_mbuf = m_new;

	/* Publish the new buffer to the chip (little-endian fields). */
	c->xl_ptr->xl_frag.xl_len = htole32(seg.ds_len | XL_LAST_FRAG);
	c->xl_ptr->xl_frag.xl_addr = htole32(seg.ds_addr);
	c->xl_ptr->xl_status = 0;

	return(0);
}
1945
1946static int
96d3e330 1947xl_rx_resync(struct xl_softc *sc)
984263bc
MD
1948{
1949 struct xl_chain_onefrag *pos;
1950 int i;
1951
1952 pos = sc->xl_cdata.xl_rx_head;
1953
1954 for (i = 0; i < XL_RX_LIST_CNT; i++) {
1955 if (pos->xl_ptr->xl_status)
1956 break;
1957 pos = pos->xl_next;
1958 }
1959
1960 if (i == XL_RX_LIST_CNT)
1961 return(0);
1962
1963 sc->xl_cdata.xl_rx_head = pos;
1964
1965 return(EAGAIN);
1966}
1967
1968/*
1969 * A frame has been uploaded: pass the resulting mbuf chain up to
1970 * the higher level protocols.
1971 */
static void
xl_rxeof(struct xl_softc *sc, int count)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct xl_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;
	struct mbuf_chain chain[MAXCPU];

	ifp = &sc->arpcom.ac_if;

	ether_input_chain_init(chain);
again:
	/* Walk the ring while descriptors have a completed (non-zero) status. */
	while((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
#ifdef DEVICE_POLLING
		/*
		 * 'count' is the polling(4) burst budget; a negative count
		 * (interrupt path) means no limit.
		 */
		if (count >= 0 && count-- == 0)
			break;
#endif
		cur_rx = sc->xl_cdata.xl_rx_head;
		sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
		total_len = rxstat & XL_RXSTAT_LENMASK;

		/*
		 * Since we have told the chip to allow large frames,
		 * we need to trap giant frame errors in software. We allow
		 * a little more than the normal frame size to account for
		 * frames with VLAN tags.
		 */
		if (total_len > XL_MAX_FRAMELEN)
			rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & XL_RXSTAT_UP_ERROR) {
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
			if_printf(ifp,
				  "bad receive status -- packet dropped\n");
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->xl_mbuf;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (xl_newbuf(sc, cur_rx, 0)) {
			ifp->if_ierrors++;
			cur_rx->xl_ptr->xl_status = 0;
			continue;
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		if (ifp->if_capenable & IFCAP_RXCSUM) {
			/* Do IP checksum checking. */
			if (rxstat & XL_RXSTAT_IPCKOK)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (!(rxstat & XL_RXSTAT_IPCKERR))
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			/* TCP or UDP checksum verified by the chip. */
			if ((rxstat & XL_RXSTAT_TCPCOK &&
			     !(rxstat & XL_RXSTAT_TCPCKERR)) ||
			    (rxstat & XL_RXSTAT_UDPCKOK &&
			     !(rxstat & XL_RXSTAT_UDPCKERR))) {
				m->m_pkthdr.csum_flags |=
					CSUM_DATA_VALID|CSUM_PSEUDO_HDR|
					CSUM_FRAG_NOT_CHECKED;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Queue onto the per-cpu input chain; dispatched below. */
		ether_input_chain(ifp, m, NULL, chain);
	}

	if (sc->xl_type != XL_TYPE_905B) {
		/*
		 * Handle the 'end of channel' condition. When the upload
		 * engine hits the end of the RX ring, it will stall. This
		 * is our cue to flush the RX ring, reload the uplist pointer
		 * register and unstall the engine.
		 * XXX This is actually a little goofy. With the ThunderLAN
		 * chip, you get an interrupt when the receiver hits the end
		 * of the receive ring, which tells you exactly when you
		 * you need to reload the ring pointer. Here we have to
		 * fake it. I'm mad at myself for not being clever enough
		 * to avoid the use of a goto here.
		 */
		if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
		    CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
			xl_wait(sc);
			CSR_WRITE_4(sc, XL_UPLIST_PTR,
				    sc->xl_ldata.xl_rx_dmaaddr);
			sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
			goto again;
		}
	}

	ether_input_dispatch(chain);
}
2096
2097/*
2098 * A frame was downloaded to the chip. It's safe for us to clean up
2099 * the list buffers.
2100 */
static void
xl_txeof(struct xl_softc *sc)
{
	struct xl_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded. Note: the 3c905B
	 * sets a special bit in the status word to let us
	 * know that a frame has been downloaded, but the
	 * original 3c900/3c905 adapters don't do that.
	 * Consequently, we have to use a different test if
	 * xl_type != XL_TYPE_905B.
	 */
	while(sc->xl_cdata.xl_tx_head != NULL) {
		cur_tx = sc->xl_cdata.xl_tx_head;

		/* Non-zero DOWNLIST_PTR: chip is still working on the list. */
		if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
			break;

		/* Frame done: unmap, free the mbuf, recycle the descriptor. */
		sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
		bus_dmamap_unload(sc->xl_tx_mtag, cur_tx->xl_map);
		m_freem(cur_tx->xl_mbuf);
		cur_tx->xl_mbuf = NULL;
		ifp->if_opackets++;

		cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
		sc->xl_cdata.xl_tx_free = cur_tx;
	}

	if (sc->xl_cdata.xl_tx_head == NULL) {
		/* Whole list reclaimed: allow new transmissions. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->xl_cdata.xl_tx_tail = NULL;
	} else {
		/*
		 * Frames remain; if the download engine stalled or lost
		 * its list pointer, restart it at the current head.
		 */
		if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
		    !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
			CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
				    sc->xl_cdata.xl_tx_head->xl_phys);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
	}

	return;
}
2151
/*
 * 3c90xB variant of xl_txeof(): the 905B sets XL_TXSTAT_DL_COMPLETE in
 * the descriptor status, so completion is detected from the ring itself
 * (consumer index chasing the producer index) instead of chip registers.
 */
static void
xl_txeof_90xB(struct xl_softc *sc)
{
	struct xl_chain *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;

	ifp = &sc->arpcom.ac_if;

	idx = sc->xl_cdata.xl_tx_cons;
	while(idx != sc->xl_cdata.xl_tx_prod) {

		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Stop at the first descriptor the chip hasn't finished. */
		if (!(le32toh(cur_tx->xl_ptr->xl_status) &
		      XL_TXSTAT_DL_COMPLETE))
			break;

		if (cur_tx->xl_mbuf != NULL) {
			bus_dmamap_unload(sc->xl_tx_mtag, cur_tx->xl_map);
			m_freem(cur_tx->xl_mbuf);
			cur_tx->xl_mbuf = NULL;
		}

		ifp->if_opackets++;

		sc->xl_cdata.xl_tx_cnt--;
		XL_INC(idx, XL_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->xl_cdata.xl_tx_cons = idx;

	/* Reclaimed at least one descriptor: transmit may proceed again. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}
2190
2191/*
2192 * TX 'end of channel' interrupt handler. Actually, we should
2193 * only get a 'TX complete' interrupt if there's a transmit error,
2194 * so this is really TX error handler.
2195 */
static void
xl_txeoc(struct xl_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	u_int8_t txstat;

	/* Drain all pending TX status bytes; each write of 0x01 pops one. */
	while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
		if (txstat & XL_TXSTATUS_UNDERRUN ||
		    txstat & XL_TXSTATUS_JABBER ||
		    txstat & XL_TXSTATUS_RECLAIM) {
			if_printf(ifp, "transmission error: %x\n", txstat);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
			xl_wait(sc);
			/* Re-point the download engine at the pending work. */
			if (sc->xl_type == XL_TYPE_905B) {
				if (sc->xl_cdata.xl_tx_cnt) {
					int i;
					struct xl_chain *c;
					i = sc->xl_cdata.xl_tx_cons;
					c = &sc->xl_cdata.xl_tx_chain[i];
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
						    c->xl_phys);
					CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
				}
			} else {
				if (sc->xl_cdata.xl_tx_head != NULL)
					CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
					    sc->xl_cdata.xl_tx_head->xl_phys);
			}
			/*
			 * Remember to set this for the
			 * first generation 3c90X chips.
			 */
			CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
			/* Back off the start threshold after an underrun. */
			if (txstat & XL_TXSTATUS_UNDERRUN &&
			    sc->xl_tx_thresh < XL_PACKET_SIZE) {
				sc->xl_tx_thresh += XL_MIN_FRAMELEN;
				if_printf(ifp, "tx underrun, increasing tx start"
					  " threshold to %d bytes\n",
					  sc->xl_tx_thresh);
			}
			CSR_WRITE_2(sc, XL_COMMAND,
			    XL_CMD_TX_SET_START|sc->xl_tx_thresh);
			if (sc->xl_type == XL_TYPE_905B) {
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
			}
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		} else {
			/* Benign status: just re-enable and unstall. */
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
			CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		}
		/*
		 * Write an arbitrary byte to the TX_STATUS register
		 * to clear this interrupt/error and advance to the next.
		 */
		CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
	}

	return;
}
2257
a0e7467e
SZ
#ifdef DEVICE_POLLING

/*
 * polling(4) entry point.  POLL_REGISTER/POLL_DEREGISTER toggle the
 * interrupt mask (and, on pre-905B chips, swap the if_start handler);
 * POLL_ONLY/POLL_AND_CHECK_STATUS do the bounded RX/TX processing, the
 * latter also servicing non-data status bits like xl_intr() would.
 */
static void
xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct xl_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		/* Mask interrupts while polling drives the chip. */
		xl_enable_intrs(sc, 0);
		if (sc->xl_type != XL_TYPE_905B)
			ifp->if_start = xl_start_poll;
		break;
	case POLL_DEREGISTER:
		if (sc->xl_type != XL_TYPE_905B)
			ifp->if_start = xl_start;
		xl_enable_intrs(sc, XL_INTRS);
		break;
	case POLL_ONLY:
	case POLL_AND_CHECK_STATUS:
		/* 'count' bounds the RX burst processed per poll. */
		xl_rxeof(sc, count);
		if (sc->xl_type == XL_TYPE_905B)
			xl_txeof_90xB(sc);
		else
			xl_txeof(sc);

		if (!ifq_is_empty(&ifp->if_snd))
			if_devstart(ifp);

		if (cmd == POLL_AND_CHECK_STATUS) {
			uint16_t status;

			/* XXX copy & pasted from xl_intr() */
			status = CSR_READ_2(sc, XL_STATUS);
			if ((status & XL_INTRS) && status != 0xFFFF) {
				CSR_WRITE_2(sc, XL_COMMAND,
					XL_CMD_INTR_ACK | (status & XL_INTRS));

				if (status & XL_STAT_TX_COMPLETE) {
					ifp->if_oerrors++;
					xl_txeoc(sc);
				}

				if (status & XL_STAT_ADFAIL) {
					xl_reset(sc);
					xl_init(sc);
				}

				if (status & XL_STAT_STATSOFLOW) {
					sc->xl_stats_no_timeout = 1;
					xl_stats_update_serialized(sc);
					sc->xl_stats_no_timeout = 0;
				}
			}
		}
		break;
	}
}

#endif /* DEVICE_POLLING */
2320
/*
 * Interrupt handler: ack and service status bits until none of the
 * interesting ones remain set (0xFFFF means the card is gone).
 */
static void
xl_intr(void *arg)
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	while(((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS) &&
	      status != 0xFFFF) {

		/* Acknowledge exactly the bits we are about to service. */
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_INTR_ACK|(status & XL_INTRS));

		if (status & XL_STAT_UP_COMPLETE) {
			int curpkts;

			/*
			 * If RX processing yielded nothing, our ring head
			 * may have drifted from the hardware; resync and
			 * retry until they agree.
			 */
			curpkts = ifp->if_ipackets;
			xl_rxeof(sc, -1);
			if (curpkts == ifp->if_ipackets) {
				while (xl_rx_resync(sc))
					xl_rxeof(sc, -1);
			}
		}

		if (status & XL_STAT_DOWN_COMPLETE) {
			if (sc->xl_type == XL_TYPE_905B)
				xl_txeof_90xB(sc);
			else
				xl_txeof(sc);
		}

		/* TX complete only fires on transmit errors; recover. */
		if (status & XL_STAT_TX_COMPLETE) {
			ifp->if_oerrors++;
			xl_txeoc(sc);
		}

		/* Adapter failure: full reset and reinitialization. */
		if (status & XL_STAT_ADFAIL) {
			xl_reset(sc);
			xl_init(sc);
		}

		/* Drain stats counters before they overflow. */
		if (status & XL_STAT_STATSOFLOW) {
			sc->xl_stats_no_timeout = 1;
			xl_stats_update_serialized(sc);
			sc->xl_stats_no_timeout = 0;
		}
	}

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}
2377
2378static void
96d3e330 2379xl_stats_update(void *xsc)
984263bc 2380{
78195a76
MD
2381 struct xl_softc *sc = xsc;
2382
2383 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer);
2384 xl_stats_update_serialized(xsc);
2385 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer);
2386}
2387
/*
 * Read and clear the chip's statistics counters (the registers clear on
 * read), fold them into the ifnet counters, and — unless called from an
 * interrupt/overflow context (xl_stats_no_timeout) — tick the PHY and
 * rearm the one-second callout.  Caller must hold the serializer.
 */
static void
xl_stats_update_serialized(void *xsc)
{
	struct xl_softc *sc;
	struct ifnet *ifp;
	struct xl_stats xl_stats;
	u_int8_t *p;
	int i;
	struct mii_data *mii = NULL;

	bzero((char *)&xl_stats, sizeof(struct xl_stats));

	sc = xsc;
	ifp = &sc->arpcom.ac_if;
	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	p = (u_int8_t *)&xl_stats;

	/* Read all the stats registers. */
	XL_SEL_WIN(6);

	/* Window 6 holds 16 one-byte counters laid out like xl_stats. */
	for (i = 0; i < 16; i++)
		*p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

	ifp->if_ierrors += xl_stats.xl_rx_overrun;

	ifp->if_collisions += xl_stats.xl_tx_multi_collision +
	    xl_stats.xl_tx_single_collision +
	    xl_stats.xl_tx_late_collision;

	/*
	 * Boomerang and cyclone chips have an extra stats counter
	 * in window 4 (BadSSD). We have to read this too in order
	 * to clear out all the stats registers and avoid a statsoflow
	 * interrupt.
	 */
	XL_SEL_WIN(4);
	CSR_READ_1(sc, XL_W4_BADSSD);

	if ((mii != NULL) && (!sc->xl_stats_no_timeout))
		mii_tick(mii);

	/* Restore the normal operating window. */
	XL_SEL_WIN(7);

	if (!sc->xl_stats_no_timeout)
		callout_reset(&sc->xl_stat_timer, hz, xl_stats_update, sc);

	return;
}
2438
2439/*
2440 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
2441 * pointers to the fragment pointers.
2442 */
static int
xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
{
	int error, nsegs, i;
	u_int32_t status;
	bus_dma_segment_t segs[XL_MAXFRAGS];
	struct xl_list *l;

	/*
	 * Map the mbuf chain for DMA, defragmenting it if it needs more
	 * than XL_MAXFRAGS segments.  On failure the mbuf is freed here;
	 * the caller must not touch it again.
	 */
	error = bus_dmamap_load_mbuf_defrag(sc->xl_tx_mtag, c->xl_map, &m_head,
			segs, XL_MAXFRAGS, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_head);
		return error;
	}
	bus_dmamap_sync(sc->xl_tx_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);

	if (sc->xl_type == XL_TYPE_905B) {
		/* 905B: request hardware checksum offload as needed. */
		status = XL_TXSTAT_RND_DEFEAT;
		if (m_head->m_pkthdr.csum_flags) {
			if (m_head->m_pkthdr.csum_flags & CSUM_IP)
				status |= XL_TXSTAT_IPCKSUM;
			if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
				status |= XL_TXSTAT_TCPCKSUM;
			if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
				status |= XL_TXSTAT_UDPCKSUM;
		}
	} else {
		/* Older chips carry the frame length in the status word. */
		status = m_head->m_pkthdr.len;
	}

	/* Fill one fragment descriptor per DMA segment. */
	l = c->xl_ptr;
	for (i = 0; i < nsegs; i++) {
		l->xl_frag[i].xl_addr = htole32(segs[i].ds_addr);
		l->xl_frag[i].xl_len = htole32(segs[i].ds_len);
	}
	/* Mark the final fragment so the chip knows where the frame ends. */
	l->xl_frag[nsegs - 1].xl_len =
	    htole32(segs[nsegs - 1].ds_len | XL_LAST_FRAG);
	l->xl_status = htole32(status);
	l->xl_next = 0;

	c->xl_mbuf = m_head;

	return(0);
}
2487
a0e7467e
SZ
/*
 * if_start handler for the interrupt-driven path: transmit queued
 * frames and also flush the RX ring (proc_rx = 1).
 */
static void
xl_start(struct ifnet *ifp)
{
	ASSERT_SERIALIZED(ifp->if_serializer);
	xl_start_body(ifp, 1);
}
2494
#ifdef DEVICE_POLLING
/*
 * if_start handler used while polling(4) is active on pre-905B chips:
 * skip the RX pass (proc_rx = 0) since the poll loop handles RX itself.
 */
static void
xl_start_poll(struct ifnet *ifp)
{
	xl_start_body(ifp, 0);
}
#endif
9db4b353 2502
984263bc
MD
2503/*
2504 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
2505 * to the mbuf data regions directly in the transmit lists. We also save a
2506 * copy of the pointers since the transmit list fragment pointers are
2507 * physical addresses.
2508 */
static void
xl_start_body(struct ifnet *ifp, int proc_rx)
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	u_int32_t status;
	int error;

	sc = ifp->if_softc;
	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->xl_cdata.xl_tx_free == NULL) {
		xl_txeoc(sc);
		xl_txeof(sc);
		if (sc->xl_cdata.xl_tx_free == NULL) {
			ifp->if_flags |= IFF_OACTIVE;
			return;
		}
	}

	start_tx = sc->xl_cdata.xl_tx_free;

	/* Dequeue and encapsulate frames while descriptors remain. */
	while(sc->xl_cdata.xl_tx_free != NULL) {
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->xl_cdata.xl_tx_free;

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap failed (mbuf already freed); reuse slot. */
			cur_tx = prev_tx;
			continue;
		}

		sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
		cur_tx->xl_next = NULL;

		/* Chain it together. */
		if (prev != NULL) {
			prev->xl_next = cur_tx;
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		}
		prev = cur_tx;

		BPF_MTAP(ifp, cur_tx->xl_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
	    XL_TXSTAT_DL_INTR);

	/*
	 * Queue the packets. If the TX channel is clear, update
	 * the downlist pointer register.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
	xl_wait(sc);

	if (sc->xl_cdata.xl_tx_head != NULL) {
		/*
		 * Append to the existing list; strip the old tail's
		 * interrupt-request bit so only the new tail interrupts.
		 */
		sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
		    htole32(start_tx->xl_phys);
		status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status;
		sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status =
		    htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR);
		sc->xl_cdata.xl_tx_tail = cur_tx;
	} else {
		sc->xl_cdata.xl_tx_head = start_tx;
		sc->xl_cdata.xl_tx_tail = cur_tx;
	}

	if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);

	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

	XL_SEL_WIN(7);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	if (proc_rx) {
		/*
		 * XXX Under certain conditions, usually on slower machines
		 * where interrupts may be dropped, it's possible for the
		 * adapter to chew up all the buffers in the receive ring
		 * and stall, without us being able to do anything about it.
		 * To guard against this, we need to make a pass over the
		 * RX queue to make sure there aren't any packets pending.
		 * Doing it here means we can flush the receive ring at the
		 * same time the chip is DMAing the transmit descriptors we
		 * just gave it.
		 *
		 * 3Com goes to some lengths to emphasize the Parallel
		 * Tasking (tm) nature of their chips in all their marketing
		 * literature; we may as well take advantage of it. :)
		 */
		xl_rxeof(sc, -1);
	}
}
2631
/*
 * 3c90xB if_start handler: the 905B download engine polls the fixed
 * descriptor ring on its own (XL_DOWN_POLL), so we only fill ring
 * slots and link them; no stall/unstall dance is needed.
 */
static void
xl_start_90xB(struct ifnet *ifp)
{
	struct xl_softc *sc;
	struct mbuf *m_head = NULL;
	struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx;
	struct xl_chain *prev_tx;
	int error, idx;

	ASSERT_SERIALIZED(ifp->if_serializer);

	sc = ifp->if_softc;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	idx = sc->xl_cdata.xl_tx_prod;
	start_tx = &sc->xl_cdata.xl_tx_chain[idx];

	while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {

		/* Keep a few slots in reserve; stop when nearly full. */
		if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		prev_tx = cur_tx;
		cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

		/* Pack the data into the descriptor. */
		error = xl_encap(sc, cur_tx, m_head);
		if (error) {
			/* Encap failed (mbuf already freed); reuse slot. */
			cur_tx = prev_tx;
			continue;
		}

		/* Chain it together. */
		if (prev != NULL)
			prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
		prev = cur_tx;

		BPF_MTAP(ifp, cur_tx->xl_mbuf);

		XL_INC(idx, XL_TX_LIST_CNT);
		sc->xl_cdata.xl_tx_cnt++;
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
	    XL_TXSTAT_DL_INTR);

	/* Start transmission */
	sc->xl_cdata.xl_tx_prod = idx;
	/* Link the previous tail to us; the chip's poller picks it up. */
	start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
2708
/*
 * Full (re)initialization of the adapter: stop it, program the station
 * address and RX filter, build the descriptor rings, point the chip at
 * them, and enable RX/TX plus interrupts (or polling).  The order of
 * register accesses below follows the chip's documented requirements.
 * Caller must hold the interface serializer.
 */
static void
xl_init(void *xsc)
{
	struct xl_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error, i;
	u_int16_t rxfilt = 0;
	struct mii_data *mii = NULL;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	xl_stop(sc);

	if (sc->xl_miibus == NULL) {
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
		xl_wait(sc);
	}
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
	DELAY(10000);

	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	/* Init our MAC address */
	XL_SEL_WIN(2);
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i,
			    sc->arpcom.ac_enaddr[i]);
	}

	/* Clear the station mask. */
	for (i = 0; i < 3; i++)
		CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0);
#ifdef notdef
	/* Reset TX and RX. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif
	/* Init circular RX list. */
	error = xl_list_rx_init(sc);
	if (error) {
		if_printf(ifp, "initialization of the rx ring failed (%d)\n",
			  error);
		xl_stop(sc);
		return;
	}

	/* Init TX descriptors. */
	if (sc->xl_type == XL_TYPE_905B)
		xl_list_tx_init_90xB(sc);
	else
		xl_list_tx_init(sc);

	/*
	 * Set the TX freethresh value.
	 * Note that this has no effect on 3c905B "cyclone"
	 * cards but is required for 3c900/3c905 "boomerang"
	 * cards in order to enable the download engine.
	 */
	CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	sc->xl_tx_thresh = XL_MIN_FRAMELEN;
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh);

	/*
	 * If this is a 3c905B, also set the tx reclaim threshold.
	 * This helps cut down on the number of tx reclaim errors
	 * that could happen on a busy network. The chip multiplies
	 * the register value by 16 to obtain the actual threshold
	 * in bytes, so we divide by 16 when setting the value here.
	 * The existing threshold value can be examined by reading
	 * the register at offset 9 in window 5.
	 */
	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_COMMAND,
		    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
	}

	/* Set RX filter bits. */
	XL_SEL_WIN(5);
	rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);

	/* Set the individual bit to receive frames for this host only. */
	rxfilt |= XL_RXFILTER_INDIVIDUAL;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxfilt |= XL_RXFILTER_ALLFRAMES;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	} else {
		rxfilt &= ~XL_RXFILTER_ALLFRAMES;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxfilt |= XL_RXFILTER_BROADCAST;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	} else {
		rxfilt &= ~XL_RXFILTER_BROADCAST;
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	if (sc->xl_type == XL_TYPE_905B)
		xl_setmulti_hash(sc);
	else
		xl_setmulti(sc);

	if (sc->xl_type == XL_TYPE_905B) {
		/* Set UP polling interval */
		CSR_WRITE_1(sc, XL_UP_POLL, 64);
	}

	/*
	 * Load the address of the RX list. We have to
	 * stall the upload engine before we can manipulate
	 * the uplist pointer register, then unstall it when
	 * we're finished. We also have to wait for the
	 * stall command to complete before proceeding.
	 * Note that we have to do this after any RX resets
	 * have completed since the uplist register is cleared
	 * by a reset.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
	xl_wait(sc);
	CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
	xl_wait(sc);

	if (sc->xl_type == XL_TYPE_905B) {
		/* Set DN polling interval */
		CSR_WRITE_1(sc, XL_DOWN_POLL, 64);

		/* Load the address of the TX list */
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
		xl_wait(sc);
		CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
		    sc->xl_cdata.xl_tx_chain[0].xl_phys);
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
		xl_wait(sc);
	}

	/*
	 * If the coax transceiver is on, make sure to enable
	 * the DC-DC converter.
	 */
	XL_SEL_WIN(3);
	if (sc->xl_xcvr == XL_XCVR_COAX)
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START);
	else
		CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);

	/*
	 * increase packet size to allow reception of 802.1q or ISL packets.
	 * For the 3c90x chip, set the 'allow large packets' bit in the MAC
	 * control register. For 3c90xB/C chips, use the RX packet size
	 * register.
	 */

	if (sc->xl_type == XL_TYPE_905B) {
		CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE);
	} else {
		u_int8_t macctl;
		macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL);
		macctl |= XL_MACCTRL_ALLOW_LARGE_PACK;
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl);
	}

	/* Clear out the stats counters. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	sc->xl_stats_no_timeout = 1;
	xl_stats_update_serialized(sc);
	sc->xl_stats_no_timeout = 0;
	XL_SEL_WIN(4);
	CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB | XL_INTRS);
#ifdef DEVICE_POLLING
	/* Do not enable interrupt if polling(4) is enabled */
	if ((ifp->if_flags & IFF_POLLING) != 0)
		xl_enable_intrs(sc, 0);
	else
#endif
	xl_enable_intrs(sc, XL_INTRS);

	/* Set the RX early threshold */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2));
	CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
	xl_wait(sc);

	if (mii != NULL)
		mii_mediachg(mii);

	/* Select window 7 for normal operations. */
	XL_SEL_WIN(7);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Kick off the one-second statistics/MII callout. */
	callout_reset(&sc->xl_stat_timer, hz, xl_stats_update, sc);
}
2931
2932/*
2933 * Set media options.
2934 */
2935static int
96d3e330 2936xl_ifmedia_upd(struct ifnet *ifp)
984263bc
MD
2937{
2938 struct xl_softc *sc;
2939 struct ifmedia *ifm = NULL;
2940 struct mii_data *mii = NULL;
2941
56170638
SZ
2942 ASSERT_SERIALIZED(ifp->if_serializer);
2943
984263bc
MD
2944 sc = ifp->if_softc;
2945 if (sc->xl_miibus != NULL)
2946 mii = device_get_softc(sc->xl_miibus);
2947 if (mii == NULL)
2948 ifm = &sc->ifmedia;
2949 else
2950 ifm = &mii->mii_media;
2951
2952 switch(IFM_SUBTYPE(ifm->ifm_media)) {
2953 case IFM_100_FX:
2954 case IFM_10_FL:
2955 case IFM_10_2:
2956 case IFM_10_5:
2957 xl_setmode(sc, ifm->ifm_media);
2958 return(0);
2959 break;
2960 default:
2961 break;
2962 }
2963
2964 if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
2965 || sc->xl_media & XL_MEDIAOPT_BT4) {
2966 xl_init(sc);
2967 } else {
2968 xl_setmode(sc, ifm->ifm_media);
2969 }
2970
2971 return(0);
2972}
2973
2974/*
2975 * Report current media status.
2976 */
2977static void
96d3e330 2978xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
984263bc
MD
2979{
2980 struct xl_softc *sc;
2981 u_int32_t icfg;
2982 struct mii_data *mii = NULL;
2983
56170638
SZ
2984 ASSERT_SERIALIZED(ifp->if_serializer);
2985
984263bc
MD
2986 sc = ifp->if_softc;
2987 if (sc->xl_miibus != NULL)
2988 mii = device_get_softc(sc->xl_miibus);
2989
2990 XL_SEL_WIN(3);
2991 icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
2992 icfg >>= XL_ICFG_CONNECTOR_BITS;
2993
2994 ifmr->ifm_active = IFM_ETHER;
2995
2996 switch(icfg) {
2997 case XL_XCVR_10BT:
2998 ifmr->ifm_active = IFM_ETHER|IFM_10_T;
2999 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
3000 ifmr->ifm_active |= IFM_FDX;
3001 else
3002 ifmr->ifm_active |= IFM_HDX;
3003 break;
3004 case XL_XCVR_AUI:
3005 if (sc->xl_type == XL_TYPE_905B &&
3006 sc->xl_media == XL_MEDIAOPT_10FL) {
3007 ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
3008 if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
3009 ifmr->ifm_active |= IFM_FDX;
3010 else
3011 ifmr->ifm_active |= IFM_HDX;
3012 } else
3013 ifmr->ifm_active = IFM_ETHER|IFM_10_5;
3014 break;
3015 case XL_XCVR_COAX:
3016 ifmr->ifm_active = IFM_ETHER|IFM_10_2;
3017 break;
3018 /*
3019 * XXX MII and BTX/AUTO should be separate cases.
3020 */
3021
3022 case XL_XCVR_100BTX:
3023 case XL_XCVR_AUTO:
3024 case XL_XCVR_MII:
3025 if (mii != NULL) {
3026 mii_pollstat(mii);
3027 ifmr->ifm_active = mii->mii_media_active;
3028 ifmr->ifm_status = mii->mii_media_status;
3029 }
3030 break;
3031 case XL_XCVR_100BFX:
3032 ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
3033 break;
3034 default:
ed2832e7 3035 if_printf(ifp, "unknown XCVR type: %d\n", icfg);
984263bc
MD
3036 break;
3037 }
3038
3039 return;
3040}
3041
3042static int
96d3e330 3043xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
984263bc
MD
3044{
3045 struct xl_softc *sc = ifp->if_softc;
3046 struct ifreq *ifr = (struct ifreq *) data;
a3a1d2d2 3047 int error = 0;
984263bc
MD
3048 struct mii_data *mii = NULL;
3049 u_int8_t rxfilt;
3050
56170638
SZ
3051 ASSERT_SERIALIZED(ifp->if_serializer);
3052
984263bc 3053 switch(command) {
984263bc
MD
3054 case SIOCSIFFLAGS:
3055 XL_SEL_WIN(5);
3056 rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
3057 if (ifp->if_flags & IFF_UP) {
3058 if (ifp->if_flags & IFF_RUNNING &&
3059 ifp->if_flags & IFF_PROMISC &&
3060 !(sc->xl_if_flags & IFF_PROMISC)) {
3061 rxfilt |= XL_RXFILTER_ALLFRAMES;
3062 CSR_WRITE_2(sc, XL_COMMAND,
3063 XL_CMD_RX_SET_FILT|rxfilt);
3064 XL_SEL_WIN(7);
3065 } else if (ifp->if_flags & IFF_RUNNING &&
3066 !(ifp->if_flags & IFF_PROMISC) &&
3067 sc->xl_if_flags & IFF_PROMISC) {
3068 rxfilt &= ~XL_RXFILTER_ALLFRAMES;
3069 CSR_WRITE_2(sc, XL_COMMAND,
3070 XL_CMD_RX_SET_FILT|rxfilt);
3071 XL_SEL_WIN(7);
3072 } else
3073 xl_init(sc);
3074 } else {
3075 if (ifp->if_flags & IFF_RUNNING)
3076 xl_stop(sc);
3077 }
3078 sc->xl_if_flags = ifp->if_flags;
3079 error = 0;
3080 break;
3081 case SIOCADDMULTI:
3082 case SIOCDELMULTI:
3083 if (sc->xl_type == XL_TYPE_905B)
3084 xl_setmulti_hash(sc);
3085 else
3086 xl_setmulti(sc);
3087 error = 0;
3088 break;
3089 case SIOCGIFMEDIA:
3090 case SIOCSIFMEDIA:
3091 if (sc->xl_miibus != NULL)
3092 mii = device_get_softc(sc->xl_miibus);
3093 if (mii == NULL)
3094 error = ifmedia_ioctl(ifp, ifr,
3095 &sc->ifmedia, command);
3096 else
3097 error = ifmedia_ioctl(ifp, ifr,
3098 &mii->mii_media, command);
3099 break;
a3a1d2d2 3100 case SIOCSIFCAP:
cb79ad8e
SZ
3101 ifp->if_capenable &= ~IFCAP_HWCSUM;
3102 ifp->if_capenable |= (ifr->ifr_reqcap & IFCAP_HWCSUM);
3103 if (ifp->if_capenable & IFCAP_HWCSUM)
a3a1d2d2
MD
3104 ifp->if_hwassist = XL905B_CSUM_FEATURES;
3105 else
3106 ifp->if_hwassist = 0;
3107 break;
984263bc 3108 default:
4cde4dd5 3109 error = ether_ioctl(ifp, command, data);
984263bc
MD
3110 break;
3111 }
984263bc
MD
3112 return(error);
3113}
3114
3115static void
96d3e330 3116xl_watchdog(struct ifnet *ifp)
984263bc
MD
3117{
3118 struct xl_softc *sc;
3119 u_int16_t status = 0;
3120
56170638
SZ
3121 ASSERT_SERIALIZED(ifp->if_serializer);
3122
984263bc
MD
3123 sc = ifp->if_softc;
3124
3125 ifp->if_oerrors++;
3126 XL_SEL_WIN(4);
3127 status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
ed2832e7 3128 if_printf(ifp, "watchdog timeout\n");
984263bc
MD
3129
3130 if (status & XL_MEDIASTAT_CARRIER)
ed2832e7 3131 if_printf(ifp, "no carrier - transceiver cable problem?\n");
984263bc
MD
3132 xl_txeoc(sc);
3133 xl_txeof(sc);
a0e7467e 3134 xl_rxeof(sc, -1);
984263bc
MD
3135 xl_reset(sc);
3136 xl_init(sc);
3137
b4e1aa10 3138 if (!ifq_is_empty(&ifp->if_snd))
9db4b353 3139 if_devstart(ifp);
984263bc
MD
3140}
3141
3142/*
3143 * Stop the adapter and free any mbufs allocated to the
3144 * RX and TX lists.
3145 */
3146static void
96d3e330 3147xl_stop(struct xl_softc *sc)
984263bc 3148{
3d0f5f54 3149 int i;
984263bc
MD
3150 struct ifnet *ifp;
3151
3152 ifp = &sc->arpcom.ac_if;
56170638
SZ
3153 ASSERT_SERIALIZED(ifp->if_serializer);
3154
984263bc
MD
3155 ifp->if_timer = 0;
3156
3157 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
3158 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
3159 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
3160 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
3161 xl_wait(sc);
3162 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
3163 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
3164 DELAY(800);
3165
3166#ifdef foo
3167 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
3168 xl_wait(sc);
3169 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
3170 xl_wait(sc);
3171#endif
3172
3173 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
3174 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
3175 CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
5001d436
SZ
3176 if (sc->xl_flags & XL_FLAG_FUNCREG)
3177 bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);
984263bc
MD
3178
3179 /* Stop the stats updater. */
74a0ee08 3180 callout_stop(&sc->xl_stat_timer);
984263bc
MD
3181
3182 /*
3183 * Free data in the RX lists.
3184 */
3185 for (i = 0; i < XL_RX_LIST_CNT; i++) {
3186 if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
8628af14 3187 bus_dmamap_unload(sc->xl_rx_mtag,
a3a1d2d2 3188 sc->xl_cdata.xl_rx_chain[i].xl_map);
984263bc
MD
3189 m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
3190 sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
3191 }
3192 }
a3a1d2d2 3193 bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);
5001d436 3194
984263bc
MD
3195 /*
3196 * Free the TX list buffers.
3197 */
3198 for (i = 0; i < XL_TX_LIST_CNT; i++) {
3199 if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
8628af14 3200 bus_dmamap_unload(sc->xl_tx_mtag,
a3a1d2d2 3201 sc->xl_cdata.xl_tx_chain[i].xl_map);
984263bc
MD
3202 m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
3203 sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
3204 }
3205 }
a3a1d2d2 3206 bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);
984263bc
MD
3207
3208 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
984263bc
MD
3209}
3210
3211/*
3212 * Stop all chip I/O so that the kernel's probe routines don't
3213 * get confused by errant DMAs when rebooting.
3214 */
3215static void
96d3e330 3216xl_shutdown(device_t dev)
984263bc 3217{
78195a76 3218 struct xl_softc *sc = device_get_softc(dev);
984263bc 3219
78195a76 3220 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer);
984263bc
MD
3221 xl_reset(sc);
3222 xl_stop(sc);
78195a76 3223 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer);
984263bc
MD
3224}
3225
3226static int
96d3e330 3227xl_suspend(device_t dev)
984263bc 3228{
4ec4a72b 3229 struct xl_softc *sc = device_get_softc(dev);
a3a1d2d2 3230
78195a76 3231 lwkt_serialize_enter(sc->arpcom.ac_if.if_serializer);
984263bc 3232 xl_stop(sc);
78195a76 3233 lwkt_serialize_exit(sc->arpcom.ac_if.if_serializer);
984263bc
MD
3234
3235 return(0);
3236}
3237
3238static int
96d3e330 3239xl_resume(device_t dev)
984263bc
MD
3240{
3241 struct xl_softc *sc;
3242 struct ifnet *ifp;
a3a1d2d2 3243
984263bc
MD
3244 sc = device_get_softc(dev);
3245 ifp = &sc->arpcom.ac_if;
3246
78195a76 3247 lwkt_serialize_enter(ifp->if_serializer);
984263bc
MD
3248 xl_reset(sc);
3249 if (ifp->if_flags & IFF_UP)
3250 xl_init(sc);
78195a76 3251 lwkt_serialize_exit(ifp->if_serializer);
4ec4a72b 3252
984263bc
MD
3253 return(0);
3254}