emx: Add errata workaround for multiple TX queues
[dragonfly.git] / sys / dev / netif / emx / if_emx.c
/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/emx/if_emx.h>

#define DEBUG_HW 0

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),
	EMX_DEVICE(82574LA),

	EMX_DEVICE(PCH_LPT_I217_LM),
	EMX_DEVICE(PCH_LPT_I217_V),
	EMX_DEVICE(PCH_LPTLP_I218_LM),
	EMX_DEVICE(PCH_LPTLP_I218_V),
	EMX_DEVICE(PCH_I218_LM2),
	EMX_DEVICE(PCH_I218_V2),
	EMX_DEVICE(PCH_I218_LM3),
	EMX_DEVICE(PCH_I218_V3),

	/* required last entry */
	EMX_DEVICE_NULL
};

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	emx_npoll(struct ifnet *, struct ifpoll_info *);
static void	emx_npoll_status(struct ifnet *);
static void	emx_npoll_tx(struct ifnet *, void *, int);
static void	emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void	emx_watchdog(struct ifaltq_subque *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_rxdata *, int);
static void	emx_txeof(struct emx_txdata *);
static void	emx_tx_collect(struct emx_txdata *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_txdata *);
static int	emx_init_rx_ring(struct emx_rxdata *);
static void	emx_free_tx_ring(struct emx_txdata *);
static void	emx_free_rx_ring(struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_txdata *);
static int	emx_create_rx_ring(struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_txdata *, int);
static void	emx_destroy_rx_ring(struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int	emx_txcsum(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int	emx_tso_setup(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int	emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emx_probe),
	DEVMETHOD(device_attach,	emx_attach),
	DEVMETHOD(device_detach,	emx_detach),
	DEVMETHOD(device_shutdown,	emx_shutdown),
	DEVMETHOD(device_suspend,	emx_suspend),
	DEVMETHOD(device_resume,	emx_resume),
	DEVMETHOD_END
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = 0;
static int	emx_rxr = 0;
static int	emx_txr = 1;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = 0;

static int	emx_82573_workaround = 1;
static int	emx_msi_enable = 1;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
					   CSUM_PSEUDO_HDR |
					   CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
	    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);
	return pi;
}
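
/*
 * NOTE:
 * Setting M_HASH together with m_pkthdr.hash is what lets the rest of
 * the stack dispatch this mbuf to a fixed netisr/CPU per flow;
 * toeplitz_hash() presumably folds the 32 bit RSS hash from the RX
 * descriptor into the form the netisr dispatch code expects, so that a
 * TCP/UDP flow stays on the CPU owning the RX ring it hashed to.
 */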

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	int error = 0, i, throttle, msi_enable, tx_ring_max;
	u_int intr_flags;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	/*
	 * Setup RX rings
	 */
	for (i = 0; i < EMX_NRX_RING; ++i) {
		sc->rx_data[i].sc = sc;
		sc->rx_data[i].idx = i;
	}

	/*
	 * Setup TX ring
	 */
	for (i = 0; i < EMX_NTX_RING; ++i) {
		sc->tx_data[i].sc = sc;
		sc->tx_data[i].idx = i;
	}

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < EMX_NTX_RING; ++i)
		lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->main_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

	KKASSERT(i == EMX_NSERIALIZE);

	ifmedia_init(&sc->media, IFM_IMASK, emx_media_change, emx_media_status);
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Don't enable MSI-X on 82574, see:
	 * 82574 specification update errata #15
	 *
	 * Don't enable MSI on 82571/82572, see:
	 * 82571/82572 specification update errata #63
	 */
	msi_enable = emx_msi_enable;
	if (msi_enable &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572))
		msi_enable = 0;

	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= EMX_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/*
	 * For I217/I218, we need to map the flash memory and this
	 * must happen after the MAC is identified.
	 */
	if (sc->hw.mac.type == e1000_pch_lpt) {
		sc->flash_rid = EMX_BAR_FLASH;

		sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->flash_rid, RF_ACTIVE);
		if (sc->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
		sc->osdep.flash_bus_space_handle =
		    rman_get_bushandle(sc->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		sc->hw.flash_address = (uint8_t *)sc->flash;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	throttle = device_getenv_int(dev, "int_throttle_ceil",
	    emx_int_throttle_ceil);
	if (throttle == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16 bits of ITR are reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}
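
	/*
	 * NOTE:
	 * The ITR interval on these MACs is programmed in units of
	 * 256ns, hence the 1000000000 / 256 / rate conversions above.
	 * Worked example (illustrative only): a ceiling of 10000 ints/s
	 * gives an interval of 3906250 / 10000 = 390 units; recomputing
	 * the ceiling from that interval yields 3906250 / 390 = 10016
	 * ints/s, the exact frequency the hardware will actually use.
	 */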

	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

	/* Calculate # of RX rings */
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, EMX_NRX_RING);

	/*
	 * Calculate # of TX rings
	 *
	 * XXX
	 * I217/I218 claim to have 2 TX queues
	 *
	 * NOTE:
	 * Don't enable multiple TX queues on 82574; it always gives
	 * watchdog timeout on TX queue0, when multiple TCP streams are
	 * received.  It was originally suspected that the hardware TX
	 * checksum offloading caused this watchdog timeout, since only
	 * TCP ACKs are sent during TCP receiving tests.  However, even
	 * if the hardware TX checksum offloading is disabled, TX queue0
	 * still will give a watchdog timeout.
	 */
	tx_ring_max = 1;
	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572 ||
	    sc->hw.mac.type == e1000_80003es2lan ||
	    sc->hw.mac.type == e1000_pch_lpt ||
	    sc->hw.mac.type == e1000_82574)
		tx_ring_max = EMX_NTX_RING;
	sc->tx_ring_cnt = device_getenv_int(dev, "txr", emx_txr);
	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, tx_ring_max);

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Disable EEE on I217/I218 */
	sc->hw.dev_spec.ich8lan.eee_disable = 1;

	/*
	 * Start from a known state; this is important for reading
	 * the NVM and MAC address.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Disable ULP support */
	e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= EMX_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EMX_EEPROM_APME;
	eeprom_data = 0;
	switch (sc->hw.mac.type) {
	case e1000_82573:
		sc->flags |= EMX_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	default:
		e1000_read_nvm(&sc->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port adapter, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	sc->wol = 0;

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->rx_npoll_off = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	if (sc->tx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.txoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->tx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.txoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->tx_npoll_off = offset;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);

	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree, must be after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Reset the hardware */
	error = emx_reset(sc);
	if (error) {
		/*
		 * Some 82573 parts fail the first reset, call it again,
		 * if it fails a second time it's a real issue.
		 */
		error = emx_reset(sc);
		if (error) {
			device_printf(dev, "Unable to reset the hardware\n");
			ether_ifdetach(&sc->arpcom.ac_if);
			goto fail;
		}
	}

	/* Initialize statistics */
	emx_update_stats(sc);

	sc->hw.mac.get_link_status = 1;
	emx_update_link_status(sc);

	/* Non-AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    EMX_FLAG_HAS_MGMT)
		emx_get_hw_control(sc);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 */
	intr_func = emx_intr;
	if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572 ||
	     sc->hw.mac.type == e1000_82573 ||
	     sc->hw.mac.type == e1000_82574))
		intr_func = emx_intr_mask;

	error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
	    &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}
	return (0);
fail:
	emx_detach(dev);
	return (error);
}

static int
emx_detach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		emx_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		emx_rel_mgmt(sc);
		emx_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			emx_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->memory != NULL) {
		emx_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}

	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
		    sc->memory);
	}

	if (sc->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
		    sc->flash);
	}

	emx_dma_free(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);

	return (0);
}

static int
emx_shutdown(device_t dev)
{
	return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_stop(sc);

	emx_rel_mgmt(sc);
	emx_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		emx_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	emx_init(sc);
	emx_get_mgmt(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct emx_softc *sc = ifp->if_softc;
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(tdata->ifsq == ifsq);
	ASSERT_SERIALIZED(&tdata->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		/* Now do we at least have a minimal? */
		if (EMX_IS_OACTIVE(tdata)) {
			emx_tx_collect(tdata);
			if (EMX_IS_OACTIVE(tdata)) {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			emx_tx_collect(tdata);
			continue;
		}

		if (nsegs >= tdata->tx_wreg_nsegs) {
			E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
			nsegs = 0;
			idx = -1;
		}
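
		/*
		 * NOTE:
		 * TDT is only poked once every tx_wreg_nsegs descriptors
		 * above (and once more after the loop); batching the
		 * doorbell writes this way presumably trades a tiny bit
		 * of latency for far fewer MMIO posts on a busy queue.
		 */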

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
	}
	if (idx >= 0)
		E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
}

static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_82574:
		case e1000_pch_lpt:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			emx_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					emx_disable_promisc(sc);
					emx_set_promisc(sc);
				}
			} else {
				emx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			emx_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			emx_disable_intr(sc);
			emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				emx_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			device_printf(sc->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EMX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			emx_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct emx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
	 */

	if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
	    E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call ifsq_devstart_sched() here.
		 */
		ifsq_clr_oactive(ifsq);
		tdata->tx_watchdog.wd_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
		return;
	}

	if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

	IFNET_STAT_INC(ifp, oerrors, 1);

	emx_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);
}

static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_stop(sc);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * With the 82571, RAR[0] may be overwritten when the other
	 * port is reset, so we make a duplicate in RAR[14] for that
	 * eventuality; this assures the interface continues to function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_reset(sc)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX emx_stop()? */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
	ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);
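	/*
	 * NOTE:
	 * if_ring_count2() rounds the ring count to a power of 2, so
	 * tx_ring_inuse - 1 is presumably usable as a bit mask here,
	 * hashing each ifq subqueue onto one of the active TX rings.
	 */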

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		emx_init_tx_ring(&sc->tx_data[i]);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		if (emx_init_rx_ring(&sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		ifsq_clr_oactive(sc->tx_data[i].ifsq);
		ifsq_watchdog_start(&sc->tx_data[i].tx_watchdog);
	}

	callout_reset(&sc->timer, hz, emx_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
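		/*
		 * Decoding the magic number (presumably): nibble 0
		 * (0x8) maps RXQ0 to vector 0, nibble 2 (0x9) maps
		 * TXQ0 to vector 1 and nibble 4 (0xA) maps Link/other
		 * to vector 2, each with its enable bit set; the
		 * remaining bits are kept as the reference driver
		 * programs them.
		 */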
1344 }
1345
5330213c
SZ
1346 /*
1347 * Only enable interrupts if we are not polling, make sure
1348 * they are off otherwise.
1349 */
d84018e9 1350 if (polling)
5330213c
SZ
1351 emx_disable_intr(sc);
1352 else
5330213c
SZ
1353 emx_enable_intr(sc);
1354
2d0e5700 1355 /* AMT based hardware can now take control from firmware */
de0836d4
SZ
1356 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
1357 (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
2d0e5700 1358 emx_get_hw_control(sc);
5330213c
SZ
1359}
1360
5330213c
SZ
1361static void
1362emx_intr(void *xsc)
1363{
4cb541ae
SZ
1364 emx_intr_body(xsc, TRUE);
1365}
1366
1367static void
1368emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
1369{
5330213c
SZ
1370 struct ifnet *ifp = &sc->arpcom.ac_if;
1371 uint32_t reg_icr;
1372
1373 logif(intr_beg);
6d435846 1374 ASSERT_SERIALIZED(&sc->main_serialize);
5330213c
SZ
1375
1376 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
1377
4cb541ae 1378 if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
5330213c
SZ
1379 logif(intr_end);
1380 return;
1381 }
1382
1383 /*
1384 * XXX: some laptops trigger several spurious interrupts
df50f778 1385 * on emx(4) when in the resume cycle. The ICR register
5330213c
SZ
1386 * reports all-ones value in this case. Processing such
1387 * interrupts would lead to a freeze. I don't know why.
1388 */
1389 if (reg_icr == 0xffffffff) {
1390 logif(intr_end);
1391 return;
1392 }
1393
1394 if (ifp->if_flags & IFF_RUNNING) {
1395 if (reg_icr &
3f939c23
SZ
1396 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
1397 int i;
1398
13890b61 1399 for (i = 0; i < sc->rx_ring_cnt; ++i) {
6d435846
SZ
1400 lwkt_serialize_enter(
1401 &sc->rx_data[i].rx_serialize);
9f831fa8 1402 emx_rxeof(&sc->rx_data[i], -1);
6d435846
SZ
1403 lwkt_serialize_exit(
1404 &sc->rx_data[i].rx_serialize);
1405 }
3f939c23 1406 }
6446af7b 1407 if (reg_icr & E1000_ICR_TXDW) {
d84018e9
SZ
1408 struct emx_txdata *tdata = &sc->tx_data[0];
1409
1410 lwkt_serialize_enter(&tdata->tx_serialize);
1411 emx_txeof(tdata);
1412 if (!ifsq_is_empty(tdata->ifsq))
1413 ifsq_devstart(tdata->ifsq);
1414 lwkt_serialize_exit(&tdata->tx_serialize);
5330213c
SZ
1415 }
1416 }
1417
1418 /* Link status change */
1419 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
bca7c435 1420 emx_serialize_skipmain(sc);
6d435846 1421
5330213c
SZ
1422 callout_stop(&sc->timer);
1423 sc->hw.mac.get_link_status = 1;
1424 emx_update_link_status(sc);
1425
1426 /* Deal with TX cruft when link lost */
1427 emx_tx_purge(sc);
1428
1429 callout_reset(&sc->timer, hz, emx_timer, sc);
6d435846 1430
bca7c435 1431 emx_deserialize_skipmain(sc);
5330213c
SZ
1432 }
1433
1434 if (reg_icr & E1000_ICR_RXO)
1435 sc->rx_overruns++;
1436
1437 logif(intr_end);
1438}
1439
4cb541ae
SZ
1440static void
1441emx_intr_mask(void *xsc)
1442{
1443 struct emx_softc *sc = xsc;
1444
1445 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
1446 /*
1447 * NOTE:
1448 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
1449 * so don't check it.
1450 */
1451 emx_intr_body(sc, FALSE);
1452 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
1453}
1454
5330213c
SZ
1455static void
1456emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1457{
1458 struct emx_softc *sc = ifp->if_softc;
1459
2c9effcf 1460 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5330213c
SZ
1461
1462 emx_update_link_status(sc);
1463
1464 ifmr->ifm_status = IFM_AVALID;
1465 ifmr->ifm_active = IFM_ETHER;
1466
1467 if (!sc->link_active)
1468 return;
1469
1470 ifmr->ifm_status |= IFM_ACTIVE;
1471
1472 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
1473 sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
1474 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1475 } else {
1476 switch (sc->link_speed) {
1477 case 10:
1478 ifmr->ifm_active |= IFM_10_T;
1479 break;
1480 case 100:
1481 ifmr->ifm_active |= IFM_100_TX;
1482 break;
1483
1484 case 1000:
1485 ifmr->ifm_active |= IFM_1000_T;
1486 break;
1487 }
1488 if (sc->link_duplex == FULL_DUPLEX)
1489 ifmr->ifm_active |= IFM_FDX;
1490 else
1491 ifmr->ifm_active |= IFM_HDX;
1492 }
1493}
1494
1495static int
1496emx_media_change(struct ifnet *ifp)
1497{
1498 struct emx_softc *sc = ifp->if_softc;
1499 struct ifmedia *ifm = &sc->media;
1500
2c9effcf 1501 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5330213c
SZ
1502
1503 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1504 return (EINVAL);
1505
1506 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1507 case IFM_AUTO:
1508 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
1509 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
1510 break;
1511
1512 case IFM_1000_LX:
1513 case IFM_1000_SX:
1514 case IFM_1000_T:
1515 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
1516 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1517 break;
1518
1519 case IFM_100_TX:
1520 sc->hw.mac.autoneg = FALSE;
1521 sc->hw.phy.autoneg_advertised = 0;
1522 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1523 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
1524 else
1525 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
1526 break;
1527
1528 case IFM_10_T:
1529 sc->hw.mac.autoneg = FALSE;
1530 sc->hw.phy.autoneg_advertised = 0;
1531 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1532 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
1533 else
1534 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
1535 break;
1536
1537 default:
1538 if_printf(ifp, "Unsupported media type\n");
1539 break;
1540 }
1541
5330213c
SZ
1542 emx_init(sc);
1543
1544 return (0);
1545}
1546
1547static int
7f32a9b0
SZ
1548emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
1549 int *segs_used, int *idx)
5330213c
SZ
1550{
1551 bus_dma_segment_t segs[EMX_MAX_SCATTER];
1552 bus_dmamap_t map;
323e5ecd 1553 struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
5330213c
SZ
1554 struct e1000_tx_desc *ctxd = NULL;
1555 struct mbuf *m_head = *m_headp;
1556 uint32_t txd_upper, txd_lower, cmd = 0;
1557 int maxsegs, nsegs, i, j, first, last = 0, error;
1558
3eb0ea09 1559 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
ec1c60bb 1560 error = emx_tso_pullup(tdata, m_headp);
3eb0ea09
SZ
1561 if (error)
1562 return error;
1563 m_head = *m_headp;
1564 }
1565
5330213c
SZ
1566 txd_upper = txd_lower = 0;
1567
1568 /*
1569 * Capture the first descriptor index, this descriptor
1570 * will have the index of the EOP which is the only one
1571 * that now gets a DONE bit writeback.
1572 */
ec1c60bb
SZ
1573 first = tdata->next_avail_tx_desc;
1574 tx_buffer = &tdata->tx_buf[first];
5330213c
SZ
1575 tx_buffer_mapped = tx_buffer;
1576 map = tx_buffer->map;
1577
ec1c60bb
SZ
1578 maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
1579 KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
5330213c
SZ
1580 if (maxsegs > EMX_MAX_SCATTER)
1581 maxsegs = EMX_MAX_SCATTER;
1582
ec1c60bb 1583 error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
5330213c
SZ
1584 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1585 if (error) {
5330213c
SZ
1586 m_freem(*m_headp);
1587 *m_headp = NULL;
1588 return error;
1589 }
ec1c60bb 1590 bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);
5330213c
SZ
1591
1592 m_head = *m_headp;
ec1c60bb 1593 tdata->tx_nsegs += nsegs;
7f32a9b0 1594 *segs_used += nsegs;
5330213c 1595
3eb0ea09
SZ
1596 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1597 /* TSO will consume one TX desc */
7f32a9b0
SZ
1598 i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
1599 tdata->tx_nsegs += i;
1600 *segs_used += i;
3eb0ea09 1601 } else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
5330213c 1602 /* TX csum offloading will consume one TX desc */
7f32a9b0
SZ
1603 i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
1604 tdata->tx_nsegs += i;
1605 *segs_used += i;
5330213c 1606 }
d37cc902
SZ
1607
1608 /* Handle VLAN tag */
1609 if (m_head->m_flags & M_VLANTAG) {
1610 /* Set the vlan id. */
1611 txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
1612 /* Tell hardware to add tag */
1613 txd_lower |= htole32(E1000_TXD_CMD_VLE);
1614 }
1615
ec1c60bb 1616 i = tdata->next_avail_tx_desc;
5330213c
SZ
1617
1618 /* Set up our transmit descriptors */
1619 for (j = 0; j < nsegs; j++) {
ec1c60bb
SZ
1620 tx_buffer = &tdata->tx_buf[i];
1621 ctxd = &tdata->tx_desc_base[i];
5330213c
SZ
1622
1623 ctxd->buffer_addr = htole64(segs[j].ds_addr);
1624 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
1625 txd_lower | segs[j].ds_len);
1626 ctxd->upper.data = htole32(txd_upper);
1627
1628 last = i;
ec1c60bb 1629 if (++i == tdata->num_tx_desc)
5330213c 1630 i = 0;
5330213c
SZ
1631 }
1632
ec1c60bb 1633 tdata->next_avail_tx_desc = i;
5330213c 1634
ec1c60bb
SZ
1635 KKASSERT(tdata->num_tx_desc_avail > nsegs);
1636 tdata->num_tx_desc_avail -= nsegs;
5330213c 1637
5330213c
SZ
1638 tx_buffer->m_head = m_head;
1639 tx_buffer_mapped->map = tx_buffer->map;
1640 tx_buffer->map = map;
1641
d84018e9 1642 if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
ec1c60bb 1643 tdata->tx_nsegs = 0;
4e4e8481
SZ
1644
1645 /*
1646 * Report Status (RS) is turned on
d84018e9 1647 * every tx_intr_nsegs descriptors.
4e4e8481 1648 */
5330213c
SZ
1649 cmd = E1000_TXD_CMD_RS;
1650
b4b0a2b4
SZ
1651 /*
1652 * Keep track of the descriptor, which will
1653 * be written back by hardware.
1654 */
ec1c60bb
SZ
1655 tdata->tx_dd[tdata->tx_dd_tail] = last;
1656 EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
1657 KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
5330213c
SZ
1658 }
1659
1660 /*
1661 * Last Descriptor of Packet needs End Of Packet (EOP)
5330213c
SZ
1662 */
1663 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);
1664
5330213c 1665 /*
b691889c 1666 * Defer TDT updating, until enough descriptors are setup
5330213c 1667 */
7f32a9b0 1668 *idx = i;
5330213c 1669
d84018e9
SZ
1670#ifdef EMX_TSS_DEBUG
1671 tdata->tx_pkts++;
1672#endif
1673
5330213c
SZ
1674 return (0);
1675}
1676
1677static void
1678emx_set_promisc(struct emx_softc *sc)
1679{
1680 struct ifnet *ifp = &sc->arpcom.ac_if;
1681 uint32_t reg_rctl;
1682
1683 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1684
1685 if (ifp->if_flags & IFF_PROMISC) {
1686 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1687 /* Turn this on if you want to see bad packets */
1688 if (emx_debug_sbp)
1689 reg_rctl |= E1000_RCTL_SBP;
1690 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1691 } else if (ifp->if_flags & IFF_ALLMULTI) {
1692 reg_rctl |= E1000_RCTL_MPE;
1693 reg_rctl &= ~E1000_RCTL_UPE;
1694 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1695 }
1696}
1697
1698static void
1699emx_disable_promisc(struct emx_softc *sc)
1700{
1701 uint32_t reg_rctl;
1702
1703 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1704
1705 reg_rctl &= ~E1000_RCTL_UPE;
1706 reg_rctl &= ~E1000_RCTL_MPE;
1707 reg_rctl &= ~E1000_RCTL_SBP;
1708 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1709}
1710
1711static void
1712emx_set_multi(struct emx_softc *sc)
1713{
1714 struct ifnet *ifp = &sc->arpcom.ac_if;
1715 struct ifmultiaddr *ifma;
1716 uint32_t reg_rctl = 0;
2d0e5700 1717 uint8_t *mta;
5330213c
SZ
1718 int mcnt = 0;
1719
2d0e5700
SZ
1720 mta = sc->mta;
1721 bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);
1722
441d34b2 1723 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5330213c
SZ
1724 if (ifma->ifma_addr->sa_family != AF_LINK)
1725 continue;
1726
1727 if (mcnt == EMX_MCAST_ADDR_MAX)
1728 break;
1729
1730 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1731 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
1732 mcnt++;
1733 }
1734
1735 if (mcnt >= EMX_MCAST_ADDR_MAX) {
1736 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1737 reg_rctl |= E1000_RCTL_MPE;
1738 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1739 } else {
6a5a645e 1740 e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
5330213c
SZ
1741 }
1742}
1743
1744/*
1745 * This routine checks for link status and updates statistics.
1746 */
1747static void
1748emx_timer(void *xsc)
1749{
1750 struct emx_softc *sc = xsc;
1751 struct ifnet *ifp = &sc->arpcom.ac_if;
1752
37e854ff 1753 lwkt_serialize_enter(&sc->main_serialize);
5330213c
SZ
1754
1755 emx_update_link_status(sc);
1756 emx_update_stats(sc);
1757
1758 /* Reset LAA into RAR[0] on 82571 */
1759 if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
1760 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);
1761
1762 if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
1763 emx_print_hw_stats(sc);
1764
1765 emx_smartspeed(sc);
1766
1767 callout_reset(&sc->timer, hz, emx_timer, sc);
1768
37e854ff 1769 lwkt_serialize_exit(&sc->main_serialize);
5330213c
SZ
1770}
1771
1772static void
1773emx_update_link_status(struct emx_softc *sc)
1774{
1775 struct e1000_hw *hw = &sc->hw;
1776 struct ifnet *ifp = &sc->arpcom.ac_if;
1777 device_t dev = sc->dev;
1778 uint32_t link_check = 0;
1779
1780 /* Get the cached link value or read phy for real */
1781 switch (hw->phy.media_type) {
1782 case e1000_media_type_copper:
1783 if (hw->mac.get_link_status) {
1784 /* Do the work to read phy */
1785 e1000_check_for_link(hw);
1786 link_check = !hw->mac.get_link_status;
1787 if (link_check) /* ESB2 fix */
1788 e1000_cfg_on_link_up(hw);
1789 } else {
1790 link_check = TRUE;
1791 }
1792 break;
1793
1794 case e1000_media_type_fiber:
1795 e1000_check_for_link(hw);
1796 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
1797 break;
1798
1799 case e1000_media_type_internal_serdes:
1800 e1000_check_for_link(hw);
1801 link_check = sc->hw.mac.serdes_has_link;
1802 break;
1803
1804 case e1000_media_type_unknown:
1805 default:
1806 break;
1807 }
1808
1809 /* Now check for a transition */
1810 if (link_check && sc->link_active == 0) {
1811 e1000_get_speed_and_duplex(hw, &sc->link_speed,
1812 &sc->link_duplex);
1813
1814 /*
1815 * Check if we should enable/disable SPEED_MODE bit on
1816 * 82571EB/82572EI
1817 */
1818 if (sc->link_speed != SPEED_1000 &&
1819 (hw->mac.type == e1000_82571 ||
1820 hw->mac.type == e1000_82572)) {
1821 int tarc0;
1822
1823 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
1824 tarc0 &= ~EMX_TARC_SPEED_MODE;
1825 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
1826 }
1827 if (bootverbose) {
1828 device_printf(dev, "Link is up %d Mbps %s\n",
1829 sc->link_speed,
1830 ((sc->link_duplex == FULL_DUPLEX) ?
1831 "Full Duplex" : "Half Duplex"));
1832 }
1833 sc->link_active = 1;
1834 sc->smartspeed = 0;
1835 ifp->if_baudrate = sc->link_speed * 1000000;
1836 ifp->if_link_state = LINK_STATE_UP;
1837 if_link_state_change(ifp);
1838 } else if (!link_check && sc->link_active == 1) {
1839 ifp->if_baudrate = sc->link_speed = 0;
1840 sc->link_duplex = 0;
1841 if (bootverbose)
1842 device_printf(dev, "Link is Down\n");
1843 sc->link_active = 0;
1844 ifp->if_link_state = LINK_STATE_DOWN;
1845 if_link_state_change(ifp);
1846 }
1847}
1848
1849static void
1850emx_stop(struct emx_softc *sc)
1851{
1852 struct ifnet *ifp = &sc->arpcom.ac_if;
1853 int i;
1854
1855 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1856
1857 emx_disable_intr(sc);
1858
1859 callout_stop(&sc->timer);
1860
1861 ifp->if_flags &= ~IFF_RUNNING;
1862 for (i = 0; i < sc->tx_ring_cnt; ++i) {
1863 struct emx_txdata *tdata = &sc->tx_data[i];
1864
1865 ifsq_clr_oactive(tdata->ifsq);
1866 ifsq_watchdog_stop(&tdata->tx_watchdog);
1867 tdata->tx_flags &= ~EMX_TXFLAG_ENABLED;
1868 }
1869
1870 /*
1871 * Disable multiple receive queues.
1872 *
1873 * NOTE:
1874 * We should disable multiple receive queues before
1875 * resetting the hardware.
1876 */
1877 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);
1878
1879 e1000_reset_hw(&sc->hw);
1880 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
1881
1882 for (i = 0; i < sc->tx_ring_cnt; ++i)
1883 emx_free_tx_ring(&sc->tx_data[i]);
1884 for (i = 0; i < sc->rx_ring_cnt; ++i)
1885 emx_free_rx_ring(&sc->rx_data[i]);
1886}
1887
1888static int
1889emx_reset(struct emx_softc *sc)
1890{
1891 device_t dev = sc->dev;
1892 uint16_t rx_buffer_size;
1893 uint32_t pba;
1894
1895 /* Set up smart power down as default off on newer adapters. */
1896 if (!emx_smart_pwr_down &&
1897 (sc->hw.mac.type == e1000_82571 ||
1898 sc->hw.mac.type == e1000_82572)) {
1899 uint16_t phy_tmp = 0;
1900
1901 /* Speed up time to link by disabling smart power down. */
1902 e1000_read_phy_reg(&sc->hw,
1903 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1904 phy_tmp &= ~IGP02E1000_PM_SPD;
1905 e1000_write_phy_reg(&sc->hw,
1906 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
1907 }
1908
1909 /*
1910 * Packet Buffer Allocation (PBA)
1911 * Writing PBA sets the receive portion of the buffer;
1912 * the remainder is used for the transmit buffer.
1913 */
1914 switch (sc->hw.mac.type) {
1915 /* Total Packet Buffer on these is 48K */
1916 case e1000_82571:
1917 case e1000_82572:
1918 case e1000_80003es2lan:
1919 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1920 break;
1921
1922 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
1923 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1924 break;
1925
1926 case e1000_82574:
1927 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1928 break;
1929
1930 case e1000_pch_lpt:
1931 pba = E1000_PBA_26K;
1932 break;
1933
1934 default:
1935 /* Devices before 82547 had a Packet Buffer of 64K. */
1936 if (sc->hw.mac.max_frame_size > 8192)
1937 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1938 else
1939 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1940 }
1941 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);
1942
1943 /*
1944 * These parameters control the automatic generation (Tx) and
1945 * response (Rx) to Ethernet PAUSE frames.
1946 * - High water mark should allow for at least two frames to be
1947 * received after sending an XOFF.
1948 * - Low water mark works best when it is very near the high water mark.
1949 * This allows the receiver to restart by sending XON when it has
1950 * drained a bit. Here we use an arbitrary value of 1500 which will
1951 * restart after one full frame is pulled from the buffer. There
1952 * could be several smaller frames in the buffer and if so they will
1953 * not trigger the XON until their total size reduces the buffer
1954 * by 1500.
1955 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
1956 */
1957 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;
1958
1959 sc->hw.fc.high_water = rx_buffer_size -
1960 roundup2(sc->hw.mac.max_frame_size, 1024);
1961 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;
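/*
 * Worked example (a sketch, not chip documentation): on an 82574
 * with PBA = 20K and a standard 1518 byte max frame,
 * rx_buffer_size = 20 * 1024 = 20480, so high_water becomes
 * 20480 - roundup2(1518, 1024) = 20480 - 2048 = 18432 and
 * low_water becomes 18432 - 1500 = 16932.
 */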
1962
1963 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
1964 sc->hw.fc.send_xon = TRUE;
1965 sc->hw.fc.requested_mode = e1000_fc_full;
1966
1967 /*
1968 * Device specific overrides/settings
1969 */
1970 if (sc->hw.mac.type == e1000_pch_lpt) {
1971 sc->hw.fc.high_water = 0x5C20;
1972 sc->hw.fc.low_water = 0x5048;
1973 sc->hw.fc.pause_time = 0x0650;
1974 sc->hw.fc.refresh_time = 0x0400;
1975 /* Jumbos need adjusted PBA */
1976 if (sc->arpcom.ac_if.if_mtu > ETHERMTU)
1977 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12);
1978 else
1979 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26);
1980 } else if (sc->hw.mac.type == e1000_80003es2lan) {
1981 sc->hw.fc.pause_time = 0xFFFF;
1982 }
1983
1984 /* Issue a global reset */
1985 e1000_reset_hw(&sc->hw);
1986 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
1987 emx_disable_aspm(sc);
1988
1989 if (e1000_init_hw(&sc->hw) < 0) {
1990 device_printf(dev, "Hardware Initialization Failed\n");
1991 return (EIO);
1992 }
1993
1994 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
1995 e1000_get_phy_info(&sc->hw);
1996 e1000_check_for_link(&sc->hw);
1997
1998 return (0);
1999}
2000
2001static void
2002emx_setup_ifp(struct emx_softc *sc)
2003{
2004 struct ifnet *ifp = &sc->arpcom.ac_if;
2005 int i;
2006
2007 if_initname(ifp, device_get_name(sc->dev),
2008 device_get_unit(sc->dev));
2009 ifp->if_softc = sc;
2010 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2011 ifp->if_init = emx_init;
2012 ifp->if_ioctl = emx_ioctl;
2013 ifp->if_start = emx_start;
2014#ifdef IFPOLL_ENABLE
2015 ifp->if_npoll = emx_npoll;
2016#endif
2017 ifp->if_serialize = emx_serialize;
2018 ifp->if_deserialize = emx_deserialize;
2019 ifp->if_tryserialize = emx_tryserialize;
2020#ifdef INVARIANTS
2021 ifp->if_serialize_assert = emx_serialize_assert;
2022#endif
2023
2024 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1);
2025 ifq_set_ready(&ifp->if_snd);
2026 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);
2027
2028 ifp->if_mapsubq = ifq_mapsubq_mask;
2029 ifq_set_subq_mask(&ifp->if_snd, 0);
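/*
 * With a subqueue mask of 0, ifq_mapsubq_mask() maps every packet
 * to subqueue 0; the mask only becomes interesting once more than
 * one TX ring is actually put to use.
 */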
2030
2031 ether_ifattach(ifp, sc->hw.mac.addr, NULL);
2032
2033 ifp->if_capabilities = IFCAP_HWCSUM |
2034 IFCAP_VLAN_HWTAGGING |
2035 IFCAP_VLAN_MTU |
2036 IFCAP_TSO;
2037 if (sc->rx_ring_cnt > 1)
2038 ifp->if_capabilities |= IFCAP_RSS;
2039 ifp->if_capenable = ifp->if_capabilities;
2040 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO;
2041
2042 /*
2043 * Tell the upper layer(s) we support long frames.
2044 */
2045 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2046
2047 for (i = 0; i < sc->tx_ring_cnt; ++i) {
2048 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
2049 struct emx_txdata *tdata = &sc->tx_data[i];
2050
2051 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res));
2052 ifsq_set_priv(ifsq, tdata);
2053 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize);
2054 tdata->ifsq = ifsq;
2055
2056 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog);
2057 }
2058
2059 /*
2060 * Specify the media types supported by this adapter and register
2061 * callbacks to update media and link information
2062 */
2063 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
2064 sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
2065 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2066 0, NULL);
2067 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
2068 } else {
2069 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
2070 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2071 0, NULL);
2072 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2073 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2074 0, NULL);
2075 if (sc->hw.phy.type != e1000_phy_ife) {
2076 ifmedia_add(&sc->media,
2077 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2078 ifmedia_add(&sc->media,
2079 IFM_ETHER | IFM_1000_T, 0, NULL);
2080 }
2081 }
2082 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2083 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
2084}
2085
2086/*
2087 * Workaround for SmartSpeed on 82541 and 82547 controllers
2088 */
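/*
 * In short: if 1000T autonegotiation keeps reporting a master/slave
 * configuration fault, temporarily clear the manual master/slave
 * enable (CR_1000T_MS_ENABLE) and restart autonegotiation; after
 * EMX_SMARTSPEED_DOWNSHIFT ticks without link the bit is turned
 * back on, and the whole cycle restarts after EMX_SMARTSPEED_MAX
 * iterations.
 */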
2089static void
2090emx_smartspeed(struct emx_softc *sc)
2091{
2092 uint16_t phy_tmp;
2093
2094 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp ||
2095 sc->hw.mac.autoneg == 0 ||
2096 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2097 return;
2098
2099 if (sc->smartspeed == 0) {
2100 /*
2101 * If Master/Slave config fault is asserted twice,
2102 * we assume back-to-back faults
2103 */
2104 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
2105 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2106 return;
2107 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
2108 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2109 e1000_read_phy_reg(&sc->hw,
2110 PHY_1000T_CTRL, &phy_tmp);
2111 if (phy_tmp & CR_1000T_MS_ENABLE) {
2112 phy_tmp &= ~CR_1000T_MS_ENABLE;
2113 e1000_write_phy_reg(&sc->hw,
2114 PHY_1000T_CTRL, phy_tmp);
2115 sc->smartspeed++;
2116 if (sc->hw.mac.autoneg &&
2117 !e1000_phy_setup_autoneg(&sc->hw) &&
2118 !e1000_read_phy_reg(&sc->hw,
2119 PHY_CONTROL, &phy_tmp)) {
2120 phy_tmp |= MII_CR_AUTO_NEG_EN |
2121 MII_CR_RESTART_AUTO_NEG;
2122 e1000_write_phy_reg(&sc->hw,
2123 PHY_CONTROL, phy_tmp);
2124 }
2125 }
2126 }
2127 return;
2128 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) {
2129 /* If still no link, perhaps using 2/3 pair cable */
2130 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
2131 phy_tmp |= CR_1000T_MS_ENABLE;
2132 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
2133 if (sc->hw.mac.autoneg &&
2134 !e1000_phy_setup_autoneg(&sc->hw) &&
2135 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) {
2136 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
2137 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp);
2138 }
2139 }
2140
2141 /* Restart process after EMX_SMARTSPEED_MAX iterations */
2142 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX)
2143 sc->smartspeed = 0;
2144}
2145
2146static int
2147emx_create_tx_ring(struct emx_txdata *tdata)
2148{
2149 device_t dev = tdata->sc->dev;
2150 struct emx_txbuf *tx_buffer;
2151 int error, i, tsize, ntxd;
2152
2153 /*
2154 * Validate the number of transmit descriptors. It must not exceed
2155 * the hardware maximum, and must be a multiple of E1000_DBA_ALIGN.
2156 */
2157 ntxd = device_getenv_int(dev, "txd", emx_txd);
2158 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 ||
2159 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) {
2160 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
2161 EMX_DEFAULT_TXD, ntxd);
2162 tdata->num_tx_desc = EMX_DEFAULT_TXD;
2163 } else {
2164 tdata->num_tx_desc = ntxd;
2165 }
2166
2167 /*
2168 * Allocate Transmit Descriptor ring
2169 */
2170 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc),
2171 EMX_DBA_ALIGN);
2172 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag,
2173 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
2174 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap,
2175 &tdata->tx_desc_paddr);
2176 if (tdata->tx_desc_base == NULL) {
2177 device_printf(dev, "Unable to allocate tx_desc memory\n");
2178 return ENOMEM;
2179 }
2180
2181 tsize = __VM_CACHELINE_ALIGN(
2182 sizeof(struct emx_txbuf) * tdata->num_tx_desc);
2183 tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);
2184
2185 /*
2186 * Create DMA tags for tx buffers
2187 */
2188 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */
2189 1, 0, /* alignment, bounds */
2190 BUS_SPACE_MAXADDR, /* lowaddr */
2191 BUS_SPACE_MAXADDR, /* highaddr */
2192 NULL, NULL, /* filter, filterarg */
2193 EMX_TSO_SIZE, /* maxsize */
2194 EMX_MAX_SCATTER, /* nsegments */
2195 EMX_MAX_SEGSIZE, /* maxsegsize */
2196 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
2197 BUS_DMA_ONEBPAGE, /* flags */
2198 &tdata->txtag);
2199 if (error) {
2200 device_printf(dev, "Unable to allocate TX DMA tag\n");
2201 kfree(tdata->tx_buf, M_DEVBUF);
2202 tdata->tx_buf = NULL;
2203 return error;
2204 }
2205
2206 /*
2207 * Create DMA maps for tx buffers
2208 */
2209 for (i = 0; i < tdata->num_tx_desc; i++) {
2210 tx_buffer = &tdata->tx_buf[i];
2211
2212 error = bus_dmamap_create(tdata->txtag,
2213 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2214 &tx_buffer->map);
2215 if (error) {
2216 device_printf(dev, "Unable to create TX DMA map\n");
2217 emx_destroy_tx_ring(tdata, i);
2218 return error;
2219 }
2220 }
2221
2222 /*
2223 * Setup TX parameters
2224 */
2225 tdata->spare_tx_desc = EMX_TX_SPARE;
2226 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG;
2227
2228 /*
2229 * Keep the following relationship between spare_tx_desc, oact_tx_desc
2230 * and tx_intr_nsegs:
2231 * (spare_tx_desc + EMX_TX_RESERVED) <=
2232 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs
2233 */
2234 tdata->oact_tx_desc = tdata->num_tx_desc / 8;
2235 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX)
2236 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX;
2237 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED)
2238 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED;
2239
2240 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16;
2241 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc)
2242 tdata->tx_intr_nsegs = tdata->oact_tx_desc;
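/*
 * E.g. with 512 TX descriptors, and assuming EMX_TX_OACTIVE_MAX
 * does not clamp: oact_tx_desc = 512 / 8 = 64 and
 * tx_intr_nsegs = max(512 / 16, oact_tx_desc) = 64.
 */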
2243
2244 /*
2245 * Pull up an extra 4 bytes into the first data segment for TSO, see:
2246 * 82571/82572 specification update errata #7
2247 *
2248 * Same applies to I217 (and maybe I218).
2249 *
2250 * NOTE:
2251 * 4 bytes are pulled instead of the 2 bytes mentioned in the
2252 * errata, mainly to keep the rest of the data properly aligned.
2253 */
2254 if (tdata->sc->hw.mac.type == e1000_82571 ||
2255 tdata->sc->hw.mac.type == e1000_82572 ||
2256 tdata->sc->hw.mac.type == e1000_pch_lpt)
2257 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX;
2258
2259 return (0);
2260}
2261
2262static void
2263emx_init_tx_ring(struct emx_txdata *tdata)
2264{
2265 /* Clear the old ring contents */
2266 bzero(tdata->tx_desc_base,
2267 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc);
2268
2269 /* Reset state */
2270 tdata->next_avail_tx_desc = 0;
2271 tdata->next_tx_to_clean = 0;
2272 tdata->num_tx_desc_avail = tdata->num_tx_desc;
2273
2274 tdata->tx_flags |= EMX_TXFLAG_ENABLED;
2275 if (tdata->sc->tx_ring_inuse > 1) {
2276 tdata->tx_flags |= EMX_TXFLAG_FORCECTX;
2277 if (bootverbose) {
2278 if_printf(&tdata->sc->arpcom.ac_if,
2279 "TX %d force ctx setup\n", tdata->idx);
2280 }
2281 }
2282}
2283
2284static void
2285emx_init_tx_unit(struct emx_softc *sc)
2286{
2287 uint32_t tctl, tarc, tipg = 0, txdctl;
2288 int i;
2289
2290 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2291 struct emx_txdata *tdata = &sc->tx_data[i];
2292 uint64_t bus_addr;
2293
2294 /* Setup the Base and Length of the Tx Descriptor Ring */
2295 bus_addr = tdata->tx_desc_paddr;
2296 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i),
2297 tdata->num_tx_desc * sizeof(struct e1000_tx_desc));
2298 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i),
2299 (uint32_t)(bus_addr >> 32));
2300 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i),
2301 (uint32_t)bus_addr);
2302 /* Setup the HW Tx Head and Tail descriptor pointers */
2303 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0);
2304 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0);
2305 }
2306
2307 /* Set the default values for the Tx Inter Packet Gap timer */
2308 switch (sc->hw.mac.type) {
2309 case e1000_80003es2lan:
2310 tipg = DEFAULT_82543_TIPG_IPGR1;
2311 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
2312 E1000_TIPG_IPGR2_SHIFT;
2313 break;
2314
2315 default:
2316 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
2317 sc->hw.phy.media_type == e1000_media_type_internal_serdes)
2318 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2319 else
2320 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2321 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2322 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2323 break;
2324 }
2325
2326 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg);
2327
2328 /* NOTE: 0 is not allowed for TIDV */
2329 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1);
2330 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0);
2331
2332 /*
2333 * Errata workaround (obtained from Linux). This is necessary
2334 * to make multiple TX queues work on 82574.
2335 * XXX can't find it in any published errata though.
2336 */
2337 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0));
2338 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl);
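/*
 * Mirroring TXDCTL(0) into TXDCTL(1) presumably gives the second
 * queue the same descriptor prefetch/write-back thresholds as the
 * first; the Linux driver carries the same undocumented fix.
 */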
2339
2340 if (sc->hw.mac.type == e1000_82571 ||
2341 sc->hw.mac.type == e1000_82572) {
2342 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2343 tarc |= EMX_TARC_SPEED_MODE;
2344 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2345 } else if (sc->hw.mac.type == e1000_80003es2lan) {
2346 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2347 tarc |= 1;
2348 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2349 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2350 tarc |= 1;
2351 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2352 }
2353
2354 /* Program the Transmit Control Register */
2355 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL);
2356 tctl &= ~E1000_TCTL_CT;
2357 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2358 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2359 tctl |= E1000_TCTL_MULR;
2360
2361 /* This write will effectively turn on the transmit unit. */
2362 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl);
2363
2364 if (sc->hw.mac.type == e1000_82571 ||
2365 sc->hw.mac.type == e1000_82572 ||
2366 sc->hw.mac.type == e1000_80003es2lan) {
2367 /* Bit 28 of TARC1 must be cleared when MULR is enabled */
2368 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2369 tarc &= ~(1 << 28);
2370 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2371 }
2372
2373 if (sc->tx_ring_inuse > 1) {
2374 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2375 tarc &= ~EMX_TARC_COUNT_MASK;
2376 tarc |= 1;
2377 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2378
2379 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2380 tarc &= ~EMX_TARC_COUNT_MASK;
2381 tarc |= 1;
2382 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2383 }
2384}
2385
2386static void
2387emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc)
2388{
2389 struct emx_txbuf *tx_buffer;
2390 int i;
2391
2392 /* Free Transmit Descriptor ring */
2393 if (tdata->tx_desc_base) {
2394 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap);
2395 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base,
2396 tdata->tx_desc_dmap);
2397 bus_dma_tag_destroy(tdata->tx_desc_dtag);
2398
2399 tdata->tx_desc_base = NULL;
2400 }
2401
2402 if (tdata->tx_buf == NULL)
2403 return;
2404
2405 for (i = 0; i < ndesc; i++) {
2406 tx_buffer = &tdata->tx_buf[i];
2407
2408 KKASSERT(tx_buffer->m_head == NULL);
2409 bus_dmamap_destroy(tdata->txtag, tx_buffer->map);
2410 }
2411 bus_dma_tag_destroy(tdata->txtag);
2412
2413 kfree(tdata->tx_buf, M_DEVBUF);
2414 tdata->tx_buf = NULL;
2415}
2416
2417/*
2418 * The offload context needs to be set when we transfer the first
2419 * packet of a particular protocol (TCP/UDP). This routine has been
2420 * enhanced to deal with inserted VLAN headers.
2421 *
2422 * If the new packet's ether header length, ip header length and
2423 * csum offloading type are the same as the previous packet's, we should
2424 * avoid allocating a new csum context descriptor; mainly to take
2425 * advantage of the pipeline effect of the TX data read request.
2426 *
2427 * This function returns the number of TX descriptors allocated for
2428 * the csum context.
2429 */
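/*
 * In effect (csum_lhlen, csum_iphlen, csum_flags) is a one-entry
 * context cache: a run of packets from the same stream pays for
 * the context descriptor only once.
 */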
2430static int
2431emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp,
2432 uint32_t *txd_upper, uint32_t *txd_lower)
2433{
2434 struct e1000_context_desc *TXD;
2435 int curr_txd, ehdrlen, csum_flags;
2436 uint32_t cmd, hdr_len, ip_hlen;
2437
2438 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES;
2439 ip_hlen = mp->m_pkthdr.csum_iphlen;
2440 ehdrlen = mp->m_pkthdr.csum_lhlen;
2441
2442 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
2443 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen &&
2444 tdata->csum_flags == csum_flags) {
2445 /*
2446 * Same csum offload context as the previous packets;
2447 * just return.
2448 */
2449 *txd_upper = tdata->csum_txd_upper;
2450 *txd_lower = tdata->csum_txd_lower;
2451 return 0;
2452 }
2453
2454 /*
2455 * Setup a new csum offload context.
2456 */
2457
2458 curr_txd = tdata->next_avail_tx_desc;
2459 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];
2460
2461 cmd = 0;
2462
2463 /* Setup of IP header checksum. */
2464 if (csum_flags & CSUM_IP) {
2465 /*
2466 * Start offset for header checksum calculation.
2467 * End offset for header checksum calculation.
2468 * Offset of place to put the checksum.
2469 */
2470 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2471 TXD->lower_setup.ip_fields.ipcse =
2472 htole16(ehdrlen + ip_hlen - 1);
2473 TXD->lower_setup.ip_fields.ipcso =
2474 ehdrlen + offsetof(struct ip, ip_sum);
2475 cmd |= E1000_TXD_CMD_IP;
2476 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2477 }
2478 hdr_len = ehdrlen + ip_hlen;
2479
2480 if (csum_flags & CSUM_TCP) {
2481 /*
2482 * Start offset for payload checksum calculation.
2483 * End offset for payload checksum calculation.
2484 * Offset of place to put the checksum.
2485 */
2486 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2487 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2488 TXD->upper_setup.tcp_fields.tucso =
2489 hdr_len + offsetof(struct tcphdr, th_sum);
2490 cmd |= E1000_TXD_CMD_TCP;
2491 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2492 } else if (csum_flags & CSUM_UDP) {
2493 /*
2494 * Start offset for header checksum calculation.
2495 * End offset for header checksum calculation.
2496 * Offset of place to put the checksum.
2497 */
2498 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2499 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2500 TXD->upper_setup.tcp_fields.tucso =
2501 hdr_len + offsetof(struct udphdr, uh_sum);
2502 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2503 }
2504
2505 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
2506 E1000_TXD_DTYP_D; /* Data descr */
2507
2508 /* Save the information for this csum offloading context */
2509 tdata->csum_lhlen = ehdrlen;
2510 tdata->csum_iphlen = ip_hlen;
2511 tdata->csum_flags = csum_flags;
2512 tdata->csum_txd_upper = *txd_upper;
2513 tdata->csum_txd_lower = *txd_lower;
2514
2515 TXD->tcp_seg_setup.data = htole32(0);
2516 TXD->cmd_and_length =
2517 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);
2518
2519 if (++curr_txd == tdata->num_tx_desc)
2520 curr_txd = 0;
2521
2522 KKASSERT(tdata->num_tx_desc_avail > 0);
2523 tdata->num_tx_desc_avail--;
2524
2525 tdata->next_avail_tx_desc = curr_txd;
2526 return 1;
2527}
2528
2529static void
2530emx_txeof(struct emx_txdata *tdata)
2531{
2532 struct ifnet *ifp = &tdata->sc->arpcom.ac_if;
2533 struct emx_txbuf *tx_buffer;
2534 int first, num_avail;
2535
2536 if (tdata->tx_dd_head == tdata->tx_dd_tail)
2537 return;
2538
2539 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2540 return;
2541
2542 num_avail = tdata->num_tx_desc_avail;
2543 first = tdata->next_tx_to_clean;
2544
2545 while (tdata->tx_dd_head != tdata->tx_dd_tail) {
2546 int dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2547 struct e1000_tx_desc *tx_desc;
5330213c 2548
2549 tx_desc = &tdata->tx_desc_base[dd_idx];
2550 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2551 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
2552
2553 if (++dd_idx == tdata->num_tx_desc)
2554 dd_idx = 0;
2555
2556 while (first != dd_idx) {
2557 logif(pkt_txclean);
2558
2559 num_avail++;
2560
2561 tx_buffer = &tdata->tx_buf[first];
2562 if (tx_buffer->m_head) {
2563 IFNET_STAT_INC(ifp, opackets, 1);
2564 bus_dmamap_unload(tdata->txtag,
2565 tx_buffer->map);
2566 m_freem(tx_buffer->m_head);
2567 tx_buffer->m_head = NULL;
2568 }
2569
2570 if (++first == tdata->num_tx_desc)
2571 first = 0;
2572 }
2573 } else {
2574 break;
2575 }
2576 }
2577 tdata->next_tx_to_clean = first;
2578 tdata->num_tx_desc_avail = num_avail;
2579
2580 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2581 tdata->tx_dd_head = 0;
2582 tdata->tx_dd_tail = 0;
2583 }
2584
2585 if (!EMX_IS_OACTIVE(tdata)) {
2586 ifsq_clr_oactive(tdata->ifsq);
2587
2588 /* All clean, turn off the timer */
2589 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2590 tdata->tx_watchdog.wd_timer = 0;
2591 }
2592}
2593
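/*
 * Unlike emx_txeof(), which trusts the DD bits recorded in tx_dd[],
 * the following routine reads the hardware TDH register directly
 * and reclaims everything the MAC has consumed; it is used e.g. by
 * emx_tx_purge() when the link is gone.
 */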
2594static void
2595emx_tx_collect(struct emx_txdata *tdata)
2596{
2597 struct ifnet *ifp = &tdata->sc->arpcom.ac_if;
2598 struct emx_txbuf *tx_buffer;
2599 int tdh, first, num_avail, dd_idx = -1;
2600
2601 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2602 return;
2603
2604 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx));
2605 if (tdh == tdata->next_tx_to_clean)
2606 return;
2607
2608 if (tdata->tx_dd_head != tdata->tx_dd_tail)
2609 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2610
2611 num_avail = tdata->num_tx_desc_avail;
2612 first = tdata->next_tx_to_clean;
2613
2614 while (first != tdh) {
2615 logif(pkt_txclean);
2616
2617 num_avail++;
2618
2619 tx_buffer = &tdata->tx_buf[first];
2620 if (tx_buffer->m_head) {
2621 IFNET_STAT_INC(ifp, opackets, 1);
2622 bus_dmamap_unload(tdata->txtag,
2623 tx_buffer->map);
2624 m_freem(tx_buffer->m_head);
2625 tx_buffer->m_head = NULL;
2626 }
2627
2628 if (first == dd_idx) {
2629 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
2630 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2631 tdata->tx_dd_head = 0;
2632 tdata->tx_dd_tail = 0;
2633 dd_idx = -1;
2634 } else {
2635 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2636 }
2637 }
2638
2639 if (++first == tdata->num_tx_desc)
2640 first = 0;
2641 }
2642 tdata->next_tx_to_clean = first;
2643 tdata->num_tx_desc_avail = num_avail;
2644
2645 if (!EMX_IS_OACTIVE(tdata)) {
2646 ifsq_clr_oactive(tdata->ifsq);
2647
2648 /* All clean, turn off the timer */
2649 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2650 tdata->tx_watchdog.wd_timer = 0;
2651 }
2652}
2653
2654/*
2655 * When the link is lost there is sometimes still work in the TX
2656 * ring, which would trigger a watchdog; rather than allow that,
2657 * do an attempted cleanup and then reinit here. Note that this
2658 * has been seen mostly with fiber adapters.
2659 */
2660static void
2661emx_tx_purge(struct emx_softc *sc)
2662{
2663 int i;
2664
2665 if (sc->link_active)
2666 return;
2667
2668 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2669 struct emx_txdata *tdata = &sc->tx_data[i];
2670
2671 if (tdata->tx_watchdog.wd_timer) {
2672 emx_tx_collect(tdata);
2673 if (tdata->tx_watchdog.wd_timer) {
2674 if_printf(&sc->arpcom.ac_if,
2675 "Link lost, TX pending, reinit\n");
2676 emx_init(sc);
2677 return;
2678 }
2679 }
2680 }
2681}
2682
2683static int
2684emx_newbuf(struct emx_rxdata *rdata, int i, int init)
2685{
2686 struct mbuf *m;
2687 bus_dma_segment_t seg;
2688 bus_dmamap_t map;
2689 struct emx_rxbuf *rx_buffer;
2690 int error, nseg;
2691
2692 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
2693 if (m == NULL) {
2694 if (init) {
2695 if_printf(&rdata->sc->arpcom.ac_if,
2696 "Unable to allocate RX mbuf\n");
2697 }
2698 return (ENOBUFS);
2699 }
2700 m->m_len = m->m_pkthdr.len = MCLBYTES;
2701
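/*
 * Offset the payload by ETHER_ALIGN (2 bytes) when the maximum
 * frame still fits the cluster, so the IP header following the
 * 14 byte Ethernet header ends up 4-byte aligned.
 */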
2702 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN)
2703 m_adj(m, ETHER_ALIGN);
2704
2705 error = bus_dmamap_load_mbuf_segment(rdata->rxtag,
2706 rdata->rx_sparemap, m,
2707 &seg, 1, &nseg, BUS_DMA_NOWAIT);
2708 if (error) {
2709 m_freem(m);
2710 if (init) {
2711 if_printf(&rdata->sc->arpcom.ac_if,
2712 "Unable to load RX mbuf\n");
2713 }
2714 return (error);
2715 }
2716
2717 rx_buffer = &rdata->rx_buf[i];
2718 if (rx_buffer->m_head != NULL)
2719 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2720
2721 map = rx_buffer->map;
2722 rx_buffer->map = rdata->rx_sparemap;
2723 rdata->rx_sparemap = map;
2724
2725 rx_buffer->m_head = m;
2726 rx_buffer->paddr = seg.ds_addr;
2727
2728 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer);
2729 return (0);
2730}
2731
2732static int
2733emx_create_rx_ring(struct emx_rxdata *rdata)
2734{
2735 device_t dev = rdata->sc->dev;
2736 struct emx_rxbuf *rx_buffer;
2737 int i, error, rsize, nrxd;
2738
2739 /*
2740 * Validate the number of receive descriptors. It must not exceed
2741 * the hardware maximum, and must be a multiple of E1000_DBA_ALIGN.
2742 */
2743 nrxd = device_getenv_int(dev, "rxd", emx_rxd);
2744 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 ||
2745 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) {
2746 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
2747 EMX_DEFAULT_RXD, nrxd);
2748 rdata->num_rx_desc = EMX_DEFAULT_RXD;
2749 } else {
2750 rdata->num_rx_desc = nrxd;
2751 }
2752
2753 /*
2754 * Allocate Receive Descriptor ring
2755 */
2756 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t),
2757 EMX_DBA_ALIGN);
2758 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag,
2759 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
2760 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap,
2761 &rdata->rx_desc_paddr);
2762 if (rdata->rx_desc == NULL) {
2763 device_printf(dev, "Unable to allocate rx_desc memory\n");
2764 return ENOMEM;
2765 }
2766
2767 rsize = __VM_CACHELINE_ALIGN(
2768 sizeof(struct emx_rxbuf) * rdata->num_rx_desc);
2769 rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);
2770
2771 /*
2772 * Create DMA tag for rx buffers
2773 */
2774 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */
2775 1, 0, /* alignment, bounds */
2776 BUS_SPACE_MAXADDR, /* lowaddr */
2777 BUS_SPACE_MAXADDR, /* highaddr */
2778 NULL, NULL, /* filter, filterarg */
2779 MCLBYTES, /* maxsize */
2780 1, /* nsegments */
2781 MCLBYTES, /* maxsegsize */
2782 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
2783 &rdata->rxtag);
2784 if (error) {
2785 device_printf(dev, "Unable to allocate RX DMA tag\n");
2786 kfree(rdata->rx_buf, M_DEVBUF);
2787 rdata->rx_buf = NULL;
2788 return error;
2789 }
2790
2791 /*
2792 * Create spare DMA map for rx buffers
2793 */
2794 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
2795 &rdata->rx_sparemap);
2796 if (error) {
2797 device_printf(dev, "Unable to create spare RX DMA map\n");
2798 bus_dma_tag_destroy(rdata->rxtag);
2799 kfree(rdata->rx_buf, M_DEVBUF);
2800 rdata->rx_buf = NULL;
2801 return error;
2802 }
2803
2804 /*
2805 * Create DMA maps for rx buffers
2806 */
2807 for (i = 0; i < rdata->num_rx_desc; i++) {
2808 rx_buffer = &rdata->rx_buf[i];
2809
2810 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
2811 &rx_buffer->map);
2812 if (error) {
2813 device_printf(dev, "Unable to create RX DMA map\n");
2814 emx_destroy_rx_ring(rdata, i);
2815 return error;
2816 }
2817 }
2818 return (0);
2819}
2820
2821static void
2822emx_free_rx_ring(struct emx_rxdata *rdata)
2823{
2824 int i;
2825
2826 for (i = 0; i < rdata->num_rx_desc; i++) {
2827 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i];
2828
2829 if (rx_buffer->m_head != NULL) {
2830 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2831 m_freem(rx_buffer->m_head);
2832 rx_buffer->m_head = NULL;
2833 }
2834 }
2835
2836 if (rdata->fmp != NULL)
2837 m_freem(rdata->fmp);
2838 rdata->fmp = NULL;
2839 rdata->lmp = NULL;
2840}
2841
2842static void
2843emx_free_tx_ring(struct emx_txdata *tdata)
2844{
2845 int i;
2846
2847 for (i = 0; i < tdata->num_tx_desc; i++) {
2848 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i];
2849
2850 if (tx_buffer->m_head != NULL) {
2851 bus_dmamap_unload(tdata->txtag, tx_buffer->map);
2852 m_freem(tx_buffer->m_head);
2853 tx_buffer->m_head = NULL;
2854 }
2855 }
2856
2857 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX;
2858
2859 tdata->csum_flags = 0;
2860 tdata->csum_lhlen = 0;
2861 tdata->csum_iphlen = 0;
2862 tdata->csum_thlen = 0;
2863 tdata->csum_mss = 0;
2864 tdata->csum_pktlen = 0;
2865
2866 tdata->tx_dd_head = 0;
2867 tdata->tx_dd_tail = 0;
2868 tdata->tx_nsegs = 0;
2869}
2870
2871static int
2872emx_init_rx_ring(struct emx_rxdata *rdata)
2873{
2874 int i, error;
2875
2876 /* Reset descriptor ring */
2877 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc);
2878
2879 /* Allocate new ones. */
2880 for (i = 0; i < rdata->num_rx_desc; i++) {
2881 error = emx_newbuf(rdata, i, 1);
2882 if (error)
2883 return (error);
2884 }
2885
2886 /* Setup our descriptor pointers */
2887 rdata->next_rx_desc_to_check = 0;
2888
2889 return (0);
2890}
2891
2892static void
2893emx_init_rx_unit(struct emx_softc *sc)
2894{
2895 struct ifnet *ifp = &sc->arpcom.ac_if;
2896 uint64_t bus_addr;
2897 uint32_t rctl, itr, rfctl;
2898 int i;
2899
2900 /*
2901 * Make sure receives are disabled while setting
2902 * up the descriptor ring
2903 */
2904 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
2905 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2906
2907 /*
2908 * Set the interrupt throttling rate. Value is calculated
2909 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
2910 */
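/*
 * E.g. an int_throttle_ceil of 10000 interrupts/s yields
 * 1000000000 / 256 / 10000 = 390, i.e. at most one interrupt
 * every 390 * 256ns, roughly 100us.
 */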
2911 if (sc->int_throttle_ceil)
2912 itr = 1000000000 / 256 / sc->int_throttle_ceil;
2913 else
2914 itr = 0;
2915 emx_set_itr(sc, itr);
2916
2917 /* Use extended RX descriptor */
2918 rfctl = E1000_RFCTL_EXTEN;
2919
2920 /* Disable accelerated acknowledgement */
2921 if (sc->hw.mac.type == e1000_82574)
2922 rfctl |= E1000_RFCTL_ACK_DIS;
2923
2924 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl);
2925
2926 /*
2927 * Receive Checksum Offload for TCP and UDP
2928 *
2929 * Checksum offloading is also enabled if multiple receive
2930 * queues are to be supported, since we need it to figure out
2931 * the packet type.
2932 */
2933 if ((ifp->if_capenable & IFCAP_RXCSUM) ||
2934 sc->rx_ring_cnt > 1) {
2935 uint32_t rxcsum;
2936
5330213c 2937 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
2938
2939 /*
2940 * NOTE:
2941 * PCSD must be enabled to enable multiple
2942 * receive queues.
2943 */
2944 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2945 E1000_RXCSUM_PCSD;
2946 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
2947 }
2948
2949 /*
2950 * Configure multiple receive queues (RSS)
2951 */
2952 if (sc->rx_ring_cnt > 1) {
2953 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE];
2954 uint32_t reta;
2955
2956 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING,
2957 ("invalid number of RX ring (%d)", sc->rx_ring_cnt));
2958
2959 /*
2960 * NOTE:
2961 * When we reach here, RSS has already been disabled
2962 * in emx_stop(), so we could safely configure RSS key
2963 * and redirect table.
2964 */
2965
2966 /*
2967 * Configure RSS key
2968 */
2969 toeplitz_get_key(key, sizeof(key));
2970 for (i = 0; i < EMX_NRSSRK; ++i) {
2971 uint32_t rssrk;
2972
2973 rssrk = EMX_RSSRK_VAL(key, i);
2974 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
2975
2976 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk);
2977 }
2978
2979 /*
2980 * Configure the RSS redirect table in the following fashion:
2981 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
2982 */
2983 reta = 0;
2984 for (i = 0; i < EMX_RETA_SIZE; ++i) {
2985 uint32_t q;
2986
2987 q = (i % sc->rx_ring_cnt) << EMX_RETA_RINGIDX_SHIFT;
2988 reta |= q << (8 * i);
2989 }
2990 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
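/*
 * E.g. with rx_ring_cnt == 2 every other byte-wide redirect entry
 * selects ring 1, so each of the EMX_NRETA registers is written
 * with the same alternating pattern (0x80008000 if the ring index
 * sits in bit 7 of each entry).
 */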
2991
2992 for (i = 0; i < EMX_NRETA; ++i)
2993 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta);
2994
2995 /*
2996 * Enable multiple receive queues.
2997 * Enable IPv4 RSS standard hash functions.
2998 * Disable RSS interrupt.
2999 */
3000 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
3001 E1000_MRQC_ENABLE_RSS_2Q |
3002 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3003 E1000_MRQC_RSS_FIELD_IPV4);
3004 }
3005
3006 /*
3007 * XXX TEMPORARY WORKAROUND: on some systems with 82573
3008 * long latencies are observed, like Lenovo X60. This
3009 * change eliminates the problem, but since having positive
3010 * values in RDTR is a known source of problems on other
3011 * platforms another solution is being sought.
3012 */
3013 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) {
3014 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573);
3015 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573);
3016 }
3017
3018 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3019 struct emx_rxdata *rdata = &sc->rx_data[i];
3020
3021 /*
3022 * Setup the Base and Length of the Rx Descriptor Ring
3023 */
3024 bus_addr = rdata->rx_desc_paddr;
3025 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i),
3026 rdata->num_rx_desc * sizeof(emx_rxdesc_t));
3027 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i),
3028 (uint32_t)(bus_addr >> 32));
3029 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i),
3030 (uint32_t)bus_addr);
3031
3032 /*
3033 * Setup the HW Rx Head and Tail Descriptor Pointers
3034 */
3035 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0);
3036 E1000_WRITE_REG(&sc->hw, E1000_RDT(i),
3037 sc->rx_data[i].num_rx_desc - 1);
3038 }
3039
3040 if (sc->hw.mac.type >= e1000_pch2lan) {
3041 if (ifp->if_mtu > ETHERMTU)
3042 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE);
3043 else
3044 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE);
3045 }
3046
3047 /* Setup the Receive Control Register */
3048 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3049 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3050 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC |
3051 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3052
3053 /* Make sure VLAN Filters are off */
3054 rctl &= ~E1000_RCTL_VFE;
3055
3056 /* Don't store bad packets */
3057 rctl &= ~E1000_RCTL_SBP;
3058
3059 /* MCLBYTES */
3060 rctl |= E1000_RCTL_SZ_2048;
3061
3062 if (ifp->if_mtu > ETHERMTU)
3063 rctl |= E1000_RCTL_LPE;
3064 else
3065 rctl &= ~E1000_RCTL_LPE;
3066
3067 /* Enable Receives */
3068 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl);
3069}
3070
3071static void
3072emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc)
3073{
3074 struct emx_rxbuf *rx_buffer;
3075 int i;
3076
3077 /* Free Receive Descriptor ring */
3078 if (rdata->rx_desc) {
3079 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap);
3080 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc,
3081 rdata->rx_desc_dmap);
3082 bus_dma_tag_destroy(rdata->rx_desc_dtag);
3083
3084 rdata->rx_desc = NULL;
3085 }
3086
3087 if (rdata->rx_buf == NULL)
3088 return;
3089
3090 for (i = 0; i < ndesc; i++) {
3091 rx_buffer = &rdata->rx_buf[i];
3092
3093 KKASSERT(rx_buffer->m_head == NULL);
3094 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map);
3095 }
3096 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap);
3097 bus_dma_tag_destroy(rdata->rxtag);
3098
3099 kfree(rdata->rx_buf, M_DEVBUF);
3100 rdata->rx_buf = NULL;
3101}
3102
3103static void
3104emx_rxeof(struct emx_rxdata *rdata, int count)
3105{
3106 struct ifnet *ifp = &rdata->sc->arpcom.ac_if;
3107 uint32_t staterr;
3108 emx_rxdesc_t *current_desc;
3109 struct mbuf *mp;
3110 int i, cpuid = mycpuid;
3111
3112 i = rdata->next_rx_desc_to_check;
3113 current_desc = &rdata->rx_desc[i];
3114 staterr = le32toh(current_desc->rxd_staterr);
3115
3116 if (!(staterr & E1000_RXD_STAT_DD))
3117 return;
3118
3119 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
3120 struct pktinfo *pi = NULL, pi0;
3121 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i];
3122 struct mbuf *m = NULL;
3123 int eop, len;
3124
3125 logif(pkt_receive);
3126
3127 mp = rx_buf->m_head;
3128
3129 /*
3130 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3131 * needs to access the last received byte in the mbuf.
3132 */
3133 bus_dmamap_sync(rdata->rxtag, rx_buf->map,
3134 BUS_DMASYNC_POSTREAD);
3135
3136 len = le16toh(current_desc->rxd_length);
3137 if (staterr & E1000_RXD_STAT_EOP) {
3138 count--;
3139 eop = 1;
3140 } else {
3141 eop = 0;
3142 }
3143
3144 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3145 uint16_t vlan = 0;
3146 uint32_t mrq, rss_hash;
3147
3148 /*
3149 * Save several pieces of necessary information
3150 * before emx_newbuf() destroys them.
3151 */
3152 if ((staterr & E1000_RXD_STAT_VP) && eop)
3153 vlan = le16toh(current_desc->rxd_vlan);
3154
3155 mrq = le32toh(current_desc->rxd_mrq);
3156 rss_hash = le32toh(current_desc->rxd_rss);
3157
3158 EMX_RSS_DPRINTF(rdata->sc, 10,
3159 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n",
3160 rdata->idx, mrq, rss_hash);
3161
3162 if (emx_newbuf(rdata, i, 0) != 0) {
3163 IFNET_STAT_INC(ifp, iqdrops, 1);
3164 goto discard;
3165 }
3166
3167 /* Assign correct length to the current fragment */
3168 mp->m_len = len;
3169
3170 if (rdata->fmp == NULL) {
3171 mp->m_pkthdr.len = len;
3172 rdata->fmp = mp; /* Store the first mbuf */
3173 rdata->lmp = mp;
3174 } else {
3175 /*
3176 * Chain mbufs together
3177 */
3178 rdata->lmp->m_next = mp;
3179 rdata->lmp = rdata->lmp->m_next;
3180 rdata->fmp->m_pkthdr.len += len;
3181 }
3182
3183 if (eop) {
3184 rdata->fmp->m_pkthdr.rcvif = ifp;
3185 IFNET_STAT_INC(ifp, ipackets, 1);
3186
3187 if (ifp->if_capenable & IFCAP_RXCSUM)
3188 emx_rxcsum(staterr, rdata->fmp);
3189
3190 if (staterr & E1000_RXD_STAT_VP) {
3191 rdata->fmp->m_pkthdr.ether_vlantag =
3192 vlan;
3193 rdata->fmp->m_flags |= M_VLANTAG;
3194 }
3195 m = rdata->fmp;
3196 rdata->fmp = NULL;
3197 rdata->lmp = NULL;
3198
3199 if (ifp->if_capenable & IFCAP_RSS) {
3200 pi = emx_rssinfo(m, &pi0, mrq,
3201 rss_hash, staterr);
3202 }
3203#ifdef EMX_RSS_DEBUG
3204 rdata->rx_pkts++;
3205#endif
3206 }
3207 } else {
3208 IFNET_STAT_INC(ifp, ierrors, 1);
5330213c 3209discard:
3210 emx_setup_rxdesc(current_desc, rx_buf);
3211 if (rdata->fmp != NULL) {
3212 m_freem(rdata->fmp);
3213 rdata->fmp = NULL;
3214 rdata->lmp = NULL;
3215 }
3216 m = NULL;
3217 }
3218
3219 if (m != NULL)
3220 ifp->if_input(ifp, m, pi, cpuid);
3221
3222 /* Advance our pointers to the next descriptor. */
3223 if (++i == rdata->num_rx_desc)
3224 i = 0;
3225
3226 current_desc = &rdata->rx_desc[i];
3227 staterr = le32toh(current_desc->rxd_staterr);
3228 }
3229 rdata->next_rx_desc_to_check = i;
3230
3231 /* Advance the E1000's Receive Queue "Tail Pointer". */
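/*
 * next_rx_desc_to_check is one past the last refilled descriptor,
 * while RDT must point at the last descriptor software owns; hence
 * the decrement (with wrap) before writing the register.
 */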
3232 if (--i < 0)
3233 i = rdata->num_rx_desc - 1;
3234 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i);
3235}
3236
3237static void
3238emx_enable_intr(struct emx_softc *sc)
3239{
3240 uint32_t ims_mask = IMS_ENABLE_MASK;
3241
3242 lwkt_serialize_handler_enable(&sc->main_serialize);
3243
3244#if 0
3245 if (sc->hw.mac.type == e1000_82574) {
3246 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK);
3247 ims_mask |= EM_MSIX_MASK;
3248 }
3249#endif
3250 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask);
3251}
3252
3253static void
3254emx_disable_intr(struct emx_softc *sc)
3255{
3256 if (sc->hw.mac.type == e1000_82574)
3257 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0);
5330213c 3258 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
3259
3260 lwkt_serialize_handler_disable(&sc->main_serialize);
3261}
3262
3263/*
3264 * Bit of a misnomer, what this really means is
3265 * to enable OS management of the system... aka
3266 * to disable special hardware management features
3267 */
3268static void
3269emx_get_mgmt(struct emx_softc *sc)
3270{
3271 /* A shared code workaround */
3272 if (sc->flags & EMX_FLAG_HAS_MGMT) {
3273 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
3274 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3275
3276 /* disable hardware interception of ARP */
3277 manc &= ~(E1000_MANC_ARP_EN);
3278
3279 /* enable receiving management packets to the host */
3280 manc |= E1000_MANC_EN_MNG2HOST;
3281#define E1000_MNG2HOST_PORT_623 (1 << 5)
3282#define E1000_MNG2HOST_PORT_664 (1 << 6)
3283 manc2h |= E1000_MNG2HOST_PORT_623;
3284 manc2h |= E1000_MNG2HOST_PORT_664;
3285 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
3286
3287 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3288 }
3289}
3290
3291/*
3292 * Give control back to hardware management
3293 * controller if there is one.
3294 */
3295static void
3296emx_rel_mgmt(struct emx_softc *sc)
3297{
3298 if (sc->flags & EMX_FLAG_HAS_MGMT) {
3299 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3300
3301 /* re-enable hardware interception of ARP */
3302 manc |= E1000_MANC_ARP_EN;
3303 manc &= ~E1000_MANC_EN_MNG2HOST;
3304
3305 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3306 }
3307}
3308
3309/*
3310 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3311 * For ASF and Pass Through versions of f/w this means that
3312 * the driver is loaded. For AMT version (only with 82573)
3313 * of the f/w this means that the network i/f is open.
3314 */
3315static void
3316emx_get_hw_control(struct emx_softc *sc)
3317{
5330213c 3318 /* Let firmware know the driver has taken over */
3319 if (sc->hw.mac.type == e1000_82573) {
3320 uint32_t swsm;
3321
3322 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3323 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3324 swsm | E1000_SWSM_DRV_LOAD);
3325 } else {
3326 uint32_t ctrl_ext;
3327
3328 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3329 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3330 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
5330213c 3331 }
3332 sc->flags |= EMX_FLAG_HW_CTRL;
3333}
3334
3335/*
3336 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3337 * For ASF and Pass Through versions of f/w this means that the
3338 * driver is no longer loaded. For AMT version (only with 82573)
3339 * of the f/w this means that the network i/f is closed.
3340 */
3341static void
3342emx_rel_hw_control(struct emx_softc *sc)
3343{
3344 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0)
3345 return;
3346 sc->flags &= ~EMX_FLAG_HW_CTRL;
3347
3348 /* Let firmware take over control of h/w */
3349 if (sc->hw.mac.type == e1000_82573) {
3350 uint32_t swsm;
3351
3352 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3353 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3354 swsm & ~E1000_SWSM_DRV_LOAD);
3355 } else {
3356 uint32_t ctrl_ext;
3357
3358 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3359 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3360 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3361 }
3362}
3363
3364static int
3365emx_is_valid_eaddr(const uint8_t *addr)
3366{
3367 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
3368
3369 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3370 return (FALSE);
3371
3372 return (TRUE);
3373}
3374
3375/*
3376 * Enable PCI Wake On Lan capability
3377 */
3378void
3379emx_enable_wol(device_t dev)
3380{
3381 uint16_t cap, status;
3382 uint8_t id;
3383
3384 /* First find the capabilities pointer */
3385 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3386
3387 /* Read the PM Capabilities */
3388 id = pci_read_config(dev, cap, 1);
3389 if (id != PCIY_PMG) /* Something wrong */
3390 return;
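/*
 * NOTE: only the capability at the head of the list is examined,
 * so this relies on the power management capability being first;
 * a fuller walk would follow the next pointer at offset cap + 1.
 */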
3391
3392 /*
3393 * OK, we have the power capabilities,
3394 * so now get the status register
3395 */
3396 cap += PCIR_POWER_STATUS;
3397 status = pci_read_config(dev, cap, 2);
3398 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3399 pci_write_config(dev, cap, status, 2);
3400}
3401
3402static void
3403emx_update_stats(struct emx_softc *sc)
3404{
3405 struct ifnet *ifp = &sc->arpcom.ac_if;
3406
3407 if (sc->hw.phy.media_type == e1000_media_type_copper ||
3408 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
3409 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
3410 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
3411 }
3412 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
3413 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
3414 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
3415 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
3416
3417 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
3418 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
3419 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
3420 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
3421 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
3422 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
3423 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
3424 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
3425 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
3426 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
3427 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
3428 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
3429 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
3430 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
3431 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
3432 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
3433 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
3434 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
3435 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
3436 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
3437
3438 /* For the 64-bit byte counters the low dword must be read first. */
3439 /* Both registers clear on the read of the high dword */
3440
3441 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH);
3442 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH);
3443
3444 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
3445 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
3446 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
3447 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC);
3448 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC);
3449
3450 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH);
3451 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH);
3452
3453 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
3454 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
3455 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
3456 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
3457 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
3458 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
3459 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
3460 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
3461 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
3462 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);
3463
3464 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC);
3465 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC);
3466 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS);
3467 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR);
3468 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC);
3469 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC);
3470
3471 IFNET_STAT_SET(ifp, collisions, sc->stats.colc);
3472
3473 /* Rx Errors */
3474 IFNET_STAT_SET(ifp, ierrors,
3475 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc +
3476 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr);
3477
3478 /* Tx Errors */
3479 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol);
3480}
3481
3482static void
3483emx_print_debug_info(struct emx_softc *sc)
3484{
3485 device_t dev = sc->dev;
3486 uint8_t *hw_addr = sc->hw.hw_addr;
3487 int i;
3488
3489 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
3490 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
3491 E1000_READ_REG(&sc->hw, E1000_CTRL),
3492 E1000_READ_REG(&sc->hw, E1000_RCTL));
3493 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
3494 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\
3495 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) );
3496 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3497 sc->hw.fc.high_water, sc->hw.fc.low_water);
3498 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3499 E1000_READ_REG(&sc->hw, E1000_TIDV),
3500 E1000_READ_REG(&sc->hw, E1000_TADV));
3501 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3502 E1000_READ_REG(&sc->hw, E1000_RDTR),
3503