em/emx: Retry interrupt allocation with MSI
[dragonfly.git] / sys / dev / netif / emx / if_emx.c
/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/emx/if_emx.h>

#define DEBUG_HW 0

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id) \
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),
	EMX_DEVICE(82574LA),

	EMX_DEVICE(PCH_LPT_I217_LM),
	EMX_DEVICE(PCH_LPT_I217_V),
	EMX_DEVICE(PCH_LPTLP_I218_LM),
	EMX_DEVICE(PCH_LPTLP_I218_V),
	EMX_DEVICE(PCH_I218_LM2),
	EMX_DEVICE(PCH_I218_V2),
	EMX_DEVICE(PCH_I218_LM3),
	EMX_DEVICE(PCH_I218_V3),
	EMX_DEVICE(PCH_SPT_I219_LM),
	EMX_DEVICE(PCH_SPT_I219_V),
	EMX_DEVICE(PCH_SPT_I219_LM2),
	EMX_DEVICE(PCH_SPT_I219_V2),

	/* required last entry */
	EMX_DEVICE_NULL
};

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	emx_npoll(struct ifnet *, struct ifpoll_info *);
static void	emx_npoll_status(struct ifnet *);
static void	emx_npoll_tx(struct ifnet *, void *, int);
static void	emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void	emx_watchdog(struct ifaltq_subque *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_rxdata *, int);
static void	emx_txeof(struct emx_txdata *);
static void	emx_tx_collect(struct emx_txdata *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_txdata *);
static int	emx_init_rx_ring(struct emx_rxdata *);
static void	emx_free_tx_ring(struct emx_txdata *);
static void	emx_free_rx_ring(struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_txdata *);
static int	emx_create_rx_ring(struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_txdata *, int);
static void	emx_destroy_rx_ring(struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int	emx_txcsum(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int	emx_tso_setup(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int	emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, emx_probe),
	DEVMETHOD(device_attach, emx_attach),
	DEVMETHOD(device_detach, emx_detach),
	DEVMETHOD(device_shutdown, emx_shutdown),
	DEVMETHOD(device_suspend, emx_suspend),
	DEVMETHOD(device_resume, emx_resume),
	DEVMETHOD_END
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = 0;
static int	emx_rxr = 0;
static int	emx_txr = 1;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = 0;

static int	emx_82573_workaround = 1;
static int	emx_msi_enable = 1;

static char	emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_RXPAUSE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);
TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl));

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
		    CSUM_PSEUDO_HDR |
		    CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

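/*
 * Example of the mapping above: a TCP/IPv4 frame whose IP and TCP
 * checksums were both verified by the MAC (IPCS/TCPCS set, IPE/TCPE
 * clear) ends up with CSUM_IP_CHECKED | CSUM_IP_VALID as well as
 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR, so the IP and TCP input paths
 * can skip their software checksum calculations entirely.
 */
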
static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m_sethash(m, toeplitz_hash(hash));
	return pi;
}

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	int error = 0, i, throttle, msi_enable, tx_ring_max;
	u_int intr_flags;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;
	char flowctrl[IFM_ETH_FC_STRLEN];
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	/*
	 * Setup RX rings
	 */
	for (i = 0; i < EMX_NRX_RING; ++i) {
		sc->rx_data[i].sc = sc;
		sc->rx_data[i].idx = i;
	}

	/*
	 * Setup TX rings
	 */
	for (i = 0; i < EMX_NTX_RING; ++i) {
		sc->tx_data[i].sc = sc;
		sc->tx_data[i].idx = i;
	}

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < EMX_NTX_RING; ++i)
		lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->main_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

	KKASSERT(i == EMX_NSERIALIZE);

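	/*
	 * The slot order filled in above is main_serialize first, then
	 * the two TX ring serializers, then the two RX ring serializers;
	 * keeping the main serializer in slot 0 is what allows the
	 * emx_serialize_skipmain()/emx_deserialize_skipmain() helpers
	 * declared earlier to skip it, and the count works out to the
	 * EMX_NSERIALIZE slots asserted above.
	 */
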
	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    emx_media_change, emx_media_status);
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Don't enable MSI-X on 82574, see:
	 * 82574 specification update errata #15
	 *
	 * Don't enable MSI on 82571/82572, see:
	 * 82571/82572 specification update errata #63
	 */
	msi_enable = emx_msi_enable;
	if (msi_enable &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572))
		msi_enable = 0;
again:
	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= EMX_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: %s\n",
		    sc->intr_type == PCI_INTR_TYPE_MSI ? "MSI" : "legacy intr");
		if (!msi_enable) {
			/* Retry with MSI. */
			msi_enable = 1;
			sc->flags &= ~EMX_FLAG_SHARED_INTR;
			goto again;
		}
		error = ENXIO;
		goto fail;
	}

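	/*
	 * NOTE: If the legacy interrupt resource could not be allocated
	 * above (on some boards no usable legacy IRQ is routed to the
	 * device), the error path turns msi_enable on and jumps back to
	 * the "again" label, so the allocation is retried with MSI.  MSI
	 * can also be requested up front via the hw.emx.msi.enable
	 * tunable in loader.conf(5).
	 */
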
	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/*
	 * For I217/I218, we need to map the flash memory and this
	 * must happen after the MAC is identified.
	 */
	if (sc->hw.mac.type == e1000_pch_lpt) {
		sc->flash_rid = EMX_BAR_FLASH;

		sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->flash_rid, RF_ACTIVE);
		if (sc->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
		sc->osdep.flash_bus_space_handle =
		    rman_get_bushandle(sc->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		sc->hw.flash_address = (uint8_t *)sc->flash;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	throttle = device_getenv_int(dev, "int_throttle_ceil",
	    emx_int_throttle_ceil);
	if (throttle == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16bits of ITR is reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}

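	/*
	 * Worked example of the conversion above, assuming a requested
	 * ceiling of 10000 interrupts/s: the 256ns-unit ITR interval is
	 * 1000000000 / 256 / 10000 = 390, and the recomputed ceiling
	 * 1000000000 / 256 / 390 = 10016 interrupts/s, i.e. the exact
	 * frequency the ITR register can actually deliver.
	 */
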
	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

	/* Calculate # of RX rings */
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, EMX_NRX_RING);

	/*
	 * Calculate # of TX rings
	 *
	 * XXX
	 * I217/I218 claims to have 2 TX queues
	 *
	 * NOTE:
	 * Don't enable multiple TX queues on 82574; it always gives a
	 * watchdog timeout on TX queue0 when multiple TCP streams are
	 * received.  It was originally suspected that the hardware TX
	 * checksum offloading caused this watchdog timeout, since only
	 * TCP ACKs are sent during TCP receiving tests.  However, even
	 * with the hardware TX checksum offloading disabled, TX queue0
	 * still triggers the watchdog.
	 */
	tx_ring_max = 1;
	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572 ||
	    sc->hw.mac.type == e1000_80003es2lan ||
	    sc->hw.mac.type == e1000_pch_lpt ||
	    sc->hw.mac.type == e1000_pch_spt ||
	    sc->hw.mac.type == e1000_82574)
		tx_ring_max = EMX_NTX_RING;
	sc->tx_ring_cnt = device_getenv_int(dev, "txr", emx_txr);
	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, tx_ring_max);

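	/*
	 * Both ring counts can be capped from loader.conf(5): e.g.
	 * hw.emx.txr="1" (or a per-device "txr" hint) forces a single
	 * TX ring even on the MACs allowed above, and hw.emx.rxr does
	 * the same for the RX side.
	 */
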
	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Disable EEE on I217/I218 */
	sc->hw.dev_spec.ich8lan.eee_disable = 1;

	/*
	 * Start from a known state; this is important for reading
	 * the NVM and the MAC address.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Disable ULP support */
	e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= EMX_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EMX_EEPROM_APME;
	eeprom_data = 0;
	switch (sc->hw.mac.type) {
	case e1000_82573:
		sc->flags |= EMX_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	default:
		e1000_read_nvm(&sc->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port sc, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	sc->wol = 0;

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->rx_npoll_off = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	if (sc->tx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.txoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->tx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.txoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->tx_npoll_off = offset;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    emx_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

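	/*
	 * Flow control is configured by string: the global default comes
	 * from the hw.emx.flow_ctrl tunable (emx_flowctrl, initialized
	 * from IFM_ETH_FC_RXPAUSE) and may be overridden per device with
	 * a "flow_ctrl" hint; ifmedia_str2ethfc() converts the string
	 * into the IFM_ETH_FCMASK media bits just stored above.
	 */
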
	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree; must be after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Reset the hardware */
	error = emx_reset(sc);
	if (error) {
		/*
		 * Some 82573 parts fail the first reset; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		error = emx_reset(sc);
		if (error) {
			device_printf(dev, "Unable to reset the hardware\n");
			ether_ifdetach(&sc->arpcom.ac_if);
			goto fail;
		}
	}

	/* Initialize statistics */
	emx_update_stats(sc);

	sc->hw.mac.get_link_status = 1;
	emx_update_link_status(sc);

	/* Non-AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    EMX_FLAG_HAS_MGMT)
		emx_get_hw_control(sc);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 */
	intr_func = emx_intr;
	if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572 ||
	     sc->hw.mac.type == e1000_82573 ||
	     sc->hw.mac.type == e1000_82574))
		intr_func = emx_intr_mask;

	error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
	    &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}
	return (0);
fail:
	emx_detach(dev);
	return (error);
}

static int
emx_detach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		emx_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		emx_rel_mgmt(sc);
		emx_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			emx_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->memory != NULL) {
		emx_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}

	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
		    sc->memory);
	}

	if (sc->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
		    sc->flash);
	}

	emx_dma_free(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);

	return (0);
}

static int
emx_shutdown(device_t dev)
{
	return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_stop(sc);

	emx_rel_mgmt(sc);
	emx_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		emx_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	emx_init(sc);
	emx_get_mgmt(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct emx_softc *sc = ifp->if_softc;
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(tdata->ifsq == ifsq);
	ASSERT_SERIALIZED(&tdata->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		/* Now do we at least have a minimal? */
		if (EMX_IS_OACTIVE(tdata)) {
			emx_tx_collect(tdata);
			if (EMX_IS_OACTIVE(tdata)) {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			emx_tx_collect(tdata);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so increasing
		 * opackets at TX interrupt time would make the opackets
		 * statistics vastly inaccurate; we do the opackets increment
		 * now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= tdata->tx_wreg_nsegs) {
			E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
	}
	if (idx >= 0)
		E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
}
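
/*
 * Note the doorbell batching in emx_start() above: TDT is written only
 * once tx_wreg_nsegs descriptors have accumulated, plus once more after
 * the loop for any remainder, rather than after every frame, which
 * trims expensive MMIO writes on the transmit path.
 */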

static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_82574:
		case e1000_pch_lpt:
		case e1000_pch_spt:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			emx_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					emx_disable_promisc(sc);
					emx_set_promisc(sc);
				}
			} else {
				emx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			emx_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			emx_disable_intr(sc);
			emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				emx_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			device_printf(sc->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EMX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			emx_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct emx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
	 */

	if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
	    E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call ifsq_devstart_sched() here.
		 */
		ifsq_clr_oactive(ifsq);
		tdata->tx_watchdog.wd_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT;
		return;
	}

	if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

	IFNET_STAT_INC(ifp, oerrors, 1);

	emx_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);
}

static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_stop(sc);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * With the 82571 sc, RAR[0] may be overwritten when the other
	 * port is reset; we make a duplicate in RAR[14] for that
	 * eventuality, which assures the interface continues to function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_reset(sc)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX emx_stop()? */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
	ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		emx_init_tx_ring(&sc->tx_data[i]);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		if (emx_init_rx_ring(&sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		ifsq_clr_oactive(sc->tx_data[i].ifsq);
		ifsq_watchdog_start(&sc->tx_data[i].tx_watchdog);
	}

	callout_reset(&sc->timer, hz, emx_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
	}
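	/*
	 * Decoding that magic number nibble by nibble (lowest first):
	 * 0x8 routes RXQ0 to vector 0 (enable | entry 0), 0x9 routes
	 * TXQ0 to vector 1 (enable | entry 1), and 0xA routes the
	 * Link/other cause to vector 2 (enable | entry 2), matching
	 * the mapping described in the comment above; bit 31 of the
	 * constant is set as well.
	 */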

	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (polling)
		emx_disable_intr(sc);
	else
		emx_enable_intr(sc);

	/* AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
		emx_get_hw_control(sc);
}

static void
emx_intr(void *xsc)
{
	emx_intr_body(xsc, TRUE);
}

static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle. The ICR register
	 * reports all-ones value in this case. Processing such
	 * interrupts would lead to a freeze. I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_cnt; ++i) {
				lwkt_serialize_enter(
				    &sc->rx_data[i].rx_serialize);
				emx_rxeof(&sc->rx_data[i], -1);
				lwkt_serialize_exit(
				    &sc->rx_data[i].rx_serialize);
			}
		}
		if (reg_icr & E1000_ICR_TXDW) {
			struct emx_txdata *tdata = &sc->tx_data[0];

			lwkt_serialize_enter(&tdata->tx_serialize);
			emx_txeof(tdata);
			if (!ifsq_is_empty(tdata->ifsq))
				ifsq_devstart(tdata->ifsq);
			lwkt_serialize_exit(&tdata->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		emx_serialize_skipmain(sc);

		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);

		emx_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;

	logif(intr_end);
}

static void
emx_intr_mask(void *xsc)
{
	struct emx_softc *sc = xsc;

	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	emx_intr_body(sc, FALSE);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}
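
/*
 * The IMC/IMS bracket in emx_intr_mask() above is the handler used for
 * the "missing interrupt following ICR read" errata noted in
 * emx_attach(): all interrupt sources are masked first, and because
 * ICR.INT_ASSERTED is never set while IMS is 0, the body must run
 * unconditionally (chk_asserted == FALSE) before the mask is lifted.
 */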
1484
5330213c
SZ
1485static void
1486emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1487{
1488 struct emx_softc *sc = ifp->if_softc;
1489
2c9effcf 1490 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5330213c
SZ
1491
1492 emx_update_link_status(sc);
1493
1494 ifmr->ifm_status = IFM_AVALID;
1495 ifmr->ifm_active = IFM_ETHER;
1496
81ac62f7 1497 if (!sc->link_active) {
05297aca
SZ
1498 if (sc->hw.mac.autoneg)
1499 ifmr->ifm_active |= IFM_NONE;
1500 else
1501 ifmr->ifm_active |= sc->media.ifm_media;
5330213c 1502 return;
81ac62f7 1503 }
5330213c
SZ
1504
1505 ifmr->ifm_status |= IFM_ACTIVE;
81ac62f7 1506 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
05297aca 1507 ifmr->ifm_active |= sc->ifm_flowctrl;
5330213c
SZ
1508
1509 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
1510 sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
1511 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1512 } else {
1513 switch (sc->link_speed) {
1514 case 10:
1515 ifmr->ifm_active |= IFM_10_T;
1516 break;
1517 case 100:
1518 ifmr->ifm_active |= IFM_100_TX;
1519 break;
1520
1521 case 1000:
1522 ifmr->ifm_active |= IFM_1000_T;
1523 break;
1524 }
1525 if (sc->link_duplex == FULL_DUPLEX)
1526 ifmr->ifm_active |= IFM_FDX;
1527 else
1528 ifmr->ifm_active |= IFM_HDX;
1529 }
81ac62f7
SZ
1530 if (ifmr->ifm_active & IFM_FDX)
1531 ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
5330213c
SZ
1532}
1533
1534static int
1535emx_media_change(struct ifnet *ifp)
1536{
1537 struct emx_softc *sc = ifp->if_softc;
1538 struct ifmedia *ifm = &sc->media;
1539
2c9effcf 1540 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5330213c
SZ
1541
1542 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1543 return (EINVAL);
1544
1545 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1546 case IFM_AUTO:
1547 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
1548 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
1549 break;
1550
5330213c
SZ
1551 case IFM_1000_SX:
1552 case IFM_1000_T:
1553 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
1554 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1555 break;
1556
1557 case IFM_100_TX:
81ac62f7 1558 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
5330213c 1559 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
81ac62f7
SZ
1560 } else {
1561 if (IFM_OPTIONS(ifm->ifm_media) &
1562 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
1563 if (bootverbose) {
1564 if_printf(ifp, "Flow control is not "
1565 "allowed for half-duplex\n");
1566 }
1567 return EINVAL;
1568 }
5330213c 1569 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
81ac62f7
SZ
1570 }
1571 sc->hw.mac.autoneg = FALSE;
1572 sc->hw.phy.autoneg_advertised = 0;
5330213c
SZ
1573 break;
1574
1575 case IFM_10_T:
81ac62f7 1576 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
5330213c 1577 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
81ac62f7
SZ
1578 } else {
1579 if (IFM_OPTIONS(ifm->ifm_media) &
1580 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
1581 if (bootverbose) {
1582 if_printf(ifp, "Flow control is not "
1583 "allowed for half-duplex\n");
1584 }
1585 return EINVAL;
1586 }
5330213c 1587 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
81ac62f7
SZ
1588 }
1589 sc->hw.mac.autoneg = FALSE;
1590 sc->hw.phy.autoneg_advertised = 0;
5330213c
SZ
1591 break;
1592
1593 default:
81ac62f7
SZ
1594 if (bootverbose) {
1595 if_printf(ifp, "Unsupported media type %d\n",
1596 IFM_SUBTYPE(ifm->ifm_media));
1597 }
1598 return EINVAL;
5330213c 1599 }
81ac62f7 1600 sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;
5330213c 1601
81ac62f7
SZ
1602 if (ifp->if_flags & IFF_RUNNING)
1603 emx_init(sc);
5330213c
SZ
1604
1605 return (0);
1606}
1607
1608static int
7f32a9b0
SZ
1609emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
1610 int *segs_used, int *idx)
5330213c
SZ
1611{
1612 bus_dma_segment_t segs[EMX_MAX_SCATTER];
1613 bus_dmamap_t map;
323e5ecd 1614 struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
5330213c
SZ
1615 struct e1000_tx_desc *ctxd = NULL;
1616 struct mbuf *m_head = *m_headp;
1617 uint32_t txd_upper, txd_lower, cmd = 0;
1618 int maxsegs, nsegs, i, j, first, last = 0, error;
1619
3eb0ea09 1620 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
ec1c60bb 1621 error = emx_tso_pullup(tdata, m_headp);
3eb0ea09
SZ
1622 if (error)
1623 return error;
1624 m_head = *m_headp;
1625 }
1626
5330213c
SZ
1627 txd_upper = txd_lower = 0;
1628
1629 /*
1630 * Capture the first descriptor index, this descriptor
1631 * will have the index of the EOP which is the only one
1632 * that now gets a DONE bit writeback.
1633 */
ec1c60bb
SZ
1634 first = tdata->next_avail_tx_desc;
1635 tx_buffer = &tdata->tx_buf[first];
5330213c
SZ
1636 tx_buffer_mapped = tx_buffer;
1637 map = tx_buffer->map;
1638
ec1c60bb
SZ
1639 maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
1640 KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
5330213c
SZ
1641 if (maxsegs > EMX_MAX_SCATTER)
1642 maxsegs = EMX_MAX_SCATTER;
1643
ec1c60bb 1644 error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
5330213c
SZ
1645 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1646 if (error) {
5330213c
SZ
1647 m_freem(*m_headp);
1648 *m_headp = NULL;
1649 return error;
1650 }
ec1c60bb 1651 bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);
5330213c
SZ
1652
1653 m_head = *m_headp;
ec1c60bb 1654 tdata->tx_nsegs += nsegs;
7f32a9b0 1655 *segs_used += nsegs;
5330213c 1656
3eb0ea09
SZ
1657 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1658 /* TSO will consume one TX desc */
7f32a9b0
SZ
1659 i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
1660 tdata->tx_nsegs += i;
1661 *segs_used += i;
3eb0ea09 1662 } else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
5330213c 1663 /* TX csum offloading will consume one TX desc */
7f32a9b0
SZ
1664 i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
1665 tdata->tx_nsegs += i;
1666 *segs_used += i;
5330213c 1667 }
d37cc902
SZ
1668
1669 /* Handle VLAN tag */
1670 if (m_head->m_flags & M_VLANTAG) {
1671 /* Set the vlan id. */
1672 txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
1673 /* Tell hardware to add tag */
1674 txd_lower |= htole32(E1000_TXD_CMD_VLE);
1675 }
1676
ec1c60bb 1677 i = tdata->next_avail_tx_desc;
5330213c
SZ
1678
1679 /* Set up our transmit descriptors */
1680 for (j = 0; j < nsegs; j++) {
ec1c60bb
SZ
1681 tx_buffer = &tdata->tx_buf[i];
1682 ctxd = &tdata->tx_desc_base[i];
5330213c
SZ
1683
1684 ctxd->buffer_addr = htole64(segs[j].ds_addr);
1685 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
1686 txd_lower | segs[j].ds_len);
1687 ctxd->upper.data = htole32(txd_upper);
1688
1689 last = i;
ec1c60bb 1690 if (++i == tdata->num_tx_desc)
5330213c 1691 i = 0;
5330213c
SZ
1692 }
1693
ec1c60bb 1694 tdata->next_avail_tx_desc = i;
5330213c 1695
ec1c60bb
SZ
1696 KKASSERT(tdata->num_tx_desc_avail > nsegs);
1697 tdata->num_tx_desc_avail -= nsegs;
5330213c 1698
5330213c
SZ
1699 tx_buffer->m_head = m_head;
1700 tx_buffer_mapped->map = tx_buffer->map;
1701 tx_buffer->map = map;
1702
d84018e9 1703 if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
ec1c60bb 1704 tdata->tx_nsegs = 0;
4e4e8481
SZ
1705
1706 /*
1707 * Report Status (RS) is turned on
d84018e9 1708 * every tx_intr_nsegs descriptors.
4e4e8481 1709 */
5330213c
SZ
1710 cmd = E1000_TXD_CMD_RS;
1711
b4b0a2b4
SZ
1712 /*
1713 * Keep track of the descriptor, which will
1714 * be written back by hardware.
1715 */
ec1c60bb
SZ
1716 tdata->tx_dd[tdata->tx_dd_tail] = last;
1717 EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
1718 KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
5330213c
SZ
1719 }
1720
1721 /*
1722 * Last Descriptor of Packet needs End Of Packet (EOP)
5330213c
SZ
1723 */
1724 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);
1725
5330213c 1726 /*
b691889c 1727 * Defer TDT updating, until enough descriptors are setup
5330213c 1728 */
7f32a9b0 1729 *idx = i;
5330213c 1730
d84018e9
SZ
1731#ifdef EMX_TSS_DEBUG
1732 tdata->tx_pkts++;
1733#endif
1734
5330213c
SZ
1735 return (0);
1736}
1737
1738static void
1739emx_set_promisc(struct emx_softc *sc)
1740{
1741 struct ifnet *ifp = &sc->arpcom.ac_if;
1742 uint32_t reg_rctl;
1743
1744 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1745
1746 if (ifp->if_flags & IFF_PROMISC) {
1747 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1748 /* Turn this on if you want to see bad packets */
1749 if (emx_debug_sbp)
1750 reg_rctl |= E1000_RCTL_SBP;
1751 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1752 } else if (ifp->if_flags & IFF_ALLMULTI) {
1753 reg_rctl |= E1000_RCTL_MPE;
1754 reg_rctl &= ~E1000_RCTL_UPE;
1755 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1756 }
1757}
1758
1759static void
1760emx_disable_promisc(struct emx_softc *sc)
1761{
1762 uint32_t reg_rctl;
1763
1764 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1765
1766 reg_rctl &= ~E1000_RCTL_UPE;
1767 reg_rctl &= ~E1000_RCTL_MPE;
1768 reg_rctl &= ~E1000_RCTL_SBP;
1769 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1770}
1771
1772static void
1773emx_set_multi(struct emx_softc *sc)
1774{
1775 struct ifnet *ifp = &sc->arpcom.ac_if;
1776 struct ifmultiaddr *ifma;
1777 uint32_t reg_rctl = 0;
2d0e5700 1778 uint8_t *mta;
1779 int mcnt = 0;
1780
1781 mta = sc->mta;
1782 bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);
1783
441d34b2 1784 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1785 if (ifma->ifma_addr->sa_family != AF_LINK)
1786 continue;
1787
1788 if (mcnt == EMX_MCAST_ADDR_MAX)
1789 break;
1790
1791 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1792 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
1793 mcnt++;
1794 }
1795
1796 if (mcnt >= EMX_MCAST_ADDR_MAX) {
1797 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
1798 reg_rctl |= E1000_RCTL_MPE;
1799 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
1800 } else {
6a5a645e 1801 e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
1802 }
1803}
1804
1805/*
1806 * This routine checks for link status and updates statistics.
1807 */
1808static void
1809emx_timer(void *xsc)
1810{
1811 struct emx_softc *sc = xsc;
1812 struct ifnet *ifp = &sc->arpcom.ac_if;
1813
37e854ff 1814 lwkt_serialize_enter(&sc->main_serialize);
1815
1816 emx_update_link_status(sc);
1817 emx_update_stats(sc);
1818
1819 /* Reset LAA into RAR[0] on 82571 */
1820 if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
1821 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);
1822
1823 if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
1824 emx_print_hw_stats(sc);
1825
1826 emx_smartspeed(sc);
1827
1828 callout_reset(&sc->timer, hz, emx_timer, sc);
1829
37e854ff 1830 lwkt_serialize_exit(&sc->main_serialize);
1831}
1832
1833static void
1834emx_update_link_status(struct emx_softc *sc)
1835{
1836 struct e1000_hw *hw = &sc->hw;
1837 struct ifnet *ifp = &sc->arpcom.ac_if;
1838 device_t dev = sc->dev;
1839 uint32_t link_check = 0;
1840
1841 /* Get the cached link value or read phy for real */
1842 switch (hw->phy.media_type) {
1843 case e1000_media_type_copper:
1844 if (hw->mac.get_link_status) {
1845 /* Do the work to read phy */
1846 e1000_check_for_link(hw);
1847 link_check = !hw->mac.get_link_status;
1848 if (link_check) /* ESB2 fix */
1849 e1000_cfg_on_link_up(hw);
1850 } else {
1851 link_check = TRUE;
1852 }
1853 break;
1854
1855 case e1000_media_type_fiber:
1856 e1000_check_for_link(hw);
1857 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
1858 break;
1859
1860 case e1000_media_type_internal_serdes:
1861 e1000_check_for_link(hw);
1862 link_check = sc->hw.mac.serdes_has_link;
1863 break;
1864
1865 case e1000_media_type_unknown:
1866 default:
1867 break;
1868 }
1869
1870 /* Now check for a transition */
1871 if (link_check && sc->link_active == 0) {
1872 e1000_get_speed_and_duplex(hw, &sc->link_speed,
1873 &sc->link_duplex);
1874
1875 /*
1876 * Check if we should enable/disable SPEED_MODE bit on
1877 * 82571EB/82572EI
1878 */
1879 if (sc->link_speed != SPEED_1000 &&
1880 (hw->mac.type == e1000_82571 ||
1881 hw->mac.type == e1000_82572)) {
1882 int tarc0;
1883
1884 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2d0e5700 1885 tarc0 &= ~EMX_TARC_SPEED_MODE;
1886 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
1887 }
1888 if (bootverbose) {
1889 char flowctrl[IFM_ETH_FC_STRLEN];
1890
1891 e1000_fc2str(hw->fc.current_mode, flowctrl,
1892 sizeof(flowctrl));
1893 device_printf(dev, "Link is up %d Mbps %s, "
1894 "Flow control: %s\n",
5330213c 1895 sc->link_speed,
1896 (sc->link_duplex == FULL_DUPLEX) ?
1897 "Full Duplex" : "Half Duplex",
1898 flowctrl);
1899 }
1900 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
1901 e1000_force_flowctrl(hw, sc->ifm_flowctrl);
1902 sc->link_active = 1;
1903 sc->smartspeed = 0;
1904 ifp->if_baudrate = sc->link_speed * 1000000;
1905 ifp->if_link_state = LINK_STATE_UP;
1906 if_link_state_change(ifp);
1907 } else if (!link_check && sc->link_active == 1) {
1908 ifp->if_baudrate = sc->link_speed = 0;
1909 sc->link_duplex = 0;
1910 if (bootverbose)
1911 device_printf(dev, "Link is Down\n");
1912 sc->link_active = 0;
1913 ifp->if_link_state = LINK_STATE_DOWN;
1914 if_link_state_change(ifp);
1915 }
1916}
1917
1918static void
1919emx_stop(struct emx_softc *sc)
1920{
1921 struct ifnet *ifp = &sc->arpcom.ac_if;
1922 int i;
1923
2c9effcf 1924 ASSERT_IFNET_SERIALIZED_ALL(ifp);
1925
1926 emx_disable_intr(sc);
1927
1928 callout_stop(&sc->timer);
1929
9ed293e0 1930 ifp->if_flags &= ~IFF_RUNNING;
1931 for (i = 0; i < sc->tx_ring_cnt; ++i) {
1932 struct emx_txdata *tdata = &sc->tx_data[i];
1933
1934 ifsq_clr_oactive(tdata->ifsq);
1935 ifsq_watchdog_stop(&tdata->tx_watchdog);
1936 tdata->tx_flags &= ~EMX_TXFLAG_ENABLED;
1937 }
5330213c 1938
1939 /*
1940 * Disable multiple receive queues.
1941 *
1942 * NOTE:
1943 * We should disable multiple receive queues before
1944 * resetting the hardware.
1945 */
1946 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);
1947
1948 e1000_reset_hw(&sc->hw);
1949 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
1950
1951 for (i = 0; i < sc->tx_ring_cnt; ++i)
1952 emx_free_tx_ring(&sc->tx_data[i]);
13890b61 1953 for (i = 0; i < sc->rx_ring_cnt; ++i)
9f831fa8 1954 emx_free_rx_ring(&sc->rx_data[i]);
1955}
1956
1957static int
2d0e5700 1958emx_reset(struct emx_softc *sc)
1959{
1960 device_t dev = sc->dev;
1961 uint16_t rx_buffer_size;
be5807d4 1962 uint32_t pba;
5330213c 1963
1964 /* Set up smart power down as default off on newer adapters. */
1965 if (!emx_smart_pwr_down &&
1966 (sc->hw.mac.type == e1000_82571 ||
1967 sc->hw.mac.type == e1000_82572)) {
1968 uint16_t phy_tmp = 0;
1969
1970 /* Speed up time to link by disabling smart power down. */
1971 e1000_read_phy_reg(&sc->hw,
1972 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1973 phy_tmp &= ~IGP02E1000_PM_SPD;
1974 e1000_write_phy_reg(&sc->hw,
1975 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
1976 }
1977
1978 /*
1979 * Packet Buffer Allocation (PBA)
1980 * Writing PBA sets the receive portion of the buffer;
1981 * the remainder is used for the transmit buffer.
1982 */
1983 switch (sc->hw.mac.type) {
1984 /* Total Packet Buffer on these is 48K */
1985 case e1000_82571:
1986 case e1000_82572:
1987 case e1000_80003es2lan:
1988 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1989 break;
1990
1991 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
1992 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1993 break;
1994
1995 case e1000_82574:
1996 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1997 break;
1998
a5807b81 1999 case e1000_pch_lpt:
524ce499 2000 case e1000_pch_spt:
2001 pba = E1000_PBA_26K;
2002 break;
2003
2004 default:
2005 /* Devices before 82547 had a Packet Buffer of 64K. */
a5807b81 2006 if (sc->hw.mac.max_frame_size > 8192)
be5807d4
SZ
2007 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
2008 else
2009 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
2010 }
2011 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);
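	/*
	 * Worked example of the split: per the comments above, an 82571
	 * has a 48K total packet buffer, so E1000_PBA_32K assigns 32K
	 * to receive and implicitly leaves the remaining 16K for
	 * transmit.
	 */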
2012
2013 /*
2014 * These parameters control the automatic generation (Tx) and
2015 * response (Rx) to Ethernet PAUSE frames.
2016 * - High water mark should allow for at least two frames to be
2017 * received after sending an XOFF.
2018 * - Low water mark works best when it is very near the high water mark.
2019 * This allows the receiver to restart by sending XON when it has
2020 * drained a bit. Here we use an arbitrary value of 1500, which will
2021 * restart after one full frame is pulled from the buffer. There
2022 * could be several smaller frames in the buffer, and if so they
2023 * will not trigger the XON until their total size drains the
2024 * buffer by 1500 bytes.
2025 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2026 */
2027 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;
2028
2029 sc->hw.fc.high_water = rx_buffer_size -
a5807b81 2030 roundup2(sc->hw.mac.max_frame_size, 1024);
2031 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;
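	/*
	 * Example with assumed numbers: a 32KB RX allocation and a
	 * 1518-byte max frame give
	 *   high_water = 32768 - roundup2(1518, 1024) = 32768 - 2048 = 30720
	 *   low_water  = 30720 - 1500 = 29220
	 * so XON is sent again once roughly one full frame has drained
	 * below the XOFF threshold.
	 */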
2032
a5807b81 2033 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
5330213c 2034 sc->hw.fc.send_xon = TRUE;
81ac62f7 2035 sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);
5330213c 2036
2037 /*
2038 * Device specific overrides/settings
2039 */
2040 if (sc->hw.mac.type == e1000_pch_lpt ||
2041 sc->hw.mac.type == e1000_pch_spt) {
2042 sc->hw.fc.high_water = 0x5C20;
2043 sc->hw.fc.low_water = 0x5048;
2044 sc->hw.fc.pause_time = 0x0650;
2045 sc->hw.fc.refresh_time = 0x0400;
2046 /* Jumbos need adjusted PBA */
2047 if (sc->arpcom.ac_if.if_mtu > ETHERMTU)
2048 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12);
2049 else
2050 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26);
2051 } else if (sc->hw.mac.type == e1000_80003es2lan) {
2052 sc->hw.fc.pause_time = 0xFFFF;
2053 }
2054
2055 /* Issue a global reset */
2056 e1000_reset_hw(&sc->hw);
2057 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
6d5e2922 2058 emx_disable_aspm(sc);
2d0e5700 2059
2060 if (e1000_init_hw(&sc->hw) < 0) {
2061 device_printf(dev, "Hardware Initialization Failed\n");
2062 return (EIO);
2063 }
2064
2065 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
2066 e1000_get_phy_info(&sc->hw);
2067 e1000_check_for_link(&sc->hw);
2068
2069 return (0);
2070}
2071
2072static void
2073emx_setup_ifp(struct emx_softc *sc)
2074{
2075 struct ifnet *ifp = &sc->arpcom.ac_if;
dce0b08a 2076 int i;
2077
2078 if_initname(ifp, device_get_name(sc->dev),
2079 device_get_unit(sc->dev));
2080 ifp->if_softc = sc;
2081 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2082 ifp->if_init = emx_init;
2083 ifp->if_ioctl = emx_ioctl;
2084 ifp->if_start = emx_start;
b3a7093f 2085#ifdef IFPOLL_ENABLE
f994de37 2086 ifp->if_npoll = emx_npoll;
5330213c 2087#endif
2088 ifp->if_serialize = emx_serialize;
2089 ifp->if_deserialize = emx_deserialize;
2090 ifp->if_tryserialize = emx_tryserialize;
2091#ifdef INVARIANTS
2092 ifp->if_serialize_assert = emx_serialize_assert;
2093#endif
d84018e9 2094
2095 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc;
2096
d84018e9 2097 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1);
5330213c 2098 ifq_set_ready(&ifp->if_snd);
2099 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);
2100
2101 ifp->if_mapsubq = ifq_mapsubq_mask;
2102 ifq_set_subq_mask(&ifp->if_snd, 0);
5330213c 2103
ae474cfa 2104 ether_ifattach(ifp, sc->hw.mac.addr, NULL);
2105
2106 ifp->if_capabilities = IFCAP_HWCSUM |
2107 IFCAP_VLAN_HWTAGGING |
2108 IFCAP_VLAN_MTU |
2109 IFCAP_TSO;
2110 if (sc->rx_ring_cnt > 1)
2111 ifp->if_capabilities |= IFCAP_RSS;
5330213c 2112 ifp->if_capenable = ifp->if_capabilities;
3eb0ea09 2113 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO;
2114
2115 /*
2116 * Tell the upper layer(s) we support long frames.
2117 */
2118 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
2119
2120 for (i = 0; i < sc->tx_ring_cnt; ++i) {
2121 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
2122 struct emx_txdata *tdata = &sc->tx_data[i];
2123
2124 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res));
2125 ifsq_set_priv(ifsq, tdata);
bfefe4a6 2126 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize);
2127 tdata->ifsq = ifsq;
2128
2129 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog);
2130 }
2131
2132 /*
2133 * Specify the media types supported by this sc and register
2134 * callbacks to update media and link information
2135 */
2136 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
2137 sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
2138 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
2139 0, NULL);
2140 } else {
2141 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
2142 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
2143 0, NULL);
2144 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
2145 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
2146 0, NULL);
2147 if (sc->hw.phy.type != e1000_phy_ife) {
2148 ifmedia_add(&sc->media,
2149 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2150 }
2151 }
2152 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
81ac62f7 2153 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl);
2154}
2155
2156/*
2157 * Workaround for SmartSpeed on 82541 and 82547 controllers
2158 */
2159static void
2160emx_smartspeed(struct emx_softc *sc)
2161{
2162 uint16_t phy_tmp;
2163
2164 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp ||
2165 sc->hw.mac.autoneg == 0 ||
2166 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2167 return;
2168
2169 if (sc->smartspeed == 0) {
2170 /*
2171 * If Master/Slave config fault is asserted twice,
2172 * we assume back-to-back
2173 */
2174 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
2175 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2176 return;
2177 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
2178 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2179 e1000_read_phy_reg(&sc->hw,
2180 PHY_1000T_CTRL, &phy_tmp);
2181 if (phy_tmp & CR_1000T_MS_ENABLE) {
2182 phy_tmp &= ~CR_1000T_MS_ENABLE;
2183 e1000_write_phy_reg(&sc->hw,
2184 PHY_1000T_CTRL, phy_tmp);
2185 sc->smartspeed++;
2186 if (sc->hw.mac.autoneg &&
2187 !e1000_phy_setup_autoneg(&sc->hw) &&
2188 !e1000_read_phy_reg(&sc->hw,
2189 PHY_CONTROL, &phy_tmp)) {
2190 phy_tmp |= MII_CR_AUTO_NEG_EN |
2191 MII_CR_RESTART_AUTO_NEG;
2192 e1000_write_phy_reg(&sc->hw,
2193 PHY_CONTROL, phy_tmp);
2194 }
2195 }
2196 }
2197 return;
2198 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) {
2199 /* If still no link, perhaps using 2/3 pair cable */
2200 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
2201 phy_tmp |= CR_1000T_MS_ENABLE;
2202 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
2203 if (sc->hw.mac.autoneg &&
2204 !e1000_phy_setup_autoneg(&sc->hw) &&
2205 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) {
2206 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
2207 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp);
2208 }
2209 }
2210
2211 /* Restart process after EMX_SMARTSPEED_MAX iterations */
2212 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX)
2213 sc->smartspeed = 0;
2214}
2215
5330213c 2216static int
ec1c60bb 2217emx_create_tx_ring(struct emx_txdata *tdata)
5330213c 2218{
ec1c60bb 2219 device_t dev = tdata->sc->dev;
323e5ecd 2220 struct emx_txbuf *tx_buffer;
b4d8c36b 2221 int error, i, tsize, ntxd;
2222
2223 /*
2224 * Validate the number of transmit descriptors. It must not exceed
2225 * the hardware maximum, and must be a multiple of E1000_DBA_ALIGN.
2226 */
2227 ntxd = device_getenv_int(dev, "txd", emx_txd);
2228 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 ||
2229 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) {
bdca134f 2230 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
b4d8c36b 2231 EMX_DEFAULT_TXD, ntxd);
ec1c60bb 2232 tdata->num_tx_desc = EMX_DEFAULT_TXD;
bdca134f 2233 } else {
ec1c60bb 2234 tdata->num_tx_desc = ntxd;
2235 }
2236
2237 /*
2238 * Allocate Transmit Descriptor ring
2239 */
ec1c60bb 2240 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc),
bdca134f 2241 EMX_DBA_ALIGN);
ec1c60bb 2242 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag,
a596084c 2243 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
2244 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap,
2245 &tdata->tx_desc_paddr);
2246 if (tdata->tx_desc_base == NULL) {
bdca134f 2247 device_printf(dev, "Unable to allocate tx_desc memory\n");
a596084c 2248 return ENOMEM;
bdca134f 2249 }
5330213c 2250
2251 tsize = __VM_CACHELINE_ALIGN(
2252 sizeof(struct emx_txbuf) * tdata->num_tx_desc);
2253 tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);
2254
2255 /*
2256 * Create DMA tags for tx buffers
2257 */
ec1c60bb 2258 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */
2259 1, 0, /* alignment, bounds */
2260 BUS_SPACE_MAXADDR, /* lowaddr */
2261 BUS_SPACE_MAXADDR, /* highaddr */
2262 NULL, NULL, /* filter, filterarg */
2263 EMX_TSO_SIZE, /* maxsize */
2264 EMX_MAX_SCATTER, /* nsegments */
2265 EMX_MAX_SEGSIZE, /* maxsegsize */
2266 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
2267 BUS_DMA_ONEBPAGE, /* flags */
ec1c60bb 2268 &tdata->txtag);
2269 if (error) {
2270 device_printf(dev, "Unable to allocate TX DMA tag\n");
2271 kfree(tdata->tx_buf, M_DEVBUF);
2272 tdata->tx_buf = NULL;
2273 return error;
2274 }
2275
2276 /*
2277 * Create DMA maps for tx buffers
2278 */
2279 for (i = 0; i < tdata->num_tx_desc; i++) {
2280 tx_buffer = &tdata->tx_buf[i];
5330213c 2281
ec1c60bb 2282 error = bus_dmamap_create(tdata->txtag,
2283 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2284 &tx_buffer->map);
2285 if (error) {
2286 device_printf(dev, "Unable to create TX DMA map\n");
ec1c60bb 2287 emx_destroy_tx_ring(tdata, i);
2288 return error;
2289 }
2290 }
2291
2292 /*
2293 * Setup TX parameters
2294 */
2295 tdata->spare_tx_desc = EMX_TX_SPARE;
55471c55 2296 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG;
2297
2298 /*
2299 * Keep the following relationship between spare_tx_desc, oact_tx_desc
2300 * and tx_intr_nsegs:
2301 * (spare_tx_desc + EMX_TX_RESERVED) <=
2302 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs
2303 */
2304 tdata->oact_tx_desc = tdata->num_tx_desc / 8;
2305 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX)
2306 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX;
2307 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED)
2308 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED;
2309
2310 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16;
2311 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc)
2312 tdata->tx_intr_nsegs = tdata->oact_tx_desc;
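	/*
	 * Sketch of the clamping above for a hypothetical 512-descriptor
	 * ring (ignoring the EMX_TX_OACTIVE_MAX cap): oact_tx_desc
	 * starts at 512 / 8 = 64 and tx_intr_nsegs at 512 / 16 = 32;
	 * since 32 < 64, tx_intr_nsegs is raised to 64, preserving the
	 * ordering documented above.
	 */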
2313
2314 /*
1fabd251 2315 * Pull up an extra 4 bytes into the first data segment for TSO; see:
2316 * 82571/82572 specification update errata #7
2317 *
524ce499 2318 * The same applies to I217 (and maybe I218 and I219).
1fabd251 2319 *
2320 * NOTE:
2321 * 4 bytes instead of the 2 bytes mentioned in the errata are
2322 * pulled, mainly to keep the rest of the data properly aligned.
2323 */
2324 if (tdata->sc->hw.mac.type == e1000_82571 ||
1fabd251 2325 tdata->sc->hw.mac.type == e1000_82572 ||
2326 tdata->sc->hw.mac.type == e1000_pch_lpt ||
2327 tdata->sc->hw.mac.type == e1000_pch_spt)
2328 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX;
2329
2330 return (0);
2331}
2332
2333static void
ec1c60bb 2334emx_init_tx_ring(struct emx_txdata *tdata)
2335{
2336 /* Clear the old ring contents */
2337 bzero(tdata->tx_desc_base,
2338 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc);
2339
2340 /* Reset state */
2341 tdata->next_avail_tx_desc = 0;
2342 tdata->next_tx_to_clean = 0;
2343 tdata->num_tx_desc_avail = tdata->num_tx_desc;
2344
2345 tdata->tx_flags |= EMX_TXFLAG_ENABLED;
2346 if (tdata->sc->tx_ring_inuse > 1) {
2347 tdata->tx_flags |= EMX_TXFLAG_FORCECTX;
2348 if (bootverbose) {
2349 if_printf(&tdata->sc->arpcom.ac_if,
2350 "TX %d force ctx setup\n", tdata->idx);
2351 }
2352 }
2353}
2354
2355static void
2356emx_init_tx_unit(struct emx_softc *sc)
2357{
57f26b35 2358 uint32_t tctl, tarc, tipg = 0, txdctl;
2359 int i;
2360
2361 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2362 struct emx_txdata *tdata = &sc->tx_data[i];
2363 uint64_t bus_addr;
5330213c 2364
2365 /* Setup the Base and Length of the Tx Descriptor Ring */
2366 bus_addr = tdata->tx_desc_paddr;
2367 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i),
2368 tdata->num_tx_desc * sizeof(struct e1000_tx_desc));
2369 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i),
2370 (uint32_t)(bus_addr >> 32));
2371 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i),
2372 (uint32_t)bus_addr);
2373 /* Setup the HW Tx Head and Tail descriptor pointers */
2374 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0);
2375 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0);
2376 }
2377
2378 /* Set the default values for the Tx Inter Packet Gap timer */
2379 switch (sc->hw.mac.type) {
2380 case e1000_80003es2lan:
2381 tipg = DEFAULT_82543_TIPG_IPGR1;
2382 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
2383 E1000_TIPG_IPGR2_SHIFT;
2384 break;
2385
2386 default:
2387 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
2388 sc->hw.phy.media_type == e1000_media_type_internal_serdes)
2389 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2390 else
2391 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2392 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2393 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2394 break;
2395 }
2396
2397 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg);
2398
2399 /* NOTE: 0 is not allowed for TIDV */
2400 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1);
2401 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0);
2402
2403 /*
2404 * Errata workaround (obtained from Linux). This is necessary
2405 * to make multiple TX queues work on 82574.
2406 * XXX can't find it in any published errata though.
2407 */
2408 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0));
2409 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl);
2410
2411 if (sc->hw.mac.type == e1000_82571 ||
2412 sc->hw.mac.type == e1000_82572) {
2413 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2414 tarc |= EMX_TARC_SPEED_MODE;
2415 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2416 } else if (sc->hw.mac.type == e1000_80003es2lan) {
2417 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2418 tarc |= 1;
2419 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2420 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2421 tarc |= 1;
2422 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2423 }
2424
2425 /* Program the Transmit Control Register */
2426 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL);
2427 tctl &= ~E1000_TCTL_CT;
2428 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2429 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2430 tctl |= E1000_TCTL_MULR;
2431
2432 /* This write will effectively turn on the transmit unit. */
2433 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl);
2434
2435 if (sc->hw.mac.type == e1000_82571 ||
2436 sc->hw.mac.type == e1000_82572 ||
2437 sc->hw.mac.type == e1000_80003es2lan) {
2438 /* Bit 28 of TARC1 must be cleared when MULR is enabled */
2439 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2440 tarc &= ~(1 << 28);
2441 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2442 }
2443
2444 if (sc->tx_ring_inuse > 1) {
2445 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2446 tarc &= ~EMX_TARC_COUNT_MASK;
2447 tarc |= 1;
2448 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2449
2450 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2451 tarc &= ~EMX_TARC_COUNT_MASK;
2452 tarc |= 1;
2453 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2454 }
2455}
2456
2457static void
ec1c60bb 2458emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc)
5330213c 2459{
323e5ecd 2460 struct emx_txbuf *tx_buffer;
2461 int i;
2462
bdca134f 2463 /* Free Transmit Descriptor ring */
2464 if (tdata->tx_desc_base) {
2465 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap);
2466 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base,
2467 tdata->tx_desc_dmap);
2468 bus_dma_tag_destroy(tdata->tx_desc_dtag);
a596084c 2469
ec1c60bb 2470 tdata->tx_desc_base = NULL;
a596084c 2471 }
bdca134f 2472
ec1c60bb 2473 if (tdata->tx_buf == NULL)
2474 return;
2475
2476 for (i = 0; i < ndesc; i++) {
ec1c60bb 2477 tx_buffer = &tdata->tx_buf[i];
2478
2479 KKASSERT(tx_buffer->m_head == NULL);
ec1c60bb 2480 bus_dmamap_destroy(tdata->txtag, tx_buffer->map);
5330213c 2481 }
ec1c60bb 2482 bus_dma_tag_destroy(tdata->txtag);
5330213c 2483
2484 kfree(tdata->tx_buf, M_DEVBUF);
2485 tdata->tx_buf = NULL;
2486}
2487
2488/*
2489 * The offload context needs to be set when we transfer the first
2490 * packet of a particular protocol (TCP/UDP). This routine has been
2491 * enhanced to deal with inserted VLAN headers.
2492 *
2493 * If the new packet's ether header length, IP header length and
2494 * csum offloading type are the same as the previous packet's, we
2495 * avoid allocating a new csum context descriptor, mainly to take
2496 * advantage of the pipeline effect of the TX data read request.
2497 *
2498 * This function returns the number of TX descriptors allocated
2499 * for the csum context.
2500 */
2501static int
ec1c60bb 2502emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp,
2503 uint32_t *txd_upper, uint32_t *txd_lower)
2504{
2505 struct e1000_context_desc *TXD;
2506 int curr_txd, ehdrlen, csum_flags;
2507 uint32_t cmd, hdr_len, ip_hlen;
2508
2509 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES;
2510 ip_hlen = mp->m_pkthdr.csum_iphlen;
2511 ehdrlen = mp->m_pkthdr.csum_lhlen;
5330213c 2512
2513 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
2514 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen &&
ec1c60bb 2515 tdata->csum_flags == csum_flags) {
2516 /*
2517 * Same csum offload context as the previous packets;
2518 * just return.
2519 */
2520 *txd_upper = tdata->csum_txd_upper;
2521 *txd_lower = tdata->csum_txd_lower;
2522 return 0;
2523 }
2524
2525 /*
2526 * Setup a new csum offload context.
2527 */
2528
2529 curr_txd = tdata->next_avail_tx_desc;
2530 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];
2531
2532 cmd = 0;
2533
2534 /* Setup of IP header checksum. */
2535 if (csum_flags & CSUM_IP) {
2536 /*
2537 * Start offset for header checksum calculation.
2538 * End offset for header checksum calculation.
2539 * Offset of place to put the checksum.
2540 */
2541 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2542 TXD->lower_setup.ip_fields.ipcse =
2543 htole16(ehdrlen + ip_hlen - 1);
2544 TXD->lower_setup.ip_fields.ipcso =
2545 ehdrlen + offsetof(struct ip, ip_sum);
2546 cmd |= E1000_TXD_CMD_IP;
2547 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2548 }
2549 hdr_len = ehdrlen + ip_hlen;
2550
2551 if (csum_flags & CSUM_TCP) {
2552 /*
2553 * Start offset for payload checksum calculation.
2554 * End offset for payload checksum calculation.
2555 * Offset of place to put the checksum.
2556 */
2557 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2558 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2559 TXD->upper_setup.tcp_fields.tucso =
2560 hdr_len + offsetof(struct tcphdr, th_sum);
2561 cmd |= E1000_TXD_CMD_TCP;
2562 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2563 } else if (csum_flags & CSUM_UDP) {
2564 /*
2565 * Start offset for header checksum calculation.
2566 * End offset for header checksum calculation.
2567 * Offset of place to put the checksum.
2568 */
2569 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2570 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2571 TXD->upper_setup.tcp_fields.tucso =
2572 hdr_len + offsetof(struct udphdr, uh_sum);
2573 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2574 }
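	/*
	 * Illustrative offsets for a plain IPv4/TCP packet with a
	 * 14-byte Ethernet header and a 20-byte IP header (an assumed
	 * example, not a hardware requirement): ipcss = 14, ipcse = 33,
	 * ipcso = 24 (ip_sum), tucss = 34 and tucso = 34 + 16 = 50
	 * (th_sum).
	 */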
2575
2576 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
2577 E1000_TXD_DTYP_D; /* Data descr */
2578
2579 /* Save the information for this csum offloading context */
2580 tdata->csum_lhlen = ehdrlen;
2581 tdata->csum_iphlen = ip_hlen;
2582 tdata->csum_flags = csum_flags;
2583 tdata->csum_txd_upper = *txd_upper;
2584 tdata->csum_txd_lower = *txd_lower;
2585
2586 TXD->tcp_seg_setup.data = htole32(0);
2587 TXD->cmd_and_length =
2588 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);
5330213c 2589
ec1c60bb 2590 if (++curr_txd == tdata->num_tx_desc)
2591 curr_txd = 0;
2592
2593 KKASSERT(tdata->num_tx_desc_avail > 0);
2594 tdata->num_tx_desc_avail--;
5330213c 2595
ec1c60bb 2596 tdata->next_avail_tx_desc = curr_txd;
2597 return 1;
2598}
2599
5330213c 2600static void
ec1c60bb 2601emx_txeof(struct emx_txdata *tdata)
5330213c 2602{
323e5ecd 2603 struct emx_txbuf *tx_buffer;
2604 int first, num_avail;
2605
ec1c60bb 2606 if (tdata->tx_dd_head == tdata->tx_dd_tail)
2607 return;
2608
ec1c60bb 2609 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2610 return;
2611
2612 num_avail = tdata->num_tx_desc_avail;
2613 first = tdata->next_tx_to_clean;
5330213c 2614
2615 while (tdata->tx_dd_head != tdata->tx_dd_tail) {
2616 int dd_idx = tdata->tx_dd[tdata->tx_dd_head];
70172a73 2617 struct e1000_tx_desc *tx_desc;
5330213c 2618
ec1c60bb 2619 tx_desc = &tdata->tx_desc_base[dd_idx];
5330213c 2620 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
ec1c60bb 2621 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
5330213c 2622
ec1c60bb 2623 if (++dd_idx == tdata->num_tx_desc)
2624 dd_idx = 0;
2625
2626 while (first != dd_idx) {
2627 logif(pkt_txclean);
2628
2629 num_avail++;
2630
ec1c60bb 2631 tx_buffer = &tdata->tx_buf[first];
5330213c 2632 if (tx_buffer->m_head) {
ec1c60bb 2633 bus_dmamap_unload(tdata->txtag,
2634 tx_buffer->map);
2635 m_freem(tx_buffer->m_head);
2636 tx_buffer->m_head = NULL;
2637 }
2638
ec1c60bb 2639 if (++first == tdata->num_tx_desc)
2640 first = 0;
2641 }
2642 } else {
2643 break;
2644 }
2645 }
2646 tdata->next_tx_to_clean = first;
2647 tdata->num_tx_desc_avail = num_avail;
5330213c 2648
2649 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2650 tdata->tx_dd_head = 0;
2651 tdata->tx_dd_tail = 0;
2652 }
2653
ec1c60bb 2654 if (!EMX_IS_OACTIVE(tdata)) {
d84018e9 2655 ifsq_clr_oactive(tdata->ifsq);
2656
2657 /* All clean, turn off the timer */
ec1c60bb 2658 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
d84018e9 2659 tdata->tx_watchdog.wd_timer = 0;
2660 }
2661}
2662
2663static void
ec1c60bb 2664emx_tx_collect(struct emx_txdata *tdata)
5330213c 2665{
323e5ecd 2666 struct emx_txbuf *tx_buffer;
2667 int tdh, first, num_avail, dd_idx = -1;
2668
ec1c60bb 2669 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2670 return;
2671
d84018e9 2672 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx));
ec1c60bb 2673 if (tdh == tdata->next_tx_to_clean)
2674 return;
2675
2676 if (tdata->tx_dd_head != tdata->tx_dd_tail)
2677 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
5330213c 2678
2679 num_avail = tdata->num_tx_desc_avail;
2680 first = tdata->next_tx_to_clean;
5330213c
SZ
2681
2682 while (first != tdh) {
2683 logif(pkt_txclean);
2684
2685 num_avail++;
2686
ec1c60bb 2687 tx_buffer = &tdata->tx_buf[first];
5330213c 2688 if (tx_buffer->m_head) {
ec1c60bb 2689 bus_dmamap_unload(tdata->txtag,
2690 tx_buffer->map);
2691 m_freem(tx_buffer->m_head);
2692 tx_buffer->m_head = NULL;
2693 }
2694
2695 if (first == dd_idx) {
2696 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
2697 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2698 tdata->tx_dd_head = 0;
2699 tdata->tx_dd_tail = 0;
2700 dd_idx = -1;
2701 } else {
ec1c60bb 2702 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2703 }
2704 }
2705
ec1c60bb 2706 if (++first == tdata->num_tx_desc)
2707 first = 0;
2708 }
2709 tdata->next_tx_to_clean = first;
2710 tdata->num_tx_desc_avail = num_avail;
5330213c 2711
ec1c60bb 2712 if (!EMX_IS_OACTIVE(tdata)) {
d84018e9 2713 ifsq_clr_oactive(tdata->ifsq);
2714
2715 /* All clean, turn off the timer */
ec1c60bb 2716 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
d84018e9 2717 tdata->tx_watchdog.wd_timer = 0;
2718 }
2719}
2720
2721/*
2722 * When link is lost there is sometimes still work pending in the
2723 * TX ring, which would result in a watchdog; rather than allow
2724 * that, do an attempted cleanup and then reinit here. Note that
2725 * this has been seen mostly with fiber adapters.
2726 */
2727static void
2728emx_tx_purge(struct emx_softc *sc)
2729{
d84018e9 2730 int i;
5330213c 2731
2732 if (sc->link_active)
2733 return;
2734
2735 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2736 struct emx_txdata *tdata = &sc->tx_data[i];
2737
2738 if (tdata->tx_watchdog.wd_timer) {
2739 emx_tx_collect(tdata);
2740 if (tdata->tx_watchdog.wd_timer) {
2741 if_printf(&sc->arpcom.ac_if,
2742 "Link lost, TX pending, reinit\n");
2743 emx_init(sc);
2744 return;
2745 }
2746 }
2747 }
2748}
2749
2750static int
9f831fa8 2751emx_newbuf(struct emx_rxdata *rdata, int i, int init)
2752{
2753 struct mbuf *m;
2754 bus_dma_segment_t seg;
2755 bus_dmamap_t map;
323e5ecd 2756 struct emx_rxbuf *rx_buffer;
2757 int error, nseg;
2758
b5523eac 2759 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
5330213c 2760 if (m == NULL) {
5330213c 2761 if (init) {
9f831fa8 2762 if_printf(&rdata->sc->arpcom.ac_if,
2763 "Unable to allocate RX mbuf\n");
2764 }
2765 return (ENOBUFS);
2766 }
2767 m->m_len = m->m_pkthdr.len = MCLBYTES;
2768
a5807b81 2769 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN)
2770 m_adj(m, ETHER_ALIGN);
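	/*
	 * ETHER_ALIGN is 2, so the adjustment above shifts the 14-byte
	 * Ethernet header such that the IP header following it lands on
	 * a 4-byte boundary.
	 */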
2771
2772 error = bus_dmamap_load_mbuf_segment(rdata->rxtag,
2773 rdata->rx_sparemap, m,
2774 &seg, 1, &nseg, BUS_DMA_NOWAIT);
2775 if (error) {
2776 m_freem(m);
2777 if (init) {
9f831fa8 2778 if_printf(&rdata->sc->arpcom.ac_if,
5330213c
SZ
2779 "Unable to load RX mbuf\n");
2780 }
2781 return (error);
2782 }
2783
323e5ecd 2784 rx_buffer = &rdata->rx_buf[i];
5330213c 2785 if (rx_buffer->m_head != NULL)
c39e3a1f 2786 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2787
2788 map = rx_buffer->map;
2789 rx_buffer->map = rdata->rx_sparemap;
2790 rdata->rx_sparemap = map;
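	/*
	 * The swap above keeps every rx_buf[] entry backed by a loaded
	 * DMA map: the spare map, just loaded with the new mbuf, moves
	 * into the slot, while the slot's old map becomes the new spare
	 * for the next call.
	 */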
2791
2792 rx_buffer->m_head = m;
235b9d30 2793 rx_buffer->paddr = seg.ds_addr;
5330213c 2794
235b9d30 2795 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer);
2796 return (0);
2797}
2798
2799static int
9f831fa8 2800emx_create_rx_ring(struct emx_rxdata *rdata)
5330213c 2801{
9f831fa8 2802 device_t dev = rdata->sc->dev;
323e5ecd 2803 struct emx_rxbuf *rx_buffer;
b4d8c36b 2804 int i, error, rsize, nrxd;
2805
2806 /*
2807 * Validate the number of receive descriptors. It must not exceed
2808 * the hardware maximum, and must be a multiple of E1000_DBA_ALIGN.
2809 */
2810 nrxd = device_getenv_int(dev, "rxd", emx_rxd);
2811 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 ||
2812 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) {
bdca134f 2813 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
b4d8c36b 2814 EMX_DEFAULT_RXD, nrxd);
c39e3a1f 2815 rdata->num_rx_desc = EMX_DEFAULT_RXD;
bdca134f 2816 } else {
b4d8c36b 2817 rdata->num_rx_desc = nrxd;
2818 }
2819
2820 /*
2821 * Allocate Receive Descriptor ring
2822 */
235b9d30 2823 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t),
bdca134f 2824 EMX_DBA_ALIGN);
9f831fa8 2825 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag,
a596084c 2826 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
2827 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap,
2828 &rdata->rx_desc_paddr);
235b9d30 2829 if (rdata->rx_desc == NULL) {
bdca134f 2830 device_printf(dev, "Unable to allocate rx_desc memory\n");
a596084c 2831 return ENOMEM;
bdca134f 2832 }
5330213c 2833
2834 rsize = __VM_CACHELINE_ALIGN(
2835 sizeof(struct emx_rxbuf) * rdata->num_rx_desc);
2836 rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);
2837
2838 /*
2839 * Create DMA tag for rx buffers
2840 */
9f831fa8 2841 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */
2842 1, 0, /* alignment, bounds */
2843 BUS_SPACE_MAXADDR, /* lowaddr */
2844 BUS_SPACE_MAXADDR, /* highaddr */
2845 NULL, NULL, /* filter, filterarg */
2846 MCLBYTES, /* maxsize */
2847 1, /* nsegments */
2848 MCLBYTES, /* maxsegsize */
2849 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
c39e3a1f 2850 &rdata->rxtag);
2851 if (error) {
2852 device_printf(dev, "Unable to allocate RX DMA tag\n");
2853 kfree(rdata->rx_buf, M_DEVBUF);
2854 rdata->rx_buf = NULL;
2855 return error;
2856 }
2857
2858 /*
2859 * Create spare DMA map for rx buffers
2860 */
2861 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
2862 &rdata->rx_sparemap);
2863 if (error) {
2864 device_printf(dev, "Unable to create spare RX DMA map\n");
c39e3a1f 2865 bus_dma_tag_destroy(rdata->rxtag);
2866 kfree(rdata->rx_buf, M_DEVBUF);
2867 rdata->rx_buf = NULL;
2868 return error;
2869 }
2870
2871 /*
2872 * Create DMA maps for rx buffers
2873 */
c39e3a1f 2874 for (i = 0; i < rdata->num_rx_desc; i++) {
323e5ecd 2875 rx_buffer = &rdata->rx_buf[i];
5330213c 2876
c39e3a1f 2877 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
2878 &rx_buffer->map);
2879 if (error) {
2880 device_printf(dev, "Unable to create RX DMA map\n");
9f831fa8 2881 emx_destroy_rx_ring(rdata, i);
2882 return error;
2883 }
2884 }
2885 return (0);
2886}
2887
c39e3a1f 2888static void
9f831fa8 2889emx_free_rx_ring(struct emx_rxdata *rdata)
2890{
2891 int i;
2892
2893 for (i = 0; i < rdata->num_rx_desc; i++) {
323e5ecd 2894 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i];
2895
2896 if (rx_buffer->m_head != NULL) {
2897 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2898 m_freem(rx_buffer->m_head);
2899 rx_buffer->m_head = NULL;
2900 }
2901 }
2902
2903 if (rdata->fmp != NULL)
2904 m_freem(rdata->fmp);
2905 rdata->fmp = NULL;
2906 rdata->lmp = NULL;
2907}
2908
2909static void
2910emx_free_tx_ring(struct emx_txdata *tdata)
2911{
2912 int i;
2913
2914 for (i = 0; i < tdata->num_tx_desc; i++) {
2915 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i];
2916
2917 if (tx_buffer->m_head != NULL) {
2918 bus_dmamap_unload(tdata->txtag, tx_buffer->map);
2919 m_freem(tx_buffer->m_head);
2920 tx_buffer->m_head = NULL;
2921 }
2922 }
2923
2924 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX;
2925
2926 tdata->csum_flags = 0;
2927 tdata->csum_lhlen = 0;
2928 tdata->csum_iphlen = 0;
2929 tdata->csum_thlen = 0;
2930 tdata->csum_mss = 0;
2931 tdata->csum_pktlen = 0;
2932
2933 tdata->tx_dd_head = 0;
2934 tdata->tx_dd_tail = 0;
2935 tdata->tx_nsegs = 0;
2936}
2937
5330213c 2938static int
9f831fa8 2939emx_init_rx_ring(struct emx_rxdata *rdata)
2940{
2941 int i, error;
2942
2943 /* Reset descriptor ring */
235b9d30 2944 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc);
2945
2946 /* Allocate new ones. */
c39e3a1f 2947 for (i = 0; i < rdata->num_rx_desc; i++) {
9f831fa8 2948 error = emx_newbuf(rdata, i, 1);
2949 if (error)
2950 return (error);
2951 }
2952
2953 /* Setup our descriptor pointers */
c39e3a1f 2954 rdata->next_rx_desc_to_check = 0;
2955
2956 return (0);
2957}
2958
2959static void
2960emx_init_rx_unit(struct emx_softc *sc)
2961{
2962 struct ifnet *ifp = &sc->arpcom.ac_if;
2963 uint64_t bus_addr;
2d0e5700 2964 uint32_t rctl, itr, rfctl;
3f939c23 2965 int i;
2966
2967 /*
2968 * Make sure receives are disabled while setting
2969 * up the descriptor ring
2970 */
2971 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
2972 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2973
2974 /*
2975 * Set the interrupt throttling rate. Value is calculated
2976 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
2977 */
2978 if (sc->int_throttle_ceil)
2979 itr = 1000000000 / 256 / sc->int_throttle_ceil;
2980 else
2981 itr = 0;
2982 emx_set_itr(sc, itr);
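	/*
	 * Worked example of the formula above, for an assumed
	 * int_throttle_ceil of 10000 interrupts/s:
	 * itr = 1000000000 / 256 / 10000 = 390, i.e. the chip enforces
	 * a minimum gap of about 390 * 256ns ~= 100us between
	 * interrupts.
	 */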
5330213c 2983
2984 /* Use extended RX descriptor */
2985 rfctl = E1000_RFCTL_EXTEN;
2986
5330213c 2987 /* Disable accelerated acknowledgment */
2988 if (sc->hw.mac.type == e1000_82574)
2989 rfctl |= E1000_RFCTL_ACK_DIS;
2990
2991 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl);
5330213c 2992
2993 /*
2994 * Receive Checksum Offload for TCP and UDP
2995 *
2996 * Checksum offloading is also enabled if multiple receive
2997 * queues are to be supported, since we need it to figure out
2998 * the packet type.
2999 */
3000 if ((ifp->if_capenable & IFCAP_RXCSUM) ||
3001 sc->rx_ring_cnt > 1) {
3002 uint32_t rxcsum;
3003
5330213c 3004 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
3005
3006 /*
3007 * NOTE:
3008 * PCSD must be enabled to enable multiple
3009 * receive queues.
3010 */
3011 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
3012 E1000_RXCSUM_PCSD;
3013 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
3014 }
3015
3f939c23 3016 /*
65c7a6af 3017 * Configure multiple receive queue (RSS)
3f939c23 3018 */
13890b61 3019 if (sc->rx_ring_cnt > 1) {
3020 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE];
3021 uint32_t reta;
3022
3023 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING,
3024 ("invalid number of RX ring (%d)", sc->rx_ring_cnt));
89d8e73d 3025
3026 /*
3027 * NOTE:
3028 * When we reach here, RSS has already been disabled
3029 * in emx_stop(), so we could safely configure RSS key
3030 * and redirect table.
3031 */
3f939c23 3032
3033 /*
3034 * Configure RSS key
3035 */
3036 toeplitz_get_key(key, sizeof(key));
3037 for (i = 0; i < EMX_NRSSRK; ++i) {
3038 uint32_t rssrk;
3039
3040 rssrk = EMX_RSSRK_VAL(key, i);
3041 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
3042
3043 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk);
3044 }
3f939c23 3045
65c7a6af 3046 /*
3047 * Configure the RSS redirect table in the following fashion:
3048 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
65c7a6af 3049 */
3050 reta = 0;
3051 for (i = 0; i < EMX_RETA_SIZE; ++i) {
3052 uint32_t q;
3053
13890b61 3054 q = (i % sc->rx_ring_cnt) << EMX_RETA_RINGIDX_SHIFT;
3055 reta |= q << (8 * i);
3056 }
3057 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
3058
3059 for (i = 0; i < EMX_NRETA; ++i)
3060 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta);
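		/*
		 * Example of the resulting pattern: with rx_ring_cnt == 2
		 * the ring index alternates 0,1,0,1 across the four byte
		 * lanes of each 32-bit RETA word, spreading consecutive
		 * hash values evenly over the two RX rings.
		 */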
3f939c23 3061
3062 /*
3063 * Enable multiple receive queues.
3064 * Enable IPv4 RSS standard hash functions.
3065 * Disable RSS interrupt.
3066 */
3067 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
3068 E1000_MRQC_ENABLE_RSS_2Q |
3069 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3070 E1000_MRQC_RSS_FIELD_IPV4);
3071 }
3f939c23 3072
3073 /*
3074 * XXX TEMPORARY WORKAROUND: on some systems with 82573
3075 * long latencies are observed, like Lenovo X60. This
3076 * change eliminates the problem, but since having positive
3077 * values in RDTR is a known source of problems on other
3078 * platforms another solution is being sought.
3079 */
3080 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) {
3081 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573);
3082 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573);
3083 }
3084
13890b61 3085 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3086 struct emx_rxdata *rdata = &sc->rx_data[i];
3087
3088 /*
3089 * Setup the Base and Length of the Rx Descriptor Ring
3090 */
3091 bus_addr = rdata->rx_desc_paddr;
3092 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i),
3093 rdata->num_rx_desc * sizeof(emx_rxdesc_t));
3094 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i),
3095 (uint32_t)(bus_addr >> 32));
3096 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i),
3097 (uint32_t)bus_addr);
3098
3099 /*
3100 * Setup the HW Rx Head and Tail Descriptor Pointers
3101 */
3102 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0);
3103 E1000_WRITE_REG(&sc->hw, E1000_RDT(i),
3104 sc->rx_data[i].num_rx_desc - 1);
3105 }
3106
3107 if (sc->hw.mac.type >= e1000_pch2lan) {
3108 if (ifp->if_mtu > ETHERMTU)
3109 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE);
3110 else
3111 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE);
3112 }
3113
3114 /* Setup the Receive Control Register */
3115 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3116 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3117 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC |
3118 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3119
3120 /* Make sure VLAN Filters are off */
3121 rctl &= ~E1000_RCTL_VFE;
3122
3123 /* Don't store bad packets */
3124 rctl &= ~E1000_RCTL_SBP;
3125
3126 /* MCLBYTES */
3127 rctl |= E1000_RCTL_SZ_2048;
3128
3129 if (ifp->if_mtu > ETHERMTU)
3130 rctl |= E1000_RCTL_LPE;
3131 else
3132 rctl &= ~E1000_RCTL_LPE;
3133
3134 /* Enable Receives */
3135 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl);
3136}
3137
3138static void
9f831fa8 3139emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc)
5330213c 3140{
323e5ecd 3141 struct emx_rxbuf *rx_buffer;
3142 int i;
3143
bdca134f 3144 /* Free Receive Descriptor ring */
235b9d30 3145 if (rdata->rx_desc) {
c39e3a1f 3146 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap);
235b9d30 3147 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc,
3148 rdata->rx_desc_dmap);
3149 bus_dma_tag_destroy(rdata->rx_desc_dtag);
a596084c 3150
235b9d30 3151 rdata->rx_desc = NULL;
a596084c 3152 }
bdca134f 3153
323e5ecd 3154 if (rdata->rx_buf == NULL)
3155 return;
3156
3157 for (i = 0; i < ndesc; i++) {
323e5ecd 3158 rx_buffer = &rdata->rx_buf[i];
3159
3160 KKASSERT(rx_buffer->m_head == NULL);
c39e3a1f 3161 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map);
5330213c 3162 }
3163 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap);
3164 bus_dma_tag_destroy(rdata->rxtag);
5330213c 3165
3166 kfree(rdata->rx_buf, M_DEVBUF);
3167 rdata->rx_buf = NULL;
3168}
3169
3170static void
9f831fa8 3171emx_rxeof(struct emx_rxdata *rdata, int count)
5330213c 3172{
9f831fa8 3173 struct ifnet *ifp = &rdata->sc->arpcom.ac_if;
235b9d30 3174 uint32_t staterr;
235b9d30 3175 emx_rxdesc_t *current_desc;
5330213c 3176 struct mbuf *mp;
ff37a356 3177 int i, cpuid = mycpuid;
5330213c 3178
c39e3a1f 3179 i = rdata->next_rx_desc_to_check;
3180 current_desc = &rdata->rx_desc[i];
3181 staterr = le32toh(current_desc->rxd_staterr);
5330213c 3182
235b9d30 3183 if (!(staterr & E1000_RXD_STAT_DD))
3184 return;
3185
235b9d30 3186 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
9cc86e17 3187 struct pktinfo *pi = NULL, pi0;
235b9d30 3188 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i];
5330213c 3189 struct mbuf *m = NULL;
0acc29d6 3190 int eop, len;
3191
3192 logif(pkt_receive);
3193
235b9d30 3194 mp = rx_buf->m_head;
3195
3196 /*
3197 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3198 * needs to access the last received byte in the mbuf.
3199 */
235b9d30 3200 bus_dmamap_sync(rdata->rxtag, rx_buf->map,
3201 BUS_DMASYNC_POSTREAD);
3202
0acc29d6 3203 len = le16toh(current_desc->rxd_length);
235b9d30 3204 if (staterr & E1000_RXD_STAT_EOP) {
3205 count--;
3206 eop = 1;
3207 } else {
3208 eop = 0;
3209 }
3210
3211 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3212 uint16_t vlan = 0;
3f939c23 3213 uint32_t mrq, rss_hash;
3214
3215 /*
3216 * Save some necessary information
3217 * before emx_newbuf() destroys it.
3218 */
3219 if ((staterr & E1000_RXD_STAT_VP) && eop)
3220 vlan = le16toh(current_desc->rxd_vlan);
5330213c 3221
3222 mrq = le32toh(current_desc->rxd_mrq);
3223 rss_hash = le32toh(current_desc->rxd_rss);
3224
9f831fa8 3225 EMX_RSS_DPRINTF(rdata->sc, 10,
3f939c23 3226 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n",
9f831fa8 3227 rdata->idx, mrq, rss_hash);
3f939c23 3228
9f831fa8 3229 if (emx_newbuf(rdata, i, 0) != 0) {
d40991ef 3230 IFNET_STAT_INC(ifp, iqdrops, 1);
3231 goto discard;
3232 }
3233
3234 /* Assign correct length to the current fragment */
3235 mp->m_len = len;
3236
c39e3a1f 3237 if (rdata->fmp == NULL) {
5330213c 3238 mp->m_pkthdr.len = len;
3239 rdata->fmp = mp; /* Store the first mbuf */
3240 rdata->lmp = mp;
3241 } else {
3242 /*
3243 * Chain mbuf's together
3244 */
3245 rdata->lmp->m_next = mp;
3246 rdata->lmp = rdata->lmp->m_next;
3247 rdata->fmp->m_pkthdr.len += len;
3248 }
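			/*
			 * Chaining example: a 4000-byte frame received into
			 * 2048-byte clusters arrives as two descriptors; the
			 * first sets up fmp/lmp, the second is appended here,
			 * and fmp->m_pkthdr.len reaches 4000 by the time EOP
			 * is seen.
			 */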
3249
3250 if (eop) {
c39e3a1f 3251 rdata->fmp->m_pkthdr.rcvif = ifp;
d40991ef 3252 IFNET_STAT_INC(ifp, ipackets, 1);
5330213c 3253
3254 if (ifp->if_capenable & IFCAP_RXCSUM)
3255 emx_rxcsum(staterr, rdata->fmp);
5330213c 3256
235b9d30 3257 if (staterr & E1000_RXD_STAT_VP) {
c39e3a1f 3258 rdata->fmp->m_pkthdr.ether_vlantag =
235b9d30 3259 vlan;
c39e3a1f 3260 rdata->fmp->m_flags |= M_VLANTAG;
5330213c 3261 }
3262 m = rdata->fmp;
3263 rdata->fmp = NULL;
3264 rdata->lmp = NULL;
3f939c23 3265
3266 if (ifp->if_capenable & IFCAP_RSS) {
3267 pi = emx_rssinfo(m, &pi0, mrq,
3268 rss_hash, staterr);
3269 }
3270#ifdef EMX_RSS_DEBUG
3271 rdata->rx_pkts++;
3272#endif
3273 }
3274 } else {
d40991ef 3275 IFNET_STAT_INC(ifp, ierrors, 1);
5330213c 3276discard:
235b9d30 3277 emx_setup_rxdesc(current_desc, rx_buf);
3278 if (rdata->fmp != NULL) {
3279 m_freem(rdata->fmp);
3280 rdata->fmp = NULL;
3281 rdata->lmp = NULL;
3282 }
3283 m = NULL;
3284 }
3285
5330213c 3286 if (m != NULL)
be4134c6 3287 ifp->if_input(ifp, m, pi, cpuid);
3288
3289 /* Advance our pointers to the next descriptor. */
c39e3a1f 3290 if (++i == rdata->num_rx_desc)
5330213c 3291 i = 0;
3292
3293 current_desc = &rdata->rx_desc[i];
3294 staterr = le32toh(current_desc->rxd_staterr);
5330213c 3295 }
c39e3a1f 3296 rdata->next_rx_desc_to_check = i;
5330213c 3297
3f939c23 3298 /* Advance the E1000's Receive Queue "Tail Pointer". */
5330213c 3299 if (--i < 0)
c39e3a1f 3300 i = rdata->num_rx_desc - 1;
9f831fa8 3301 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i);
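	/*
	 * Note on the tail write above: "i" was stepped back by one, so
	 * RDT points at the last descriptor actually refilled (e.g. if
	 * the loop stopped at index 0, RDT is written as
	 * num_rx_desc - 1), keeping the software tail from catching up
	 * with the hardware head.
	 */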
3302}
3303
3304static void
3305emx_enable_intr(struct emx_softc *sc)
3306{
3307 uint32_t ims_mask = IMS_ENABLE_MASK;
3308
6d435846 3309 lwkt_serialize_handler_enable(&sc->main_serialize);
3310
3311#if 0
3312 if (sc->hw.mac.type == e1000_82574) {
3313 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK);
3314 ims_mask |= EM_MSIX_MASK;
3315 }
3316#endif
3317 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask);
3318}
3319
3320static void
3321emx_disable_intr(struct emx_softc *sc)
3322{
3323 if (sc->hw.mac.type == e1000_82574)
3324 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0);
5330213c 3325 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
2d0e5700 3326
6d435846 3327 lwkt_serialize_handler_disable(&sc->main_serialize);
3328}
3329
3330/*
3331 * Bit of a misnomer: what this really means is
3332 * to enable OS management of the system, i.e.
3333 * to disable special hardware management features
3334 */
3335static void
3336emx_get_mgmt(struct emx_softc *sc)
3337{
3338 /* A shared code workaround */
de0836d4 3339 if (sc->flags & EMX_FLAG_HAS_MGMT) {
3340 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
3341 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3342
3343 /* disable hardware interception of ARP */
3344 manc &= ~(E1000_MANC_ARP_EN);
3345
3346 /* enable receiving management packets to the host */
3347 manc |= E1000_MANC_EN_MNG2HOST;
3348#define E1000_MNG2HOST_PORT_623 (1 << 5)
3349#define E1000_MNG2HOST_PORT_664 (1 << 6)
3350 manc2h |= E1000_MNG2HOST_PORT_623;
3351 manc2h |= E1000_MNG2HOST_PORT_664;
3352 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
3353
3354 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3355 }
3356}
3357
3358/*
3359 * Give control back to hardware management
3360 * controller if there is one.
3361 */
3362static void
3363emx_rel_mgmt(struct emx_softc *sc)
3364{
de0836d4 3365 if (sc->flags & EMX_FLAG_HAS_MGMT) {
3366 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3367
3368 /* re-enable hardware interception of ARP */
3369 manc |= E1000_MANC_ARP_EN;
3370 manc &= ~E1000_MANC_EN_MNG2HOST;
3371
3372 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3373 }
3374}
3375
3376/*
3377 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3378 * For ASF and Pass Through versions of f/w this means that
3379 * the driver is loaded. For AMT version (only with 82573)
3380 * of the f/w this means that the network i/f is open.
3381 */
3382static void
3383emx_get_hw_control(struct emx_softc *sc)
3384{
5330213c 3385 /* Let firmware know the driver has taken over */
3386 if (sc->hw.mac.type == e1000_82573) {
3387 uint32_t swsm;
3388
3389 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3390 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3391 swsm | E1000_SWSM_DRV_LOAD);
3392 } else {
3393 uint32_t ctrl_ext;
5330213c 3394
3395 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3396 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3397 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
5330213c 3398 }
de0836d4 3399 sc->flags |= EMX_FLAG_HW_CTRL;
3400}
3401
3402/*
3403 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3404 * For ASF and Pass Through versions of f/w this means that the
3405 * driver is no longer loaded. For AMT version (only with 82573)
3406 * of the f/w this means that the network i/f is closed.
3407 */
3408static void
3409emx_rel_hw_control(struct emx_softc *sc)
3410{
de0836d4 3411 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0)
2d0e5700 3412 return;
de0836d4 3413 sc->flags &= ~EMX_FLAG_HW_CTRL;
3414
3415 /* Let firmware take over control of the h/w */
3416 if (sc->hw.mac.type == e1000_82573) {
3417 uint32_t swsm;
3418
3419 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3420 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3421 swsm & ~E1000_SWSM_DRV_LOAD);
3422 } else {
3423 uint32_t ctrl_ext;
5330213c 3424
3425 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3426 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3427 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3428 }
3429}
3430
3431static int
3432emx_is_valid_eaddr(const uint8_t *addr)
3433{
3434 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
3435
3436 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3437 return (FALSE);
3438
3439 return (TRUE);
3440}
3441
3442/*
3443 * Enable PCI Wake On Lan capability
3444 */
3445void
3446emx_enable_wol(device_t dev)
3447{
3448 uint16_t cap, status;
3449 uint8_t id;
3450
3451 /* First find the capabilities pointer */
3452 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3453
3454 /* Read the PM Capabilities */
3455 id = pci_read_config(dev, cap, 1);
3456 if (id != PCIY_PMG) /* Something wrong */
3457 return;
3458
3459 /*
3460 * OK, we have the power capabilities,
3461 * so now get the status register
3462 */
3463 cap += PCIR_POWER_STATUS;
3464 status = pci_read_config(dev, cap, 2);
3465 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3466 pci_write_config(dev, cap, status, 2);
3467}
3468
3469static void
3470emx_update_stats(struct emx_softc *sc)
3471{
3472 struct ifnet *ifp = &sc->arpcom.ac_if;