/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/if_ringmap.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/emx/if_emx.h>
#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */
#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id) \
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }
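
/*
 * Example expansion: EMX_DEVICE(82571EB_COPPER) produces the table entry
 * { EMX_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER,
 *   "Intel(R) PRO/1000 82571EB_COPPER" }.
 */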
static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),

	EMX_DEVICE(82573E_IAMT),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(PCH_LPT_I217_LM),
	EMX_DEVICE(PCH_LPT_I217_V),
	EMX_DEVICE(PCH_LPTLP_I218_LM),
	EMX_DEVICE(PCH_LPTLP_I218_V),
	EMX_DEVICE(PCH_I218_LM2),
	EMX_DEVICE(PCH_I218_V2),
	EMX_DEVICE(PCH_I218_LM3),
	EMX_DEVICE(PCH_I218_V3),
	EMX_DEVICE(PCH_SPT_I219_LM),
	EMX_DEVICE(PCH_SPT_I219_V),
	EMX_DEVICE(PCH_SPT_I219_LM2),
	EMX_DEVICE(PCH_SPT_I219_V2),
	EMX_DEVICE(PCH_LBG_I219_LM3),
	EMX_DEVICE(PCH_SPT_I219_LM4),
	EMX_DEVICE(PCH_SPT_I219_V4),
	EMX_DEVICE(PCH_SPT_I219_LM5),
	EMX_DEVICE(PCH_SPT_I219_V5),
	EMX_DEVICE(PCH_CNP_I219_LM6),
	EMX_DEVICE(PCH_CNP_I219_V6),
	EMX_DEVICE(PCH_CNP_I219_LM7),
	EMX_DEVICE(PCH_CNP_I219_V7),
	EMX_DEVICE(PCH_CNP_I219_LM8),
	EMX_DEVICE(PCH_CNP_I219_V8),
	EMX_DEVICE(PCH_CNP_I219_LM9),
	EMX_DEVICE(PCH_CNP_I219_V9),
	EMX_DEVICE(PCH_CNP_I219_LM10),
	EMX_DEVICE(PCH_CNP_I219_V10),
	EMX_DEVICE(PCH_CNP_I219_LM11),
	EMX_DEVICE(PCH_CNP_I219_V11),
	EMX_DEVICE(PCH_CNP_I219_LM12),
	EMX_DEVICE(PCH_CNP_I219_V12),
	EMX_DEVICE(PCH_CNP_I219_LM13),
	EMX_DEVICE(PCH_CNP_I219_V13),
	EMX_DEVICE(PCH_CNP_I219_LM14),
	EMX_DEVICE(PCH_CNP_I219_V14),

	/* required last entry */
	EMX_DEVICE_NULL
};
static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	emx_npoll(struct ifnet *, struct ifpoll_info *);
static void	emx_npoll_status(struct ifnet *);
static void	emx_npoll_tx(struct ifnet *, void *, int);
static void	emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void	emx_watchdog(struct ifaltq_subque *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_rxdata *, int);
static void	emx_txeof(struct emx_txdata *);
static void	emx_tx_collect(struct emx_txdata *, boolean_t);
static void	emx_txgc_timer(void *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_txdata *);
static int	emx_init_rx_ring(struct emx_rxdata *);
static void	emx_free_tx_ring(struct emx_txdata *);
static void	emx_free_rx_ring(struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_txdata *);
static int	emx_create_rx_ring(struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_txdata *, int);
static void	emx_destroy_rx_ring(struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int	emx_txcsum(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int	emx_tso_setup(struct emx_txdata *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);
static void	emx_flush_tx_ring(struct emx_softc *);
static void	emx_flush_rx_ring(struct emx_softc *);
static void	emx_flush_txrx_ring(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);
static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, emx_probe),
	DEVMETHOD(device_attach, emx_attach),
	DEVMETHOD(device_detach, emx_detach),
	DEVMETHOD(device_shutdown, emx_shutdown),
	DEVMETHOD(device_suspend, emx_suspend),
	DEVMETHOD(device_resume, emx_resume),
	DEVMETHOD_END
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);
static int emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int emx_rxd = EMX_DEFAULT_RXD;
static int emx_txd = EMX_DEFAULT_TXD;
static int emx_smart_pwr_down = 0;
static int emx_rxr = 0;
static int emx_txr = 1;

/* Controls whether promiscuous mode also shows bad packets */
static int emx_debug_sbp = 0;

static int emx_82573_workaround = 1;
static int emx_msi_enable = 1;

static char emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_NONE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);
TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl));

/* Global used in WOL setup with multiport cards */
static int emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int emx_display_debug_stats = 0;
#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)
static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_free_txbuf(struct emx_txdata *tdata, struct emx_txbuf *tx_buffer)
{
	KKASSERT(tx_buffer->m_head != NULL);
	KKASSERT(tdata->tx_nmbuf > 0);

	bus_dmamap_unload(tdata->txtag, tx_buffer->map);
	m_freem(tx_buffer->m_head);
	tx_buffer->m_head = NULL;
}

static __inline void
emx_tx_intr(struct emx_txdata *tdata)
{
	if (!ifsq_is_empty(tdata->ifsq))
		ifsq_devstart(tdata->ifsq);
}
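
/*
 * Lazy TX garbage collection: tx_running is a countdown that is
 * re-armed whenever packets are queued.  Once it drains to zero
 * while mbufs are still outstanding and some descriptors remain
 * uncompleted, force a cleanup pass so that an idle queue does
 * not hold transmit buffers indefinitely.
 */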
static __inline void
emx_try_txgc(struct emx_txdata *tdata, int16_t dec)
{
	if (tdata->tx_running > 0) {
		tdata->tx_running -= dec;
		if (tdata->tx_running <= 0 && tdata->tx_nmbuf &&
		    tdata->num_tx_desc_avail < tdata->num_tx_desc &&
		    tdata->num_tx_desc_avail + tdata->tx_intr_nsegs >
		    tdata->num_tx_desc)
			emx_tx_collect(tdata, TRUE);
	}
}
static void
emx_txgc_timer(void *xtdata)
{
	struct emx_txdata *tdata = xtdata;
	struct ifnet *ifp = &tdata->sc->arpcom.ac_if;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP))
		return;

	if (!lwkt_serialize_try(&tdata->tx_serialize))
		goto done;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_UP | IFF_NPOLLING)) !=
	    (IFF_RUNNING | IFF_UP)) {
		lwkt_serialize_exit(&tdata->tx_serialize);
		return;
	}
	emx_try_txgc(tdata, EMX_TX_RUNNING_DEC);

	lwkt_serialize_exit(&tdata->tx_serialize);
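	/*
	 * Re-arm for the next tick; the timer keeps re-scheduling
	 * itself until emx_stop() cancels it via callout_stop().
	 */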
done:
	callout_reset(&tdata->tx_gc_timer, 1, emx_txgc_timer, tdata);
}
static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore the checksum status if the Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
	}

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
		    CSUM_PSEUDO_HDR |
		    CSUM_FRAG_NOT_CHECKED;
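		/*
		 * A pseudo checksum value of 0xffff together with
		 * CSUM_DATA_VALID tells the stack that the hardware
		 * already verified the TCP/UDP checksum, so no
		 * software recomputation is needed.
		 */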
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}
static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		    (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

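	/*
	 * Record the RSS hash computed by the hardware on the mbuf so
	 * the stack can dispatch the packet to the CPU that matches
	 * the hardware's queue selection, without rehashing in software.
	 */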
	m_sethash(m, toeplitz_hash(hash));
	return pi;
}
static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}
static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	int error = 0, i, throttle, msi_enable;
	int tx_ring_max, ring_cnt;
	u_int intr_flags;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;
	char flowctrl[IFM_ETH_FC_STRLEN];

	for (i = 0; i < EMX_NRX_RING; ++i) {
		sc->rx_data[i].sc = sc;
		sc->rx_data[i].idx = i;
	}

	for (i = 0; i < EMX_NTX_RING; ++i) {
		sc->tx_data[i].sc = sc;
		sc->tx_data[i].idx = i;
		callout_init_mp(&sc->tx_data[i].tx_gc_timer);
	}

	/*
	 * Initialize serializers
	 */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < EMX_NTX_RING; ++i)
		lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	/*
	 * Initialize serializer array
	 */
	i = 0;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->main_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	KKASSERT(i < EMX_NSERIALIZE);
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

	KKASSERT(i == EMX_NSERIALIZE);

	ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
	    emx_media_change, emx_media_status);
	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Don't enable MSI-X on 82574, see:
	 * 82574 specification update errata #15
	 *
	 * Don't enable MSI on 82571/82572, see:
	 * 82571/82572 specification update errata #63
	 */
	msi_enable = emx_msi_enable;
	if (msi_enable &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572))
		msi_enable = 0;

	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= EMX_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: %s\n",
		    sc->intr_type == PCI_INTR_TYPE_MSI ? "MSI" : "legacy intr");

		/* Retry with MSI. */
		msi_enable = 1;
		sc->flags &= ~EMX_FLAG_SHARED_INTR;
		sc->intr_type = pci_alloc_1intr(dev, msi_enable,
		    &sc->intr_rid, &intr_flags);
		sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->intr_rid, intr_flags);
		if (sc->intr_res == NULL) {
			device_printf(dev,
			    "Unable to allocate bus resource: MSI\n");
			error = ENXIO;
			goto fail;
		}
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/*
	 * For I217/I218, we need to map the flash memory and this
	 * must happen after the MAC is identified.
	 */
	if (sc->hw.mac.type == e1000_pch_lpt) {
		sc->flash_rid = EMX_BAR_FLASH;

		sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->flash_rid, RF_ACTIVE);
		if (sc->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
		sc->osdep.flash_bus_space_handle =
		    rman_get_bushandle(sc->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		sc->hw.flash_address = (uint8_t *)sc->flash;
	} else if (sc->hw.mac.type >= e1000_pch_spt) {
		/*
		 * In the new SPT device flash is not a separate BAR;
		 * rather it is also in BAR0, so use the same tag and
		 * an offset handle for the FLASH read/write macros
		 * in the shared code.
		 */
		sc->osdep.flash_bus_space_tag = sc->osdep.mem_bus_space_tag;
		sc->osdep.flash_bus_space_handle =
		    sc->osdep.mem_bus_space_handle + E1000_FLASH_BASE_ADDR;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	throttle = device_getenv_int(dev, "int_throttle_ceil",
	    emx_int_throttle_ceil);
	if (throttle == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16 bits of ITR are reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}
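
	/*
	 * Worked example: a ceiling of 6000 interrupts/s maps to an ITR
	 * value of 1000000000 / 256 / 6000 = 651 (in 256ns units), and
	 * converting back gives 1000000000 / 256 / 651 = 6000.
	 */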
	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard Ethernet sized frames. */
	sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

	/*
	 * Calculate # of RX/TX rings
	 */
	ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
	sc->rx_rmap = if_ringmap_alloc(dev, ring_cnt, EMX_NRX_RING);

	tx_ring_max = 1;
	if (sc->hw.mac.type == e1000_82571 ||
	    sc->hw.mac.type == e1000_82572 ||
	    sc->hw.mac.type == e1000_80003es2lan ||
	    sc->hw.mac.type == e1000_pch_lpt ||
	    sc->hw.mac.type == e1000_pch_spt ||
	    sc->hw.mac.type == e1000_pch_cnp ||
	    sc->hw.mac.type == e1000_82574)
		tx_ring_max = EMX_NTX_RING;
	ring_cnt = device_getenv_int(dev, "txr", emx_txr);
	sc->tx_rmap = if_ringmap_alloc(dev, ring_cnt, tx_ring_max);

	if_ringmap_match(dev, sc->rx_rmap, sc->tx_rmap);
	sc->rx_ring_cnt = if_ringmap_count(sc->rx_rmap);
	sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);

	/* Allocate RX/TX rings' busdma(9) stuff */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Disable EEE on I217/I218 */
	sc->hw.dev_spec.ich8lan.eee_disable = 1;

	/*
	 * Start from a known state; this is important for reading
	 * the NVM and MAC address from it.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it is a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Disable ULP support */
	e1000_disable_ulp_lpt_lp(&sc->hw, TRUE);

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= EMX_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EMX_EEPROM_APME;
	eeprom_data = 0;
	switch (sc->hw.mac.type) {
	case e1000_82573:
		sc->flags |= EMX_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
		apme_mask = E1000_WUC_APME;
		sc->flags |= EMX_FLAG_HAS_AMT;
		eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC);
		break;

	default:
		e1000_read_nvm(&sc->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the EEPROM settings; now apply the special cases
	 * where the EEPROM may be wrong or the board won't support
	 * wake-on-LAN on a particular port.
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of EEPROM setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* If this is a quad port adapter, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */

	/* Initialize the # of TX rings to use. */
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE);

	/* Setup flow control. */
	device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl),
	    emx_flowctrl);
	sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl);

	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree; this must come after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Reset the hardware */
	error = emx_reset(sc);
	if (error) {
		/*
		 * Some 82573 parts fail the first reset; call it again.
		 * If it fails a second time, it is a real issue.
		 */
		error = emx_reset(sc);
		if (error) {
			device_printf(dev, "Unable to reset the hardware\n");
			ether_ifdetach(&sc->arpcom.ac_if);
			goto fail;
		}
	}

	/* Initialize statistics */
	emx_update_stats(sc);

	sc->hw.mac.get_link_status = 1;
	emx_update_link_status(sc);

	/* Non-AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    EMX_FLAG_HAS_MGMT)
		emx_get_hw_control(sc);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 */
	intr_func = emx_intr;
	if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572 ||
	     sc->hw.mac.type == e1000_82573 ||
	     sc->hw.mac.type == e1000_82574))
		intr_func = emx_intr_mask;
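
	/*
	 * emx_intr_mask() masks all interrupts via IMC before reading
	 * ICR and re-enables them afterwards, so a shared interrupt
	 * cannot be lost to the errata above.
	 */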
	error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
	    &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}
	return 0;
fail:
	emx_detach(dev);
	return error;
}
static int
emx_detach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		emx_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		emx_rel_mgmt(sc);
		emx_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			emx_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->memory != NULL) {
		emx_rel_hw_control(sc);
	}

	ifmedia_removeall(&sc->media);
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}

	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
		    sc->memory);
	}

	if (sc->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid,
		    sc->flash);
	}

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);

	if (sc->rx_rmap != NULL)
		if_ringmap_free(sc->rx_rmap);
	if (sc->tx_rmap != NULL)
		if_ringmap_free(sc->tx_rmap);

	emx_dma_free(sc);

	return 0;
}
static int
emx_shutdown(device_t dev)
{
	return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_stop(sc);

	emx_rel_mgmt(sc);
	emx_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		emx_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	emx_init(sc);
	emx_get_mgmt(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}
static void
emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct emx_softc *sc = ifp->if_softc;
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	KKASSERT(tdata->ifsq == ifsq);
	ASSERT_SERIALIZED(&tdata->tx_serialize);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
		return;

	if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) {
		ifsq_purge(ifsq);
		return;
	}

	while (!ifsq_is_empty(ifsq)) {
		/* Do we have the minimal number of free TX descriptors? */
		if (EMX_IS_OACTIVE(tdata)) {
			emx_tx_collect(tdata, FALSE);
			if (EMX_IS_OACTIVE(tdata)) {
				ifsq_set_oactive(ifsq);
				break;
			}
		}

		m_head = ifsq_dequeue(ifsq);
		if (m_head == NULL)
			break;

		if (emx_encap(tdata, &m_head, &nsegs, &idx)) {
			IFNET_STAT_INC(ifp, oerrors, 1);
			emx_tx_collect(tdata, FALSE);
			continue;
		}

		/*
		 * TX interrupts are aggressively aggregated, so increasing
		 * opackets at TX interrupt time would make the opackets
		 * statistic vastly inaccurate; we do the opackets increment
		 * now.
		 */
		IFNET_STAT_INC(ifp, opackets, 1);

		if (nsegs >= tdata->tx_wreg_nsegs) {
			E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		ifsq_watchdog_set_count(&tdata->tx_watchdog, EMX_TX_TIMEOUT);
	}
	if (idx >= 0)
		E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx);
	tdata->tx_running = EMX_TX_RUNNING;
}
static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			emx_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					emx_disable_promisc(sc);
					emx_set_promisc(sc);
				}
			} else {
				emx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			emx_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			emx_disable_intr(sc);
			emx_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				emx_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			device_printf(sc->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EMX_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EMX_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			emx_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}
static void
emx_watchdog(struct ifaltq_subque *ifsq)
{
	struct emx_txdata *tdata = ifsq_get_priv(ifsq);
	struct ifnet *ifp = ifsq_get_ifp(ifsq);
	struct emx_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) ==
	    E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call ifsq_devstart_sched() here.
		 */
		ifsq_clr_oactive(ifsq);
		ifsq_watchdog_set_count(&tdata->tx_watchdog, 0);
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		ifsq_watchdog_set_count(&tdata->tx_watchdog, EMX_TX_TIMEOUT);
		return;
	}

	if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx);

	IFNET_STAT_INC(ifp, oerrors, 1);

	emx_init(sc);
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		ifsq_devstart_sched(sc->tx_data[i].ifsq);
}
static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_stop(sc);

	/* Get the latest MAC address; the user may have set a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset; we make a duplicate
	 * in RAR[14] for that eventuality, which assures
	 * the interface continues to function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_reset(sc)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX emx_stop()? */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif
	sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling);
	ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_inuse);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_inuse; ++i)
		emx_init_tx_ring(&sc->tx_data[i]);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		if (emx_init_rx_ring(&sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	/* Reset hardware counters */
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI-X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		uint32_t tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector; the high bit
		 * is enable, the other 3 bits are the MSI-X table
		 * entry.  We map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
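		/*
		 * For reference, 0x800A0908 decodes per the layout above:
		 * RXQ0 -> vector 0 (nibble 0x8), TXQ0 -> vector 1 (0x9),
		 * Link/other -> vector 2 (0xA); each nibble has its
		 * enable bit set.
		 */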
	}

	/*
	 * Only enable interrupts if we are not polling; make sure
	 * they are off otherwise.
	 */
	if (polling)
		emx_disable_intr(sc);
	else
		emx_enable_intr(sc);

	/* AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
		emx_get_hw_control(sc);

	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_inuse; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_clr_oactive(tdata->ifsq);
		ifsq_watchdog_start(&tdata->tx_watchdog);
		if (!polling) {
			callout_reset_bycpu(&tdata->tx_gc_timer, 1,
			    emx_txgc_timer, tdata, ifsq_get_cpuid(tdata->ifsq));
		}
	}

	callout_reset(&sc->timer, hz, emx_timer, sc);
}
static void
emx_intr(void *xsc)
{
	emx_intr_body(xsc, TRUE);
}
static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle.  The ICR register
	 * reports an all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.  I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_cnt; ++i) {
				lwkt_serialize_enter(
				    &sc->rx_data[i].rx_serialize);
				emx_rxeof(&sc->rx_data[i], -1);
				lwkt_serialize_exit(
				    &sc->rx_data[i].rx_serialize);
			}
		}
		if (reg_icr & E1000_ICR_TXDW) {
			struct emx_txdata *tdata = &sc->tx_data[0];

			lwkt_serialize_enter(&tdata->tx_serialize);
			emx_txeof(tdata);
			lwkt_serialize_exit(&tdata->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		emx_serialize_skipmain(sc);

		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);

		emx_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		IFNET_STAT_INC(ifp, iqdrops, 1);

	logif(intr_end);
}
static void
emx_intr_mask(void *xsc)
{
	struct emx_softc *sc = xsc;

	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	emx_intr_body(sc, FALSE);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}
static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active) {
		if (sc->hw.mac.autoneg)
			ifmr->ifm_active |= IFM_NONE;
		else
			ifmr->ifm_active |= sc->media.ifm_media;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
		ifmr->ifm_active |= sc->ifm_flowctrl;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
	if (ifmr->ifm_active & IFM_FDX)
		ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode);
}
static int
emx_media_change(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if_printf(ifp, "Flow control is not "
				    "allowed for half-duplex\n");
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	case IFM_10_T:
		if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) {
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		} else {
			if (IFM_OPTIONS(ifm->ifm_media) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) {
				if_printf(ifp, "Flow control is not "
				    "allowed for half-duplex\n");
				return EINVAL;
			}
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		}
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		break;

	default:
		if_printf(ifp, "Unsupported media type %d\n",
		    IFM_SUBTYPE(ifm->ifm_media));
		return EINVAL;
	}
	sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK;

	if (ifp->if_flags & IFF_RUNNING)
		emx_init(sc);

	return 0;
}
static int
emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[EMX_MAX_SCATTER];
	bus_dmamap_t map;
	struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = emx_tso_pullup(tdata, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;

	/*
	 * Capture the first descriptor index; this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = tdata->next_avail_tx_desc;
	tx_buffer = &tdata->tx_buf[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED;
	KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc"));
	if (maxsegs > EMX_MAX_SCATTER)
		maxsegs = EMX_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	tdata->tx_nsegs += nsegs;
	*segs_used += nsegs;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* TSO will consume one TX desc */
		i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	} else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower);
		tdata->tx_nsegs += i;
		*segs_used += i;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16);
		/* Tell hardware to add tag */
		txd_lower |= htole32(E1000_TXD_CMD_VLE);
	}

	i = tdata->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &tdata->tx_buf[i];
		ctxd = &tdata->tx_desc_base[i];

		ctxd->buffer_addr = htole64(segs[j].ds_addr);
		ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
		    txd_lower | segs[j].ds_len);
		ctxd->upper.data = htole32(txd_upper);

		last = i;
		if (++i == tdata->num_tx_desc)
			i = 0;
	}

	tdata->next_avail_tx_desc = i;

	KKASSERT(tdata->num_tx_desc_avail > nsegs);
	tdata->num_tx_desc_avail -= nsegs;
	tdata->tx_nmbuf++;

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) {
		tdata->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_intr_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		tdata->tx_dd[tdata->tx_dd_tail] = last;
		EMX_INC_TXDD_IDX(tdata->tx_dd_tail);
		KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Defer TDT updating, until enough descriptors are set up
	 */
	*idx = i;
#ifdef EMX_TSS_DEBUG
	tdata->tx_pkts++;
#endif

	return 0;
}
static void
emx_set_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (emx_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	}
}
static void
emx_disable_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;
	int mcnt = 0;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
	reg_rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_SBP);

	if (ifp->if_flags & IFF_ALLMULTI) {
		mcnt = EMX_MCAST_ADDR_MAX;
	} else {
		const struct ifmultiaddr *ifma;

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			if (mcnt == EMX_MCAST_ADDR_MAX)
				break;
			mcnt++;
		}
	}

	/* Don't disable if in MAX groups */
	if (mcnt < EMX_MCAST_ADDR_MAX)
		reg_rctl &= ~E1000_RCTL_MPE;

	E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}
static void
emx_set_multi(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == EMX_MCAST_ADDR_MAX)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= EMX_MCAST_ADDR_MAX) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}
/*
 * This routine checks for link status and updates statistics.
 */
static void
emx_timer(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(&sc->main_serialize);

	emx_update_link_status(sc);
	emx_update_stats(sc);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
		e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		emx_print_hw_stats(sc);

	emx_smartspeed(sc);

	callout_reset(&sc->timer, hz, emx_timer, sc);

	lwkt_serialize_exit(&sc->main_serialize);
}
static void
emx_update_link_status(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t link_check = 0;

	/* Get the cached link value or read the PHY for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			if (hw->mac.type >= e1000_pch_spt)
				msec_delay(50);
			/* Do the work to read the PHY */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = sc->hw.mac.serdes_has_link;
		break;

	case e1000_media_type_unknown:
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);

		/*
		 * Check if we should enable/disable SPEED_MODE bit on
		 * 82571/82572
		 */
		if (sc->link_speed != SPEED_1000 &&
		    (hw->mac.type == e1000_82571 ||
		     hw->mac.type == e1000_82572)) {
			uint32_t tarc0;

			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			tarc0 &= ~EMX_TARC_SPEED_MODE;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose) {
			char flowctrl[IFM_ETH_FC_STRLEN];

			e1000_fc2str(hw->fc.current_mode, flowctrl,
			    sizeof(flowctrl));
			device_printf(dev, "Link is up %d Mbps %s, "
			    "Flow control: %s\n",
			    sc->link_speed,
			    (sc->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex",
			    flowctrl);
		}
		if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE)
			e1000_force_flowctrl(hw, sc->ifm_flowctrl);
		sc->link_active = 1;
		sc->smartspeed = 0;
		ifp->if_baudrate = sc->link_speed * 1000000;
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		sc->link_active = 0;
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}
static void
emx_stop(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_clr_oactive(tdata->ifsq);
		ifsq_watchdog_stop(&tdata->tx_watchdog);
		tdata->tx_flags &= ~EMX_TXFLAG_ENABLED;

		tdata->tx_running = 0;
		callout_stop(&tdata->tx_gc_timer);
	}

	/* I219 needs some special flushing to avoid hangs */
	if (sc->hw.mac.type >= e1000_pch_spt)
		emx_flush_txrx_ring(sc);

	/*
	 * Disable multiple receive queues.
	 *
	 * NOTE:
	 * We should disable multiple receive queues before
	 * resetting the hardware.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		emx_free_tx_ring(&sc->tx_data[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		emx_free_rx_ring(&sc->rx_data[i]);
}
static int
emx_reset(struct emx_softc *sc)
{
	device_t dev = sc->dev;
	uint16_t rx_buffer_size;
	uint32_t pba;

	/* Set up smart power down as default off on newer adapters. */
	if (!emx_smart_pwr_down &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (sc->hw.mac.type) {
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;

	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;

	case e1000_82574:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;

	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pch_cnp:
		pba = E1000_PBA_26K;
		break;

	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (sc->hw.mac.max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit.  Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer.  There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;

	sc->hw.fc.high_water = rx_buffer_size -
	    roundup2(sc->hw.mac.max_frame_size, 1024);
	sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;
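
	/*
	 * Worked example: with a 32KB RX allocation and a standard
	 * 1522 byte max frame, roundup2(1522, 1024) is 2048, giving
	 * high_water = 32768 - 2048 = 30720 and low_water = 29220.
	 */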
	sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
	sc->hw.fc.send_xon = TRUE;
	sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl);

	/*
	 * Device specific overrides/settings
	 */
	if (sc->hw.mac.type == e1000_pch_lpt ||
	    sc->hw.mac.type == e1000_pch_spt ||
	    sc->hw.mac.type == e1000_pch_cnp) {
		sc->hw.fc.high_water = 0x5C20;
		sc->hw.fc.low_water = 0x5048;
		sc->hw.fc.pause_time = 0x0650;
		sc->hw.fc.refresh_time = 0x0400;
		/* Jumbos need adjusted PBA */
		if (sc->arpcom.ac_if.if_mtu > ETHERMTU)
			E1000_WRITE_REG(&sc->hw, E1000_PBA, 12);
		else
			E1000_WRITE_REG(&sc->hw, E1000_PBA, 26);
	} else if (sc->hw.mac.type == e1000_80003es2lan) {
		sc->hw.fc.pause_time = 0xFFFF;
	}

	/* I219 needs some special flushing to avoid hangs */
	if (sc->hw.mac.type >= e1000_pch_spt)
		emx_flush_txrx_ring(sc);

	/* Issue a global reset */
	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);
	emx_disable_aspm(sc);

	if (e1000_init_hw(&sc->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return EIO;
	}

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(&sc->hw);
	e1000_check_for_link(&sc->hw);

	return 0;
}
static void
emx_setup_ifp(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	if_initname(ifp, device_get_name(sc->dev),
	    device_get_unit(sc->dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = emx_init;
	ifp->if_ioctl = emx_ioctl;
	ifp->if_start = emx_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = emx_npoll;
#endif
	ifp->if_serialize = emx_serialize;
	ifp->if_deserialize = emx_deserialize;
	ifp->if_tryserialize = emx_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = emx_serialize_assert;
#endif

	ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	ifp->if_mapsubq = ifq_mapsubq_modulo;
	ifq_set_subq_divisor(&ifp->if_snd, 1);
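	/*
	 * TX packets are distributed across the subqueues by simple
	 * modulo mapping; the divisor starts at 1 and is raised to
	 * the number of TX rings actually in use by emx_init().
	 */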

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_MTU |
	    IFCAP_TSO;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct emx_txdata *tdata = &sc->tx_data[i];

		ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res));
		ifsq_set_priv(ifsq, tdata);
		ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize);
		tdata->ifsq = ifsq;

		ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog, 0);
	}

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl);
}
/*
 * Workaround for SmartSpeed on 82541 and 82547 controllers
 */
static void
emx_smartspeed(struct emx_softc *sc)
{
	uint16_t phy_tmp;

	if (sc->link_active || sc->hw.phy.type != e1000_phy_igp ||
	    sc->hw.mac.autoneg == 0 ||
	    (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (sc->smartspeed == 0) {
		/*
		 * If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&sc->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			if (phy_tmp & CR_1000T_MS_ENABLE) {
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&sc->hw,
				    PHY_1000T_CTRL, phy_tmp);
				sc->smartspeed++;
				if (sc->hw.mac.autoneg &&
				    !e1000_phy_setup_autoneg(&sc->hw) &&
				    !e1000_read_phy_reg(&sc->hw,
				    PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= MII_CR_AUTO_NEG_EN |
					    MII_CR_RESTART_AUTO_NEG;
					e1000_write_phy_reg(&sc->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
		if (sc->hw.mac.autoneg &&
		    !e1000_phy_setup_autoneg(&sc->hw) &&
		    !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
			e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp);
		}
	}

	/* Restart process after EMX_SMARTSPEED_MAX iterations */
	if (sc->smartspeed++ == EMX_SMARTSPEED_MAX)
		sc->smartspeed = 0;
}
2322 emx_create_tx_ring(struct emx_txdata *tdata)
2324 device_t dev = tdata->sc->dev;
2325 struct emx_txbuf *tx_buffer;
2326 int error, i, tsize, ntxd;
2329	 * Validate the number of transmit descriptors.  It must not exceed
2330	 * the hardware maximum, and must be a multiple of EMX_DBA_ALIGN.
2332 ntxd = device_getenv_int(dev, "txd", emx_txd);
2333 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 ||
2334 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) {
2335 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
2336 EMX_DEFAULT_TXD, ntxd);
2337 tdata->num_tx_desc = EMX_DEFAULT_TXD;
2339 tdata->num_tx_desc = ntxd;
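	/*
	 * Editorial note (not in the original source): assuming the
	 * 16-byte legacy TX descriptor and an EMX_DBA_ALIGN of 128,
	 * the modulo test above requires ntxd to be a multiple of
	 * 128 / 16 = 8, e.g. "txd=512" passes while "txd=500" falls
	 * back to EMX_DEFAULT_TXD.
	 */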
2343 * Allocate Transmit Descriptor ring
2345 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc),
2347 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag,
2348 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
2349 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap,
2350 &tdata->tx_desc_paddr);
2351 if (tdata->tx_desc_base == NULL) {
2352 device_printf(dev, "Unable to allocate tx_desc memory\n");
2356 tsize = __VM_CACHELINE_ALIGN(
2357 sizeof(struct emx_txbuf) * tdata->num_tx_desc);
2358 tdata->tx_buf = kmalloc(tsize, M_DEVBUF,
2359 M_WAITOK | M_ZERO | M_CACHEALIGN);
2362 * Create DMA tags for tx buffers
2364 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */
2365 1, 0, /* alignment, bounds */
2366 BUS_SPACE_MAXADDR, /* lowaddr */
2367 BUS_SPACE_MAXADDR, /* highaddr */
2368 NULL, NULL, /* filter, filterarg */
2369 EMX_TSO_SIZE, /* maxsize */
2370 EMX_MAX_SCATTER, /* nsegments */
2371 EMX_MAX_SEGSIZE, /* maxsegsize */
2372 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
2373 BUS_DMA_ONEBPAGE, /* flags */
2376 device_printf(dev, "Unable to allocate TX DMA tag\n");
2377 kfree(tdata->tx_buf, M_DEVBUF);
2378 tdata->tx_buf = NULL;
2383 * Create DMA maps for tx buffers
2385 for (i = 0; i < tdata->num_tx_desc; i++) {
2386 tx_buffer = &tdata->tx_buf[i];
2388 error = bus_dmamap_create(tdata->txtag,
2389 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2392 device_printf(dev, "Unable to create TX DMA map\n");
2393 emx_destroy_tx_ring(tdata, i);
2399 * Setup TX parameters
2401 tdata->spare_tx_desc = EMX_TX_SPARE;
2402 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG;
2405 * Keep following relationship between spare_tx_desc, oact_tx_desc
2406 * and tx_intr_nsegs:
2407 * (spare_tx_desc + EMX_TX_RESERVED) <=
2408 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs
2410 tdata->oact_tx_desc = tdata->num_tx_desc / 8;
2411 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX)
2412 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX;
2413 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED)
2414 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED;
2416 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16;
2417 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc)
2418 tdata->tx_intr_nsegs = tdata->oact_tx_desc;
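	/*
	 * Worked example (illustrative, not from the original source):
	 * with num_tx_desc = 512, oact_tx_desc starts at 512 / 8 = 64
	 * (then clamped into [spare_tx_desc + EMX_TX_RESERVED,
	 * EMX_TX_OACTIVE_MAX]) and tx_intr_nsegs starts at 512 / 16 = 32;
	 * since that is below oact_tx_desc, it is raised to oact_tx_desc,
	 * preserving the invariant oact_tx_desc <= tx_intr_nsegs stated
	 * above.
	 */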
2421	 * Pull up an extra 4 bytes into the first data segment for TSO; see:
2422 * 82571/82572 specification update errata #7
2424	 * The same applies to I217 (and maybe I218 and I219).
2427	 * 4 bytes instead of the 2 bytes mentioned in the errata are
2428	 * pulled, mainly to keep the rest of the data properly aligned.
2430 if (tdata->sc->hw.mac.type == e1000_82571 ||
2431 tdata->sc->hw.mac.type == e1000_82572 ||
2432 tdata->sc->hw.mac.type == e1000_pch_lpt ||
2433 tdata->sc->hw.mac.type == e1000_pch_spt ||
2434 tdata->sc->hw.mac.type == e1000_pch_cnp)
2435 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX;
2441 emx_init_tx_ring(struct emx_txdata *tdata)
2443 /* Clear the old ring contents */
2444 bzero(tdata->tx_desc_base,
2445 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc);
2448 tdata->next_avail_tx_desc = 0;
2449 tdata->next_tx_to_clean = 0;
2450 tdata->num_tx_desc_avail = tdata->num_tx_desc;
2451 tdata->tx_nmbuf = 0;
2452 tdata->tx_running = 0;
2454 tdata->tx_flags |= EMX_TXFLAG_ENABLED;
2455 if (tdata->sc->tx_ring_inuse > 1) {
2456 tdata->tx_flags |= EMX_TXFLAG_FORCECTX;
2458 if_printf(&tdata->sc->arpcom.ac_if,
2459 "TX %d force ctx setup\n", tdata->idx);
2465 emx_init_tx_unit(struct emx_softc *sc)
2467 uint32_t tctl, tarc, tipg = 0, txdctl;
2470 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2471 struct emx_txdata *tdata = &sc->tx_data[i];
2474 /* Setup the Base and Length of the Tx Descriptor Ring */
2475 bus_addr = tdata->tx_desc_paddr;
2476 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i),
2477 tdata->num_tx_desc * sizeof(struct e1000_tx_desc));
2478 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i),
2479 (uint32_t)(bus_addr >> 32));
2480 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i),
2481 (uint32_t)bus_addr);
2482 /* Setup the HW Tx Head and Tail descriptor pointers */
2483 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0);
2484 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0);
2486 txdctl = 0x1f; /* PTHRESH */
2487 txdctl |= 1 << 8; /* HTHRESH */
2488 txdctl |= 1 << 16; /* WTHRESH */
2489 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */
2490 txdctl |= E1000_TXDCTL_GRAN;
2491 txdctl |= 1 << 25; /* LWTHRESH */
2493 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(i), txdctl);
2496 /* Set the default values for the Tx Inter Packet Gap timer */
2497 switch (sc->hw.mac.type) {
2498 case e1000_80003es2lan:
2499 tipg = DEFAULT_82543_TIPG_IPGR1;
2500 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
2501 E1000_TIPG_IPGR2_SHIFT;
2505 if (sc->hw.phy.media_type == e1000_media_type_fiber ||
2506 sc->hw.phy.media_type == e1000_media_type_internal_serdes)
2507 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2509 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2510 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2511 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2515 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg);
2517 /* NOTE: 0 is not allowed for TIDV */
2518 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1);
2519 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0);
2522 * Errata workaround (obtained from Linux). This is necessary
2523 * to make multiple TX queues work on 82574.
2524 * XXX can't find it in any published errata though.
2526 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0));
2527 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl);
2529 if (sc->hw.mac.type == e1000_82571 ||
2530 sc->hw.mac.type == e1000_82572) {
2531 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2532 tarc |= EMX_TARC_SPEED_MODE;
2533 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2534 } else if (sc->hw.mac.type == e1000_80003es2lan) {
2535 /* errata: program both queues to unweighted RR */
2536 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2538 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2539 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2541 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2542 } else if (sc->hw.mac.type == e1000_82574) {
2543 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2544 tarc |= EMX_TARC_ERRATA;
2545 if (sc->tx_ring_inuse > 1) {
2546 tarc |= (EMX_TARC_COMPENSATION_MODE | EMX_TARC_MQ_FIX);
2547 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2548 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2550 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2554 /* Program the Transmit Control Register */
2555 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL);
2556 tctl &= ~E1000_TCTL_CT;
2557 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2558 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2559 tctl |= E1000_TCTL_MULR;
2561 /* This write will effectively turn on the transmit unit. */
2562 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl);
2564 if (sc->hw.mac.type == e1000_82571 ||
2565 sc->hw.mac.type == e1000_82572 ||
2566 sc->hw.mac.type == e1000_80003es2lan) {
2567 /* Bit 28 of TARC1 must be cleared when MULR is enabled */
2568 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2570 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2571 } else if (sc->hw.mac.type >= e1000_pch_spt) {
2574 reg = E1000_READ_REG(&sc->hw, E1000_IOSFPC);
2575 reg |= E1000_RCTL_RDMTS_HEX;
2576 E1000_WRITE_REG(&sc->hw, E1000_IOSFPC, reg);
2577 reg = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2578 reg |= E1000_TARC0_CB_MULTIQ_3_REQ;
2579 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), reg);
2582 if (sc->tx_ring_inuse > 1) {
2583 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0));
2584 tarc &= ~EMX_TARC_COUNT_MASK;
2586 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc);
2588 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1));
2589 tarc &= ~EMX_TARC_COUNT_MASK;
2591 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc);
2596 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc)
2598 struct emx_txbuf *tx_buffer;
2601 /* Free Transmit Descriptor ring */
2602 if (tdata->tx_desc_base) {
2603 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap);
2604 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base,
2605 tdata->tx_desc_dmap);
2606 bus_dma_tag_destroy(tdata->tx_desc_dtag);
2608 tdata->tx_desc_base = NULL;
2611 if (tdata->tx_buf == NULL)
2614 for (i = 0; i < ndesc; i++) {
2615 tx_buffer = &tdata->tx_buf[i];
2617 KKASSERT(tx_buffer->m_head == NULL);
2618 bus_dmamap_destroy(tdata->txtag, tx_buffer->map);
2620 bus_dma_tag_destroy(tdata->txtag);
2622 kfree(tdata->tx_buf, M_DEVBUF);
2623 tdata->tx_buf = NULL;
2627 * The offload context needs to be set when we transfer the first
2628 * packet of a particular protocol (TCP/UDP). This routine has been
2629 * enhanced to deal with inserted VLAN headers.
2631 * If the new packet's ether header length, ip header length and
2632	 * csum offloading type are the same as the previous packet's, we should
2633 * avoid allocating a new csum context descriptor; mainly to take
2634 * advantage of the pipeline effect of the TX data read request.
2636	 * This function returns the number of TX descriptors allocated for
2637	 * the checksum context.
2640 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp,
2641 uint32_t *txd_upper, uint32_t *txd_lower)
2643 struct e1000_context_desc *TXD;
2644 int curr_txd, ehdrlen, csum_flags;
2645 uint32_t cmd, hdr_len, ip_hlen;
2647 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES;
2648 ip_hlen = mp->m_pkthdr.csum_iphlen;
2649 ehdrlen = mp->m_pkthdr.csum_lhlen;
2651 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
2652 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen &&
2653 tdata->csum_flags == csum_flags) {
2655 * Same csum offload context as the previous packets;
2658 *txd_upper = tdata->csum_txd_upper;
2659 *txd_lower = tdata->csum_txd_lower;
2664 * Setup a new csum offload context.
2667 curr_txd = tdata->next_avail_tx_desc;
2668 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];
2672 /* Setup of IP header checksum. */
2673 if (csum_flags & CSUM_IP) {
2675 * Start offset for header checksum calculation.
2676 * End offset for header checksum calculation.
2677 * Offset of place to put the checksum.
2679 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2680 TXD->lower_setup.ip_fields.ipcse =
2681 htole16(ehdrlen + ip_hlen - 1);
2682 TXD->lower_setup.ip_fields.ipcso =
2683 ehdrlen + offsetof(struct ip, ip_sum);
2684 cmd |= E1000_TXD_CMD_IP;
2685 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2687 hdr_len = ehdrlen + ip_hlen;
2689 if (csum_flags & CSUM_TCP) {
2691 * Start offset for payload checksum calculation.
2692 * End offset for payload checksum calculation.
2693 * Offset of place to put the checksum.
2695 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2696 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2697 TXD->upper_setup.tcp_fields.tucso =
2698 hdr_len + offsetof(struct tcphdr, th_sum);
2699 cmd |= E1000_TXD_CMD_TCP;
2700 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2701 } else if (csum_flags & CSUM_UDP) {
2703			 * Start offset for payload checksum calculation.
2704			 * End offset for payload checksum calculation.
2705 * Offset of place to put the checksum.
2707 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2708 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2709 TXD->upper_setup.tcp_fields.tucso =
2710 hdr_len + offsetof(struct udphdr, uh_sum);
2711 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2714 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
2715 E1000_TXD_DTYP_D; /* Data descr */
2717 /* Save the information for this csum offloading context */
2718 tdata->csum_lhlen = ehdrlen;
2719 tdata->csum_iphlen = ip_hlen;
2720 tdata->csum_flags = csum_flags;
2721 tdata->csum_txd_upper = *txd_upper;
2722 tdata->csum_txd_lower = *txd_lower;
2724 TXD->tcp_seg_setup.data = htole32(0);
2725 TXD->cmd_and_length =
2726 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);
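	/*
	 * Worked example (illustrative, not from the original source):
	 * for a TCP/IPv4 packet without a VLAN header, ehdrlen = 14 and
	 * ip_hlen = 20, so the context descriptor ends up with
	 * ipcss = 14, ipcse = 33, ipcso = 24 (14 + offsetof(struct ip,
	 * ip_sum) = 14 + 10), tucss = 34 and tucso = 50 (34 +
	 * offsetof(struct tcphdr, th_sum) = 34 + 16).
	 */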
2728 if (++curr_txd == tdata->num_tx_desc)
2731 KKASSERT(tdata->num_tx_desc_avail > 0);
2732 tdata->num_tx_desc_avail--;
2734 tdata->next_avail_tx_desc = curr_txd;
2739 emx_txeof(struct emx_txdata *tdata)
2741 struct emx_txbuf *tx_buffer;
2742 int first, num_avail;
2744 if (tdata->tx_dd_head == tdata->tx_dd_tail)
2747 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2750 num_avail = tdata->num_tx_desc_avail;
2751 first = tdata->next_tx_to_clean;
2753 while (tdata->tx_dd_head != tdata->tx_dd_tail) {
2754 int dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2755 struct e1000_tx_desc *tx_desc;
2757 tx_desc = &tdata->tx_desc_base[dd_idx];
2758 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2759 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
2761 if (++dd_idx == tdata->num_tx_desc)
2764 while (first != dd_idx) {
2767 KKASSERT(num_avail < tdata->num_tx_desc);
2770 tx_buffer = &tdata->tx_buf[first];
2771 if (tx_buffer->m_head)
2772 emx_free_txbuf(tdata, tx_buffer);
2774 if (++first == tdata->num_tx_desc)
2781 tdata->next_tx_to_clean = first;
2782 tdata->num_tx_desc_avail = num_avail;
2784 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2785 tdata->tx_dd_head = 0;
2786 tdata->tx_dd_tail = 0;
2789 if (!EMX_IS_OACTIVE(tdata)) {
2790 ifsq_clr_oactive(tdata->ifsq);
2792 /* All clean, turn off the timer */
2793 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2794 ifsq_watchdog_set_count(&tdata->tx_watchdog, 0);
2796 tdata->tx_running = EMX_TX_RUNNING;
2800 emx_tx_collect(struct emx_txdata *tdata, boolean_t gc)
2802 struct emx_txbuf *tx_buffer;
2803 int tdh, first, num_avail, dd_idx = -1;
2805 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2808 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx));
2809 if (tdh == tdata->next_tx_to_clean) {
2810 if (gc && tdata->tx_nmbuf > 0)
2811 tdata->tx_running = EMX_TX_RUNNING;
2817 if (tdata->tx_dd_head != tdata->tx_dd_tail)
2818 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2820 num_avail = tdata->num_tx_desc_avail;
2821 first = tdata->next_tx_to_clean;
2823 while (first != tdh) {
2826 KKASSERT(num_avail < tdata->num_tx_desc);
2829 tx_buffer = &tdata->tx_buf[first];
2830 if (tx_buffer->m_head)
2831 emx_free_txbuf(tdata, tx_buffer);
2833 if (first == dd_idx) {
2834 EMX_INC_TXDD_IDX(tdata->tx_dd_head);
2835 if (tdata->tx_dd_head == tdata->tx_dd_tail) {
2836 tdata->tx_dd_head = 0;
2837 tdata->tx_dd_tail = 0;
2840 dd_idx = tdata->tx_dd[tdata->tx_dd_head];
2844 if (++first == tdata->num_tx_desc)
2847 tdata->next_tx_to_clean = first;
2848 tdata->num_tx_desc_avail = num_avail;
2850 if (!EMX_IS_OACTIVE(tdata)) {
2851 ifsq_clr_oactive(tdata->ifsq);
2853 /* All clean, turn off the timer */
2854 if (tdata->num_tx_desc_avail == tdata->num_tx_desc)
2855 ifsq_watchdog_set_count(&tdata->tx_watchdog, 0);
2857 if (!gc || tdata->tx_nmbuf > 0)
2858 tdata->tx_running = EMX_TX_RUNNING;
2862 * When link is lost, sometimes there is still work in the TX ring,
2863 * which will result in a watchdog; rather than allow that, do an
2864 * attempted cleanup and then reinit here.  Note that this has been
2865 * seen mostly with fiber adapters.
2868 emx_tx_purge(struct emx_softc *sc)
2872 if (sc->link_active)
2875 for (i = 0; i < sc->tx_ring_inuse; ++i) {
2876 struct emx_txdata *tdata = &sc->tx_data[i];
2878 if (tdata->tx_watchdog.wd_timer) {
2879 emx_tx_collect(tdata, FALSE);
2880 if (tdata->tx_watchdog.wd_timer) {
2881 if_printf(&sc->arpcom.ac_if,
2882 "Link lost, TX pending, reinit\n");
2891 emx_newbuf(struct emx_rxdata *rdata, int i, int init)
2894 bus_dma_segment_t seg;
2896 struct emx_rxbuf *rx_buffer;
2899 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
2902 if_printf(&rdata->sc->arpcom.ac_if,
2903 "Unable to allocate RX mbuf\n");
2907 m->m_len = m->m_pkthdr.len = MCLBYTES;
2909 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN)
2910 m_adj(m, ETHER_ALIGN);
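	/*
	 * Editorial note (not in the original source): ETHER_ALIGN is 2,
	 * so the 14-byte Ethernet header then starts at offset 2 and the
	 * IP header lands on a 4-byte boundary (offset 16).  This is
	 * only safe when the maximum frame still fits in
	 * MCLBYTES - ETHER_ALIGN, hence the check above.
	 */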
2912 error = bus_dmamap_load_mbuf_segment(rdata->rxtag,
2913 rdata->rx_sparemap, m,
2914 &seg, 1, &nseg, BUS_DMA_NOWAIT);
2918 if_printf(&rdata->sc->arpcom.ac_if,
2919 "Unable to load RX mbuf\n");
2924 rx_buffer = &rdata->rx_buf[i];
2925 if (rx_buffer->m_head != NULL)
2926 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
2928 map = rx_buffer->map;
2929 rx_buffer->map = rdata->rx_sparemap;
2930 rdata->rx_sparemap = map;
2932 rx_buffer->m_head = m;
2933 rx_buffer->paddr = seg.ds_addr;
2935 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer);
2940 emx_create_rx_ring(struct emx_rxdata *rdata)
2942 device_t dev = rdata->sc->dev;
2943 struct emx_rxbuf *rx_buffer;
2944 int i, error, rsize, nrxd;
2947	 * Validate the number of receive descriptors.  It must not exceed
2948	 * the hardware maximum, and must be a multiple of EMX_DBA_ALIGN.
2950 nrxd = device_getenv_int(dev, "rxd", emx_rxd);
2951 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 ||
2952 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) {
2953 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
2954 EMX_DEFAULT_RXD, nrxd);
2955 rdata->num_rx_desc = EMX_DEFAULT_RXD;
2957 rdata->num_rx_desc = nrxd;
2961 * Allocate Receive Descriptor ring
2963 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t),
2965 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag,
2966 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
2967 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap,
2968 &rdata->rx_desc_paddr);
2969 if (rdata->rx_desc == NULL) {
2970 device_printf(dev, "Unable to allocate rx_desc memory\n");
2974 rsize = __VM_CACHELINE_ALIGN(
2975 sizeof(struct emx_rxbuf) * rdata->num_rx_desc);
2976 rdata->rx_buf = kmalloc(rsize, M_DEVBUF,
2977 M_WAITOK | M_ZERO | M_CACHEALIGN);
2980 * Create DMA tag for rx buffers
2982 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */
2983 1, 0, /* alignment, bounds */
2984 BUS_SPACE_MAXADDR, /* lowaddr */
2985 BUS_SPACE_MAXADDR, /* highaddr */
2986 NULL, NULL, /* filter, filterarg */
2987 MCLBYTES, /* maxsize */
2989 MCLBYTES, /* maxsegsize */
2990 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
2993 device_printf(dev, "Unable to allocate RX DMA tag\n");
2994 kfree(rdata->rx_buf, M_DEVBUF);
2995 rdata->rx_buf = NULL;
3000 * Create spare DMA map for rx buffers
3002 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
3003 &rdata->rx_sparemap);
3005 device_printf(dev, "Unable to create spare RX DMA map\n");
3006 bus_dma_tag_destroy(rdata->rxtag);
3007 kfree(rdata->rx_buf, M_DEVBUF);
3008 rdata->rx_buf = NULL;
3013 * Create DMA maps for rx buffers
3015 for (i = 0; i < rdata->num_rx_desc; i++) {
3016 rx_buffer = &rdata->rx_buf[i];
3018 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK,
3021 device_printf(dev, "Unable to create RX DMA map\n");
3022 emx_destroy_rx_ring(rdata, i);
3030 emx_free_rx_ring(struct emx_rxdata *rdata)
3034 for (i = 0; i < rdata->num_rx_desc; i++) {
3035 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i];
3037 if (rx_buffer->m_head != NULL) {
3038 bus_dmamap_unload(rdata->rxtag, rx_buffer->map);
3039 m_freem(rx_buffer->m_head);
3040 rx_buffer->m_head = NULL;
3044 if (rdata->fmp != NULL)
3045 m_freem(rdata->fmp);
3051 emx_free_tx_ring(struct emx_txdata *tdata)
3055 for (i = 0; i < tdata->num_tx_desc; i++) {
3056 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i];
3058 if (tx_buffer->m_head != NULL)
3059 emx_free_txbuf(tdata, tx_buffer);
3062 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX;
3064 tdata->csum_flags = 0;
3065 tdata->csum_lhlen = 0;
3066 tdata->csum_iphlen = 0;
3067 tdata->csum_thlen = 0;
3068 tdata->csum_mss = 0;
3069 tdata->csum_pktlen = 0;
3071 tdata->tx_dd_head = 0;
3072 tdata->tx_dd_tail = 0;
3073 tdata->tx_nsegs = 0;
3077 emx_init_rx_ring(struct emx_rxdata *rdata)
3081 /* Reset descriptor ring */
3082 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc);
3084 /* Allocate new ones. */
3085 for (i = 0; i < rdata->num_rx_desc; i++) {
3086 error = emx_newbuf(rdata, i, 1);
3091 /* Setup our descriptor pointers */
3092 rdata->next_rx_desc_to_check = 0;
3098 emx_init_rx_unit(struct emx_softc *sc)
3100 struct ifnet *ifp = &sc->arpcom.ac_if;
3102 uint32_t rctl, itr, rfctl, rxcsum;
3106 * Make sure receives are disabled while setting
3107 * up the descriptor ring
3109 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
3110 /* Do not disable if ever enabled on this hardware */
3111 if (sc->hw.mac.type != e1000_82574)
3112 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3115 * Set the interrupt throttling rate. Value is calculated
3116 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
3118 if (sc->int_throttle_ceil)
3119 itr = 1000000000 / 256 / sc->int_throttle_ceil;
3122 emx_set_itr(sc, itr);
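	/*
	 * Worked example (illustrative, not from the original source):
	 * int_throttle_ceil = 10000 interrupts/s gives
	 * itr = 1000000000 / 256 / 10000 = 390, i.e. 390 * 256ns ~= 100us
	 * between interrupts, or ~10016 interrupts/s actually programmed.
	 */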
3124 /* Use extended RX descriptor */
3125 rfctl = E1000_READ_REG(&sc->hw, E1000_RFCTL);
3126 rfctl |= E1000_RFCTL_EXTEN;
3127	/* Disable accelerated acknowledgment */
3128 if (sc->hw.mac.type == e1000_82574)
3129 rfctl |= E1000_RFCTL_ACK_DIS;
3130 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl);
3133 * Receive Checksum Offload for TCP and UDP
3135	 * Checksum offloading is also enabled if multiple receive
3136	 * queues are to be supported, since we need it to figure out
3139 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
3140 if ((ifp->if_capenable & IFCAP_RXCSUM) ||
3141 sc->rx_ring_cnt > 1) {
3144		 * PCSD must be enabled to enable multiple receive queues.
3147 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
3150 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
3153 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
3156 * Configure multiple receive queue (RSS)
3158 if (sc->rx_ring_cnt > 1) {
3159 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE];
3162 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING,
3163 ("invalid number of RX ring (%d)", sc->rx_ring_cnt));
3167 * When we reach here, RSS has already been disabled
3168		 * in emx_stop(), so we can safely configure the RSS key
3169 * and redirect table.
3175 toeplitz_get_key(key, sizeof(key));
3176 for (i = 0; i < EMX_NRSSRK; ++i) {
3179 rssrk = EMX_RSSRK_VAL(key, i);
3180 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
3182 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk);
3186 * Configure RSS redirect table.
3188 if_ringmap_rdrtable(sc->rx_rmap, sc->rdr_table,
3192 for (j = 0; j < EMX_NRETA; ++j) {
3195 for (i = 0; i < EMX_RETA_SIZE; ++i) {
3198 q = sc->rdr_table[r] << EMX_RETA_RINGIDX_SHIFT;
3199 reta |= q << (8 * i);
3202 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
3203 E1000_WRITE_REG(&sc->hw, E1000_RETA(j), reta);
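		/*
		 * Editorial note (not in the original source): each 32-bit
		 * RETA register packs EMX_RETA_SIZE (4) one-byte entries,
		 * with the target ring index placed at bit
		 * EMX_RETA_RINGIDX_SHIFT of each byte.  Assuming that
		 * shift is 7 (the 82574 queue-select bit), a 2-ring table
		 * alternating 0,1,0,1 would program each register as
		 * 0x80008000.
		 */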
3207 * Enable multiple receive queues.
3208 * Enable IPv4 RSS standard hash functions.
3209 * Disable RSS interrupt.
3211 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
3212 E1000_MRQC_ENABLE_RSS_2Q |
3213 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3214 E1000_MRQC_RSS_FIELD_IPV4);
3218	 * XXX TEMPORARY WORKAROUND: on some systems with 82573,
3219	 * long latencies are observed, e.g. on the Lenovo X60.  This
3220	 * change eliminates the problem, but since having positive
3221	 * values in RDTR is a known source of problems on other
3222	 * platforms, another solution is being sought.
3224 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) {
3225 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573);
3226 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573);
3229 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3230 struct emx_rxdata *rdata = &sc->rx_data[i];
3233 * Setup the Base and Length of the Rx Descriptor Ring
3235 bus_addr = rdata->rx_desc_paddr;
3236 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i),
3237 rdata->num_rx_desc * sizeof(emx_rxdesc_t));
3238 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i),
3239 (uint32_t)(bus_addr >> 32));
3240 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i),
3241 (uint32_t)bus_addr);
3244 * Setup the HW Rx Head and Tail Descriptor Pointers
3246 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0);
3247 E1000_WRITE_REG(&sc->hw, E1000_RDT(i),
3248 sc->rx_data[i].num_rx_desc - 1);
3251 /* Set PTHRESH for improved jumbo performance */
3252 if (ifp->if_mtu > ETHERMTU && sc->hw.mac.type == e1000_82574) {
3255 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3256 rxdctl = E1000_READ_REG(&sc->hw, E1000_RXDCTL(i));
3257 rxdctl |= 0x20; /* PTHRESH */
3258 rxdctl |= 4 << 8; /* HTHRESH */
3259 rxdctl |= 4 << 16; /* WTHRESH */
3260 rxdctl |= 1 << 24; /* Switch to granularity */
3261 E1000_WRITE_REG(&sc->hw, E1000_RXDCTL(i), rxdctl);
3265 if (sc->hw.mac.type >= e1000_pch2lan) {
3266 if (ifp->if_mtu > ETHERMTU)
3267 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE);
3269 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE);
3272 /* Setup the Receive Control Register */
3273 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3274 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3275 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC |
3276 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3278 /* Make sure VLAN Filters are off */
3279 rctl &= ~E1000_RCTL_VFE;
3281	/* Don't store bad packets */
3282 rctl &= ~E1000_RCTL_SBP;
3285 rctl |= E1000_RCTL_SZ_2048;
3287 if (ifp->if_mtu > ETHERMTU)
3288 rctl |= E1000_RCTL_LPE;
3290 rctl &= ~E1000_RCTL_LPE;
3292 /* Enable Receives */
3293 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl);
3297 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc)
3299 struct emx_rxbuf *rx_buffer;
3302 /* Free Receive Descriptor ring */
3303 if (rdata->rx_desc) {
3304 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap);
3305 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc,
3306 rdata->rx_desc_dmap);
3307 bus_dma_tag_destroy(rdata->rx_desc_dtag);
3309 rdata->rx_desc = NULL;
3312 if (rdata->rx_buf == NULL)
3315 for (i = 0; i < ndesc; i++) {
3316 rx_buffer = &rdata->rx_buf[i];
3318 KKASSERT(rx_buffer->m_head == NULL);
3319 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map);
3321 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap);
3322 bus_dma_tag_destroy(rdata->rxtag);
3324 kfree(rdata->rx_buf, M_DEVBUF);
3325 rdata->rx_buf = NULL;
3329 emx_rxeof(struct emx_rxdata *rdata, int count)
3331 struct ifnet *ifp = &rdata->sc->arpcom.ac_if;
3333 emx_rxdesc_t *current_desc;
3335 int i, cpuid = mycpuid;
3337 i = rdata->next_rx_desc_to_check;
3338 current_desc = &rdata->rx_desc[i];
3339 staterr = le32toh(current_desc->rxd_staterr);
3341 if (!(staterr & E1000_RXD_STAT_DD))
3344 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
3345 struct pktinfo *pi = NULL, pi0;
3346 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i];
3347 struct mbuf *m = NULL;
3352 mp = rx_buf->m_head;
3355 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3356 * needs to access the last received byte in the mbuf.
3358 bus_dmamap_sync(rdata->rxtag, rx_buf->map,
3359 BUS_DMASYNC_POSTREAD);
3361 len = le16toh(current_desc->rxd_length);
3362 if (staterr & E1000_RXD_STAT_EOP) {
3369 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3371 uint32_t mrq, rss_hash;
3374				 * Save the necessary information
3375				 * before emx_newbuf() destroys it.
3377 if ((staterr & E1000_RXD_STAT_VP) && eop)
3378 vlan = le16toh(current_desc->rxd_vlan);
3380 mrq = le32toh(current_desc->rxd_mrq);
3381 rss_hash = le32toh(current_desc->rxd_rss);
3383 EMX_RSS_DPRINTF(rdata->sc, 10,
3384 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n",
3385 rdata->idx, mrq, rss_hash);
3387 if (emx_newbuf(rdata, i, 0) != 0) {
3388 IFNET_STAT_INC(ifp, iqdrops, 1);
3392 /* Assign correct length to the current fragment */
3395 if (rdata->fmp == NULL) {
3396 mp->m_pkthdr.len = len;
3397 rdata->fmp = mp; /* Store the first mbuf */
3401				 * Chain mbufs together
3403 rdata->lmp->m_next = mp;
3404 rdata->lmp = rdata->lmp->m_next;
3405 rdata->fmp->m_pkthdr.len += len;
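				/*
				 * Editorial note (not in the original
				 * source): a 9018-byte jumbo frame
				 * received into MCLBYTES (2048) clusters
				 * spans 5 descriptors; fmp keeps the head
				 * of the chain, lmp the tail, and
				 * m_pkthdr.len of the head accumulates
				 * the total frame length.
				 */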
3409 rdata->fmp->m_pkthdr.rcvif = ifp;
3410 IFNET_STAT_INC(ifp, ipackets, 1);
3412 if (ifp->if_capenable & IFCAP_RXCSUM)
3413 emx_rxcsum(staterr, rdata->fmp);
3415 if (staterr & E1000_RXD_STAT_VP) {
3416 rdata->fmp->m_pkthdr.ether_vlantag =
3418 rdata->fmp->m_flags |= M_VLANTAG;
3424 if (ifp->if_capenable & IFCAP_RSS) {
3425 pi = emx_rssinfo(m, &pi0, mrq,
3428 #ifdef EMX_RSS_DEBUG
3433 IFNET_STAT_INC(ifp, ierrors, 1);
3435 emx_setup_rxdesc(current_desc, rx_buf);
3436 if (rdata->fmp != NULL) {
3437 m_freem(rdata->fmp);
3445 ifp->if_input(ifp, m, pi, cpuid);
3447 /* Advance our pointers to the next descriptor. */
3448 if (++i == rdata->num_rx_desc)
3451 current_desc = &rdata->rx_desc[i];
3452 staterr = le32toh(current_desc->rxd_staterr);
3454 rdata->next_rx_desc_to_check = i;
3456 /* Advance the E1000's Receive Queue "Tail Pointer". */
3458 i = rdata->num_rx_desc - 1;
3459 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i);
3463 emx_enable_intr(struct emx_softc *sc)
3465 uint32_t ims_mask = IMS_ENABLE_MASK;
3467 lwkt_serialize_handler_enable(&sc->main_serialize);
3470 if (sc->hw.mac.type == e1000_82574) {
3471 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK);
3472 ims_mask |= EM_MSIX_MASK;
3475 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask);
3479 emx_disable_intr(struct emx_softc *sc)
3481 if (sc->hw.mac.type == e1000_82574)
3482 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0);
3483 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
3485 lwkt_serialize_handler_disable(&sc->main_serialize);
3489 * A bit of a misnomer: what this really means is
3490 * to enable OS management of the system, i.e.
3491 * to disable special hardware management features.
3494 emx_get_mgmt(struct emx_softc *sc)
3496 /* A shared code workaround */
3497 if (sc->flags & EMX_FLAG_HAS_MGMT) {
3498 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
3499 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3501 /* disable hardware interception of ARP */
3502 manc &= ~(E1000_MANC_ARP_EN);
3504 /* enable receiving management packets to the host */
3505 manc |= E1000_MANC_EN_MNG2HOST;
3506 #define E1000_MNG2HOST_PORT_623 (1 << 5)
3507 #define E1000_MNG2HOST_PORT_664 (1 << 6)
3508 manc2h |= E1000_MNG2HOST_PORT_623;
3509 manc2h |= E1000_MNG2HOST_PORT_664;
3510 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
3512 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3517 * Give control back to hardware management
3518 * controller if there is one.
3521 emx_rel_mgmt(struct emx_softc *sc)
3523 if (sc->flags & EMX_FLAG_HAS_MGMT) {
3524 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
3526 /* re-enable hardware interception of ARP */
3527 manc |= E1000_MANC_ARP_EN;
3528 manc &= ~E1000_MANC_EN_MNG2HOST;
3530 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
3535 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3536 * For ASF and Pass Through versions of f/w this means that
3537 * the driver is loaded. For AMT version (only with 82573)
3538 * of the f/w this means that the network i/f is open.
3541 emx_get_hw_control(struct emx_softc *sc)
3543 /* Let firmware know the driver has taken over */
3544 if (sc->hw.mac.type == e1000_82573) {
3547 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3548 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3549 swsm | E1000_SWSM_DRV_LOAD);
3553 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3554 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3555 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
3557 sc->flags |= EMX_FLAG_HW_CTRL;
3561 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3562 * For ASF and Pass Through versions of f/w this means that the
3563 * driver is no longer loaded. For AMT version (only with 82573)
3564 * of the f/w this means that the network i/f is closed.
3567 emx_rel_hw_control(struct emx_softc *sc)
3569 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0)
3571 sc->flags &= ~EMX_FLAG_HW_CTRL;
3573	/* Let firmware take over control of h/w */
3574 if (sc->hw.mac.type == e1000_82573) {
3577 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM);
3578 E1000_WRITE_REG(&sc->hw, E1000_SWSM,
3579 swsm & ~E1000_SWSM_DRV_LOAD);
3583 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
3584 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
3585 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3590 emx_is_valid_eaddr(const uint8_t *addr)
3592 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
3594 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3601 * Enable PCI Wake On LAN capability
3604 emx_enable_wol(device_t dev)
3606 uint16_t cap, status;
3609	/* First find the capabilities pointer */
3610 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3612 /* Read the PM Capabilities */
3613 id = pci_read_config(dev, cap, 1);
3614 if (id != PCIY_PMG) /* Something wrong */
3618 * OK, we have the power capabilities,
3619 * so now get the status register
3621 cap += PCIR_POWER_STATUS;
3622 status = pci_read_config(dev, cap, 2);
3623 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3624 pci_write_config(dev, cap, status, 2);
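	/*
	 * Editorial note (not in the original source): the lookup above
	 * only examines the first capability in the PCI capability list;
	 * if the power-management capability is not the first entry,
	 * this function returns early and PME is left unconfigured.
	 */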
3628 emx_update_stats(struct emx_softc *sc)
3630 struct ifnet *ifp = &sc->arpcom.ac_if;
3632 if (sc->hw.phy.media_type == e1000_media_type_copper ||
3633 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
3634 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
3635 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
3637 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
3638 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
3639 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
3640 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
3642 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
3643 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
3644 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
3645 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
3646 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
3647 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
3648 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
3649 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
3650 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
3651 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
3652 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
3653 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
3654 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
3655 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
3656 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
3657 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
3658 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
3659 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
3660 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
3661 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
3663 /* For the 64-bit byte counters the low dword must be read first. */
3664 /* Both registers clear on the read of the high dword */
3666 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH);
3667 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH);
3669 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
3670 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
3671 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
3672 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC);
3673 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC);
3675 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH);
3676 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH);
3678 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
3679 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
3680 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
3681 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
3682 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
3683 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
3684 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
3685 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
3686 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
3687 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);
3689 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC);
3690 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC);
3691 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS);
3692 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR);
3693 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC);
3694 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC);
3696 IFNET_STAT_SET(ifp, collisions, sc->stats.colc);
3699 IFNET_STAT_SET(ifp, ierrors,
3700 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc +
3701 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr);
3704 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol);
3708 emx_print_debug_info(struct emx_softc *sc)
3710 device_t dev = sc->dev;
3711 uint8_t *hw_addr = sc->hw.hw_addr;
3714 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
3715 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
3716 E1000_READ_REG(&sc->hw, E1000_CTRL),
3717 E1000_READ_REG(&sc->hw, E1000_RCTL));
3718 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
3719	    ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),
3720	    (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff));
3721 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3722 sc->hw.fc.high_water, sc->hw.fc.low_water);
3723 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3724 E1000_READ_REG(&sc->hw, E1000_TIDV),
3725 E1000_READ_REG(&sc->hw, E1000_TADV));
3726 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3727 E1000_READ_REG(&sc->hw, E1000_RDTR),
3728 E1000_READ_REG(&sc->hw, E1000_RADV));
3730 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3731 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i,
3732 E1000_READ_REG(&sc->hw, E1000_TDH(i)),
3733 E1000_READ_REG(&sc->hw, E1000_TDT(i)));
3735 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3736 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i,
3737 E1000_READ_REG(&sc->hw, E1000_RDH(i)),
3738 E1000_READ_REG(&sc->hw, E1000_RDT(i)));
3741 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3742 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i,
3743 sc->tx_data[i].num_tx_desc_avail);
3744 device_printf(dev, "TX %d TSO segments = %lu\n", i,
3745 sc->tx_data[i].tso_segments);
3746 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i,
3747 sc->tx_data[i].tso_ctx_reused);
3752 emx_print_hw_stats(struct emx_softc *sc)
3754 device_t dev = sc->dev;
3756 device_printf(dev, "Excessive collisions = %lld\n",
3757 (long long)sc->stats.ecol);
3758 #if (DEBUG_HW > 0) /* Don't output these errors normally */
3759 device_printf(dev, "Symbol errors = %lld\n",
3760 (long long)sc->stats.symerrs);
3762 device_printf(dev, "Sequence errors = %lld\n",
3763 (long long)sc->stats.sec);
3764 device_printf(dev, "Defer count = %lld\n",
3765 (long long)sc->stats.dc);
3766 device_printf(dev, "Missed Packets = %lld\n",
3767 (long long)sc->stats.mpc);
3768 device_printf(dev, "Receive No Buffers = %lld\n",
3769 (long long)sc->stats.rnbc);
3770 /* RLEC is inaccurate on some hardware, calculate our own. */
3771 device_printf(dev, "Receive Length Errors = %lld\n",
3772 ((long long)sc->stats.roc + (long long)sc->stats.ruc));
3773 device_printf(dev, "Receive errors = %lld\n",
3774 (long long)sc->stats.rxerrc);
3775 device_printf(dev, "Crc errors = %lld\n",
3776 (long long)sc->stats.crcerrs);
3777 device_printf(dev, "Alignment errors = %lld\n",
3778 (long long)sc->stats.algnerrc);
3779 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
3780 (long long)sc->stats.cexterr);
3781 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns);
3782 device_printf(dev, "XON Rcvd = %lld\n",
3783 (long long)sc->stats.xonrxc);
3784 device_printf(dev, "XON Xmtd = %lld\n",
3785 (long long)sc->stats.xontxc);
3786 device_printf(dev, "XOFF Rcvd = %lld\n",
3787 (long long)sc->stats.xoffrxc);
3788 device_printf(dev, "XOFF Xmtd = %lld\n",
3789 (long long)sc->stats.xofftxc);
3790 device_printf(dev, "Good Packets Rcvd = %lld\n",
3791 (long long)sc->stats.gprc);
3792 device_printf(dev, "Good Packets Xmtd = %lld\n",
3793 (long long)sc->stats.gptc);
3797 emx_print_nvm_info(struct emx_softc *sc)
3799 uint16_t eeprom_data;
3802	/* It's a bit crude, but it gets the job done */
3803 kprintf("\nInterface EEPROM Dump:\n");
3804 kprintf("Offset\n0x0000 ");
3805 for (i = 0, j = 0; i < 32; i++, j++) {
3806 if (j == 8) { /* Make the offset block */
3808			kprintf("\n0x00%x0 ", row);
3810 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data);
3811 kprintf("%04x ", eeprom_data);
3817 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3819 struct emx_softc *sc;
3824 error = sysctl_handle_int(oidp, &result, 0, req);
3825 if (error || !req->newptr)
3828 sc = (struct emx_softc *)arg1;
3829 ifp = &sc->arpcom.ac_if;
3831 ifnet_serialize_all(ifp);
3834 emx_print_debug_info(sc);
3837 * This value will cause a hex dump of the
3838	 * first 32 16-bit words of the EEPROM to the screen.
3842 emx_print_nvm_info(sc);
3844 ifnet_deserialize_all(ifp);
3850 emx_sysctl_stats(SYSCTL_HANDLER_ARGS)
3855 error = sysctl_handle_int(oidp, &result, 0, req);
3856 if (error || !req->newptr)
3860 struct emx_softc *sc = (struct emx_softc *)arg1;
3861 struct ifnet *ifp = &sc->arpcom.ac_if;
3863 ifnet_serialize_all(ifp);
3864 emx_print_hw_stats(sc);
3865 ifnet_deserialize_all(ifp);
3871 emx_add_sysctl(struct emx_softc *sc)
3873 struct sysctl_ctx_list *ctx;
3874 struct sysctl_oid *tree;
3878 ctx = device_get_sysctl_ctx(sc->dev);
3879 tree = device_get_sysctl_tree(sc->dev);
3880 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3881 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
3882 emx_sysctl_debug_info, "I", "Debug Information");
3884 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3885 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
3886 emx_sysctl_stats, "I", "Statistics");
3888 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3889 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0,
3891 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3892 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0,
3895 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3896 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
3897 emx_sysctl_int_throttle, "I", "interrupt throttling rate");
3898 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3899 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
3900 emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt");
3901 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3902 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0,
3903 emx_sysctl_tx_wreg_nsegs, "I",
3904 "# segments sent before write to hardware register");
3906 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3907 OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0,
3909 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3910 OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0,
3912 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3913 OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0,
3914 "# of TX rings used");
3916 #ifdef IFPOLL_ENABLE
3917 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3918 OID_AUTO, "tx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
3919 sc->tx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
3920 "TX polling CPU map");
3921 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
3922 OID_AUTO, "rx_poll_cpumap", CTLTYPE_OPAQUE | CTLFLAG_RD,
3923 sc->rx_rmap, 0, if_ringmap_cpumap_sysctl, "I",
3924 "RX polling CPU map");
3927 #ifdef EMX_RSS_DEBUG
3928 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree),
3929 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug,
3930 0, "RSS debug level");
3931 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3932 ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i);
3933 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
3934 pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts,
3938 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3939 #ifdef EMX_TSS_DEBUG
3940 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i);
3941 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
3942 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_pkts,
3946 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_nmbuf", i);
3947 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
3948 pkt_desc, CTLFLAG_RD, &sc->tx_data[i].tx_nmbuf, 0,
3949 "# of pending TX mbufs");
3950 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_gc", i);
3951 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
3952 pkt_desc, CTLFLAG_RW, &sc->tx_data[i].tx_gc,
3958 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
3960 struct emx_softc *sc = (void *)arg1;
3961 struct ifnet *ifp = &sc->arpcom.ac_if;
3962 int error, throttle;
3964 throttle = sc->int_throttle_ceil;
3965 error = sysctl_handle_int(oidp, &throttle, 0, req);
3966 if (error || req->newptr == NULL)
3968 if (throttle < 0 || throttle > 1000000000 / 256)
3973	 * Set the interrupt throttling rate in 256ns increments, then
3974	 * recalculate the sysctl value so it reflects the exact frequency.
3976 throttle = 1000000000 / 256 / throttle;
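	/*
	 * Worked example (illustrative, not from the original source):
	 * writing 6000 yields throttle = 3906250 / 6000 = 651 ITR units,
	 * and the recalculation below stores 3906250 / 651 = 6000, so
	 * the value read back matches the exact rate in effect.
	 */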
3978	/* The upper 16 bits of ITR are reserved and should be zero */
3979 if (throttle & 0xffff0000)
3983 ifnet_serialize_all(ifp);
3986 sc->int_throttle_ceil = 1000000000 / 256 / throttle;
3988 sc->int_throttle_ceil = 0;
3990 if (ifp->if_flags & IFF_RUNNING)
3991 emx_set_itr(sc, throttle);
3993 ifnet_deserialize_all(ifp);
3996 if_printf(ifp, "Interrupt moderation set to %d/sec\n",
3997 sc->int_throttle_ceil);
4003 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
4005 struct emx_softc *sc = (void *)arg1;
4006 struct ifnet *ifp = &sc->arpcom.ac_if;
4007 struct emx_txdata *tdata = &sc->tx_data[0];
4010 segs = tdata->tx_intr_nsegs;
4011 error = sysctl_handle_int(oidp, &segs, 0, req);
4012 if (error || req->newptr == NULL)
4017 ifnet_serialize_all(ifp);
4020 * Don't allow tx_intr_nsegs to become:
4021	 * o Less than oact_tx_desc
4022	 * o So large that no TX descriptor will cause a TX interrupt
4023	 *   to be generated (OACTIVE would never recover)
4024	 * o So small that it will cause tx_dd[] overflow
4026 if (segs < tdata->oact_tx_desc ||
4027 segs >= tdata->num_tx_desc - tdata->oact_tx_desc ||
4028 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) {
4034 for (i = 0; i < sc->tx_ring_cnt; ++i)
4035 sc->tx_data[i].tx_intr_nsegs = segs;
4038 ifnet_deserialize_all(ifp);
4044 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS)
4046 struct emx_softc *sc = (void *)arg1;
4047 struct ifnet *ifp = &sc->arpcom.ac_if;
4048 int error, nsegs, i;
4050 nsegs = sc->tx_data[0].tx_wreg_nsegs;
4051 error = sysctl_handle_int(oidp, &nsegs, 0, req);
4052 if (error || req->newptr == NULL)
4055 ifnet_serialize_all(ifp);
4056 for (i = 0; i < sc->tx_ring_cnt; ++i)
4057		sc->tx_data[i].tx_wreg_nsegs = nsegs;
4058 ifnet_deserialize_all(ifp);
4064 emx_dma_alloc(struct emx_softc *sc)
4069 * Create top level busdma tag
4071 error = bus_dma_tag_create(NULL, 1, 0,
4072 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
4074 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
4075 0, &sc->parent_dtag);
4077 device_printf(sc->dev, "could not create top level DMA tag\n");
4082 * Allocate transmit descriptors ring and buffers
4084 for (i = 0; i < sc->tx_ring_cnt; ++i) {
4085 error = emx_create_tx_ring(&sc->tx_data[i]);
4087 device_printf(sc->dev,
4088 "Could not setup transmit structures\n");
4094 * Allocate receive descriptors ring and buffers
4096 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4097 error = emx_create_rx_ring(&sc->rx_data[i]);
4099 device_printf(sc->dev,
4100 "Could not setup receive structures\n");
4108 emx_dma_free(struct emx_softc *sc)
4112 for (i = 0; i < sc->tx_ring_cnt; ++i) {
4113 emx_destroy_tx_ring(&sc->tx_data[i],
4114 sc->tx_data[i].num_tx_desc);
4117 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4118 emx_destroy_rx_ring(&sc->rx_data[i],
4119 sc->rx_data[i].num_rx_desc);
4122 /* Free top level busdma tag */
4123 if (sc->parent_dtag != NULL)
4124 bus_dma_tag_destroy(sc->parent_dtag);
4128 emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
4130 struct emx_softc *sc = ifp->if_softc;
4132 ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz);
4136 emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
4138 struct emx_softc *sc = ifp->if_softc;
4140 ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz);
4144 emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
4146 struct emx_softc *sc = ifp->if_softc;
4148 return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz);
4152 emx_serialize_skipmain(struct emx_softc *sc)
4154 lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1);
4158 emx_deserialize_skipmain(struct emx_softc *sc)
4160 lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1);
4166 emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
4167 boolean_t serialized)
4169 struct emx_softc *sc = ifp->if_softc;
4171 ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE,
4175 #endif /* INVARIANTS */
4177 #ifdef IFPOLL_ENABLE
4180 emx_npoll_status(struct ifnet *ifp)
4182 struct emx_softc *sc = ifp->if_softc;
4185 ASSERT_SERIALIZED(&sc->main_serialize);
4187 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
4188 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4189 callout_stop(&sc->timer);
4190 sc->hw.mac.get_link_status = 1;
4191 emx_update_link_status(sc);
4192 callout_reset(&sc->timer, hz, emx_timer, sc);
4197 emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
4199 struct emx_txdata *tdata = arg;
4201 ASSERT_SERIALIZED(&tdata->tx_serialize);
4204 emx_try_txgc(tdata, 1);
4208 emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
4210 struct emx_rxdata *rdata = arg;
4212 ASSERT_SERIALIZED(&rdata->rx_serialize);
4214 emx_rxeof(rdata, cycle);
4218 emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
4220 struct emx_softc *sc = ifp->if_softc;
4223 ASSERT_IFNET_SERIALIZED_ALL(ifp);
4228 info->ifpi_status.status_func = emx_npoll_status;
4229 info->ifpi_status.serializer = &sc->main_serialize;
4231 txr_cnt = emx_get_txring_inuse(sc, TRUE);
4232 for (i = 0; i < txr_cnt; ++i) {
4233 struct emx_txdata *tdata = &sc->tx_data[i];
4235 cpu = if_ringmap_cpumap(sc->tx_rmap, i);
4236 KKASSERT(cpu < netisr_ncpus);
4237 info->ifpi_tx[cpu].poll_func = emx_npoll_tx;
4238 info->ifpi_tx[cpu].arg = tdata;
4239 info->ifpi_tx[cpu].serializer = &tdata->tx_serialize;
4240 ifsq_set_cpuid(tdata->ifsq, cpu);
4243 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4244 struct emx_rxdata *rdata = &sc->rx_data[i];
4246 cpu = if_ringmap_cpumap(sc->rx_rmap, i);
4247 KKASSERT(cpu < netisr_ncpus);
4248 info->ifpi_rx[cpu].poll_func = emx_npoll_rx;
4249 info->ifpi_rx[cpu].arg = rdata;
4250 info->ifpi_rx[cpu].serializer = &rdata->rx_serialize;
4253 for (i = 0; i < sc->tx_ring_cnt; ++i) {
4254 struct emx_txdata *tdata = &sc->tx_data[i];
4256 ifsq_set_cpuid(tdata->ifsq,
4257 rman_get_cpuid(sc->intr_res));
4260 if (ifp->if_flags & IFF_RUNNING)
4264 #endif /* IFPOLL_ENABLE */
4267 emx_set_itr(struct emx_softc *sc, uint32_t itr)
4269 E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
4270 if (sc->hw.mac.type == e1000_82574) {
4274 * When using MSIX interrupts we need to
4275 * throttle using the EITR register
4277 for (i = 0; i < 4; ++i)
4278 E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
4283 * Disable the L0s, 82574L Errata #20
4286 emx_disable_aspm(struct emx_softc *sc)
4288 uint16_t link_cap, link_ctrl, disable;
4289 uint8_t pcie_ptr, reg;
4290 device_t dev = sc->dev;
4292 switch (sc->hw.mac.type) {
4297 * 82573 specification update
4298 * errata #8 disable L0s
4299 * errata #41 disable L1
4301 * 82571/82572 specification update
4302	 * errata #13 disable L1
4303 * errata #68 disable L0s
4305 disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
4310 * 82574 specification update errata #20
4312 * There is no need to disable L1
4314 disable = PCIEM_LNKCTL_ASPM_L0S;
4321 pcie_ptr = pci_get_pciecap_ptr(dev);
4325 link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
4326 if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
4330 if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);
4332 reg = pcie_ptr + PCIER_LINKCTRL;
4333 link_ctrl = pci_read_config(dev, reg, 2);
4334 link_ctrl &= ~disable;
4335 pci_write_config(dev, reg, link_ctrl, 2);
4339 emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp)
4341 int iphlen, hoff, thoff, ex = 0;
4346 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
4348 iphlen = m->m_pkthdr.csum_iphlen;
4349 thoff = m->m_pkthdr.csum_thlen;
4350 hoff = m->m_pkthdr.csum_lhlen;
4352 KASSERT(iphlen > 0, ("invalid ip hlen"));
4353 KASSERT(thoff > 0, ("invalid tcp hlen"));
4354 KASSERT(hoff > 0, ("invalid ether hlen"));
4356 if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
4359 if (m->m_len < hoff + iphlen + thoff + ex) {
4360 m = m_pullup(m, hoff + iphlen + thoff + ex);
4367 ip = mtodoff(m, struct ip *, hoff);
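	/*
	 * Editorial note (not in the original source): for a plain
	 * TCP/IPv4 TSO packet with hoff = 14, iphlen = 20 and
	 * thoff = 20, plus the extra 4 bytes when EMX_TXFLAG_TSO_PULLEX
	 * is set, the m_pullup() above guarantees the first 58 bytes of
	 * the chain are contiguous.
	 */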
4374 emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp,
4375 uint32_t *txd_upper, uint32_t *txd_lower)
4377 struct e1000_context_desc *TXD;
4378 int hoff, iphlen, thoff, hlen;
4379 int mss, pktlen, curr_txd;
4381 #ifdef EMX_TSO_DEBUG
4382 tdata->tso_segments++;
4385 iphlen = mp->m_pkthdr.csum_iphlen;
4386 thoff = mp->m_pkthdr.csum_thlen;
4387 hoff = mp->m_pkthdr.csum_lhlen;
4388 mss = mp->m_pkthdr.tso_segsz;
4389 pktlen = mp->m_pkthdr.len;
4391 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
4392 tdata->csum_flags == CSUM_TSO &&
4393 tdata->csum_iphlen == iphlen &&
4394 tdata->csum_lhlen == hoff &&
4395 tdata->csum_thlen == thoff &&
4396 tdata->csum_mss == mss &&
4397 tdata->csum_pktlen == pktlen) {
4398 *txd_upper = tdata->csum_txd_upper;
4399 *txd_lower = tdata->csum_txd_lower;
4400 #ifdef EMX_TSO_DEBUG
4401 tdata->tso_ctx_reused++;
4405 hlen = hoff + iphlen + thoff;
4408 * Setup a new TSO context.
4411 curr_txd = tdata->next_avail_tx_desc;
4412 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];
4414 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
4415 E1000_TXD_DTYP_D | /* Data descr type */
4416 E1000_TXD_CMD_TSE; /* Do TSE on this packet */
4418 /* IP and/or TCP header checksum calculation and insertion. */
4419 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
4422 * Start offset for header checksum calculation.
4423 * End offset for header checksum calculation.
4424	 * Offset of place to put the checksum.
4426 TXD->lower_setup.ip_fields.ipcss = hoff;
4427 TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
4428 TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);
4431 * Start offset for payload checksum calculation.
4432 * End offset for payload checksum calculation.
4433 * Offset of place to put the checksum.
4435 TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
4436 TXD->upper_setup.tcp_fields.tucse = 0;
4437 TXD->upper_setup.tcp_fields.tucso =
4438 hoff + iphlen + offsetof(struct tcphdr, th_sum);
4441 * Payload size per packet w/o any headers.
4442 * Length of all headers up to payload.
4444 TXD->tcp_seg_setup.fields.mss = htole16(mss);
4445 TXD->tcp_seg_setup.fields.hdr_len = hlen;
4446 TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
4447 E1000_TXD_CMD_DEXT | /* Extended descr */
4448 E1000_TXD_CMD_TSE | /* TSE context */
4449 E1000_TXD_CMD_IP | /* Do IP csum */
4450 E1000_TXD_CMD_TCP | /* Do TCP checksum */
4451 (pktlen - hlen)); /* Total len */
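	/*
	 * Worked example (illustrative, not from the original source):
	 * hoff = 14, iphlen = 20 and thoff = 20 give hlen = 54; with
	 * mss = 1448 and a 14534-byte TSO packet, hdr_len = 54, the
	 * length field above is 14534 - 54 = 14480 payload bytes, and
	 * the hardware emits 14480 / 1448 = 10 wire segments.
	 */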
4453 /* Save the information for this TSO context */
4454 tdata->csum_flags = CSUM_TSO;
4455 tdata->csum_lhlen = hoff;
4456 tdata->csum_iphlen = iphlen;
4457 tdata->csum_thlen = thoff;
4458 tdata->csum_mss = mss;
4459 tdata->csum_pktlen = pktlen;
4460 tdata->csum_txd_upper = *txd_upper;
4461 tdata->csum_txd_lower = *txd_lower;
4463 if (++curr_txd == tdata->num_tx_desc)
4466 KKASSERT(tdata->num_tx_desc_avail > 0);
4467 tdata->num_tx_desc_avail--;
4469 tdata->next_avail_tx_desc = curr_txd;
4474 emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
4477 return sc->tx_ring_cnt;
4483 * Remove all descriptors from the TX ring.
4485 * We want to clear all pending descriptors from the TX ring. Zeroing
4486 * happens when the HW reads the regs. We assign the ring itself as
4487 * the data of the next descriptor.  We don't care about the data since we
4488 * are about to reset the HW.
4491 emx_flush_tx_ring(struct emx_softc *sc)
4493 struct e1000_hw *hw = &sc->hw;
4497 tctl = E1000_READ_REG(hw, E1000_TCTL);
4498 E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);
4500 for (i = 0; i < sc->tx_ring_inuse; ++i) {
4501 struct emx_txdata *tdata = &sc->tx_data[i];
4502 struct e1000_tx_desc *txd;
4504 if (E1000_READ_REG(hw, E1000_TDLEN(i)) == 0)
4507 txd = &tdata->tx_desc_base[tdata->next_avail_tx_desc++];
4508 if (tdata->next_avail_tx_desc == tdata->num_tx_desc)
4509 tdata->next_avail_tx_desc = 0;
4511 /* Just use the ring as a dummy buffer addr */
4512 txd->buffer_addr = tdata->tx_desc_paddr;
4513 txd->lower.data = htole32(E1000_TXD_CMD_IFCS | 512);
4514 txd->upper.data = 0;
4516 E1000_WRITE_REG(hw, E1000_TDT(i), tdata->next_avail_tx_desc);
4522 * Remove all descriptors from the RX rings.
4524 * Mark all descriptors in the RX rings as consumed and disable the RX rings.
4527 emx_flush_rx_ring(struct emx_softc *sc)
4529 struct e1000_hw *hw = &sc->hw;
4533 rctl = E1000_READ_REG(hw, E1000_RCTL);
4534 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
4535 E1000_WRITE_FLUSH(hw);
4538 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4541 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
4542 /* Zero the lower 14 bits (prefetch and host thresholds) */
4543 rxdctl &= 0xffffc000;
4545 * Update thresholds: prefetch threshold to 31, host threshold
4546		 * to 1, and make sure the granularity is "descriptors"
4547		 * and not "cache lines".
4549 rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
4550 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
4553 /* Momentarily enable the RX rings for the changes to take effect */
4554 E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
4555 E1000_WRITE_FLUSH(hw);
4557 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
4561 * Remove all descriptors from the descriptor rings.
4563 * On I219, the descriptor rings must be emptied before resetting the HW
4564 * or before changing the device state to D3 during runtime (runtime PM).
4566 * Failure to do this will cause the HW to enter a unit hang state which
4567 * can only be released by a PCI reset of the device.
4570 emx_flush_txrx_ring(struct emx_softc *sc)
4572 struct e1000_hw *hw = &sc->hw;
4573 device_t dev = sc->dev;
4574 uint16_t hang_state;
4575 uint32_t fext_nvm11, tdlen;
4579 * First, disable MULR fix in FEXTNVM11.
4581 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
4582 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
4583 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);
4586	 * Do nothing if we're not in a faulty state, or if the TX queue is empty.
4590 for (i = 0; i < sc->tx_ring_inuse; ++i)
4591 tdlen += E1000_READ_REG(hw, E1000_TDLEN(i));
4592 hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2);
4593 if ((hang_state & EMX_FLUSH_DESC_REQUIRED) && tdlen)
4594 emx_flush_tx_ring(sc);
4597 * Recheck, maybe the fault is caused by the RX ring.
4599 hang_state = pci_read_config(dev, EMX_PCICFG_DESC_RING_STATUS, 2);
4600 if (hang_state & EMX_FLUSH_DESC_REQUIRED)
4601 emx_flush_rx_ring(sc);