igb: Improve tiny packets reception performance on low frequency CPU
sys/dev/netif/igb/if_igb.c

/*
 * Copyright (c) 2001-2011, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_igb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82575.h>
#include <dev/netif/igb/if_igb.h>

#ifdef IGB_RSS_DEBUG
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !IGB_RSS_DEBUG */
#define IGB_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* IGB_RSS_DEBUG */

#define IGB_NAME	"Intel(R) PRO/1000 "
#define IGB_DEVICE(id) \
	{ IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id }
#define IGB_DEVICE_NULL	{ 0, 0, NULL }

static struct igb_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} igb_devices[] = {
	IGB_DEVICE(82575EB_COPPER),
	IGB_DEVICE(82575EB_FIBER_SERDES),
	IGB_DEVICE(82575GB_QUAD_COPPER),
	IGB_DEVICE(82576),
	IGB_DEVICE(82576_NS),
	IGB_DEVICE(82576_NS_SERDES),
	IGB_DEVICE(82576_FIBER),
	IGB_DEVICE(82576_SERDES),
	IGB_DEVICE(82576_SERDES_QUAD),
	IGB_DEVICE(82576_QUAD_COPPER),
	IGB_DEVICE(82576_QUAD_COPPER_ET2),
	IGB_DEVICE(82576_VF),
	IGB_DEVICE(82580_COPPER),
	IGB_DEVICE(82580_FIBER),
	IGB_DEVICE(82580_SERDES),
	IGB_DEVICE(82580_SGMII),
	IGB_DEVICE(82580_COPPER_DUAL),
	IGB_DEVICE(82580_QUAD_FIBER),
	IGB_DEVICE(DH89XXCC_SERDES),
	IGB_DEVICE(DH89XXCC_SGMII),
	IGB_DEVICE(DH89XXCC_SFP),
	IGB_DEVICE(DH89XXCC_BACKPLANE),
	IGB_DEVICE(I350_COPPER),
	IGB_DEVICE(I350_FIBER),
	IGB_DEVICE(I350_SERDES),
	IGB_DEVICE(I350_SGMII),
	IGB_DEVICE(I350_VF),

	/* required last entry */
	IGB_DEVICE_NULL
};

static int	igb_probe(device_t);
static int	igb_attach(device_t);
static int	igb_detach(device_t);
static int	igb_shutdown(device_t);
static int	igb_suspend(device_t);
static int	igb_resume(device_t);

static boolean_t igb_is_valid_ether_addr(const uint8_t *);
static void	igb_setup_ifp(struct igb_softc *);
static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *);
static int	igb_tso_pullup(struct igb_tx_ring *, struct mbuf **);
static void	igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *);
static void	igb_add_sysctl(struct igb_softc *);
static int	igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static void	igb_set_ring_inuse(struct igb_softc *, boolean_t);
#ifdef IFPOLL_ENABLE
static int	igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int	igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif

static void	igb_vf_init_stats(struct igb_softc *);
static void	igb_reset(struct igb_softc *);
static void	igb_update_stats_counters(struct igb_softc *);
static void	igb_update_vf_stats_counters(struct igb_softc *);
static void	igb_update_link_status(struct igb_softc *);
static void	igb_init_tx_unit(struct igb_softc *);
static void	igb_init_rx_unit(struct igb_softc *);

static void	igb_set_vlan(struct igb_softc *);
static void	igb_set_multi(struct igb_softc *);
static void	igb_set_promisc(struct igb_softc *);
static void	igb_disable_promisc(struct igb_softc *);

static int	igb_alloc_rings(struct igb_softc *);
static void	igb_free_rings(struct igb_softc *);
static int	igb_create_tx_ring(struct igb_tx_ring *);
static int	igb_create_rx_ring(struct igb_rx_ring *);
static void	igb_free_tx_ring(struct igb_tx_ring *);
static void	igb_free_rx_ring(struct igb_rx_ring *);
static void	igb_destroy_tx_ring(struct igb_tx_ring *, int);
static void	igb_destroy_rx_ring(struct igb_rx_ring *, int);
static void	igb_init_tx_ring(struct igb_tx_ring *);
static int	igb_init_rx_ring(struct igb_rx_ring *);
static int	igb_newbuf(struct igb_rx_ring *, int, boolean_t);
static int	igb_encap(struct igb_tx_ring *, struct mbuf **, int *, int *);
static void	igb_rx_refresh(struct igb_rx_ring *, int);

static void	igb_stop(struct igb_softc *);
static void	igb_init(void *);
static int	igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	igb_media_status(struct ifnet *, struct ifmediareq *);
static int	igb_media_change(struct ifnet *);
static void	igb_timer(void *);
static void	igb_watchdog(struct ifnet *);
static void	igb_start(struct ifnet *);
#ifdef IFPOLL_ENABLE
static void	igb_npoll(struct ifnet *, struct ifpoll_info *);
static void	igb_npoll_rx(struct ifnet *, void *, int);
static void	igb_npoll_tx(struct ifnet *, void *, int);
static void	igb_npoll_status(struct ifnet *);
#endif
static void	igb_serialize(struct ifnet *, enum ifnet_serialize);
static void	igb_deserialize(struct ifnet *, enum ifnet_serialize);
static int	igb_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	igb_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	igb_intr(void *);
static void	igb_intr_shared(void *);
static void	igb_rxeof(struct igb_rx_ring *, int);
static void	igb_txeof(struct igb_tx_ring *);
static void	igb_set_eitr(struct igb_softc *, int, int);
static void	igb_enable_intr(struct igb_softc *);
static void	igb_disable_intr(struct igb_softc *);
static void	igb_init_unshared_intr(struct igb_softc *);
static void	igb_init_intr(struct igb_softc *);
static int	igb_setup_intr(struct igb_softc *);
static void	igb_set_txintr_mask(struct igb_tx_ring *, int *, int);
static void	igb_set_rxintr_mask(struct igb_rx_ring *, int *, int);
static void	igb_set_intr_mask(struct igb_softc *);
static int	igb_alloc_intr(struct igb_softc *);
static void	igb_free_intr(struct igb_softc *);
static void	igb_teardown_intr(struct igb_softc *);
static void	igb_msix_try_alloc(struct igb_softc *);
static void	igb_msix_free(struct igb_softc *, boolean_t);
static int	igb_msix_setup(struct igb_softc *);
static void	igb_msix_teardown(struct igb_softc *, int);
static void	igb_msix_rx(void *);
static void	igb_msix_tx(void *);
static void	igb_msix_status(void *);

/* Management and WOL Support */
static void	igb_get_mgmt(struct igb_softc *);
static void	igb_rel_mgmt(struct igb_softc *);
static void	igb_get_hw_control(struct igb_softc *);
static void	igb_rel_hw_control(struct igb_softc *);
static void	igb_enable_wol(device_t);

static device_method_t igb_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		igb_probe),
	DEVMETHOD(device_attach,	igb_attach),
	DEVMETHOD(device_detach,	igb_detach),
	DEVMETHOD(device_shutdown,	igb_shutdown),
	DEVMETHOD(device_suspend,	igb_suspend),
	DEVMETHOD(device_resume,	igb_resume),
	{ 0, 0 }
};

static driver_t igb_driver = {
	"igb",
	igb_methods,
	sizeof(struct igb_softc),
};

static devclass_t igb_devclass;

DECLARE_DUMMY_MODULE(if_igb);
MODULE_DEPEND(igb, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL);

static int	igb_rxd = IGB_DEFAULT_RXD;
static int	igb_txd = IGB_DEFAULT_TXD;
static int	igb_rxr = 0;
static int	igb_msi_enable = 1;
static int	igb_msix_enable = 1;
static int	igb_eee_disabled = 1;	/* Energy Efficient Ethernet */
static int	igb_fc_setting = e1000_fc_full;

/*
 * DMA Coalescing, only for i350 - default to off,
 * this feature is for power savings
 */
static int	igb_dma_coalesce = 0;

TUNABLE_INT("hw.igb.rxd", &igb_rxd);
TUNABLE_INT("hw.igb.txd", &igb_txd);
TUNABLE_INT("hw.igb.rxr", &igb_rxr);
TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable);
TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable);
TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);

/* i350 specific */
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);

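/*
 * Usage sketch (assuming the standard tunable mechanism): the knobs
 * above are read once at module load time and can be set from
 * /boot/loader.conf, e.g.:
 *
 *	hw.igb.rxd="2048"	# RX descriptors per ring
 *	hw.igb.msix.enable="0"	# fall back to MSI/legacy interrupts
 *
 * Per-device overrides (e.g. "rxr", "txd") are fetched with
 * device_getenv_int() in the attach path below.
 */
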
static __inline void
igb_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Checksum results are invalid if the Ignore Checksum (IXSM) bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
		if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) {
			mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED;
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}

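/*
 * Map an advanced RX descriptor's RSS fields into a pktinfo.  Only
 * TCP/IPv4 frames, and IPv4 frames whose UDP checksum the hardware
 * has verified, are accepted; for everything else NULL is returned
 * and the caller dispatches the packet without hardware hash
 * information.  toeplitz_hash() massages the 32-bit hardware hash
 * into the form the network stack's packet dispatch expects.
 */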
static __inline struct pktinfo *
igb_rssinfo(struct mbuf *m, struct pktinfo *pi,
    uint32_t hash, uint32_t hashtype, uint32_t staterr)
{
	switch (hashtype) {
	case E1000_RXDADV_RSSTYPE_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case E1000_RXDADV_RSSTYPE_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);
	return pi;
}

static int
igb_probe(device_t dev)
{
	const struct igb_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = igb_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			return 0;
		}
	}
	return ENXIO;
}

static int
igb_attach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	uint16_t eeprom_data;
	int error = 0, i, j, ring_max;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

#ifdef notyet
	/* SYSCTL stuff */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
	    igb_sysctl_nvm_info, "I", "NVM Information");
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
	    adapter, 0, igb_set_flowcntl, "I", "Flow Control");
#endif

	callout_init_mp(&sc->timer);
	lwkt_serialize_init(&sc->main_serialize);

	if_initname(&sc->arpcom.ac_if, device_get_name(dev),
	    device_get_unit(dev));
	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Are we a VF device? */
	if (sc->hw.mac.type == e1000_vfadapt ||
	    sc->hw.mac.type == e1000_vfadapt_i350)
		sc->vf_ifp = 1;
	else
		sc->vf_ifp = 0;

	/*
	 * Configure total supported RX/TX ring count
	 */
	switch (sc->hw.mac.type) {
	case e1000_82575:
		ring_max = IGB_MAX_RING_82575;
		break;
	case e1000_82580:
		ring_max = IGB_MAX_RING_82580;
		break;
	case e1000_i350:
		ring_max = IGB_MAX_RING_I350;
		break;
	case e1000_82576:
		ring_max = IGB_MAX_RING_82576;
		break;
	default:
		ring_max = IGB_MIN_RING;
		break;
	}
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", igb_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, ring_max);
#ifdef IGB_RSS_DEBUG
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr_debug", sc->rx_ring_cnt);
#endif
	sc->rx_ring_inuse = sc->rx_ring_cnt;
	sc->tx_ring_cnt = 1; /* XXX */

	if (sc->hw.mac.type == e1000_82575)
		sc->flags |= IGB_FLAG_TSO_IPLEN0;

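	/*
	 * Note on the ring count above (an assumption about the
	 * if_ring_count2() helper, not verified here): the requested
	 * count from the "rxr" tunable (0 means auto-select) is
	 * clamped against both the per-chip ring_max and the CPU
	 * count, and the result is kept a power of 2, which the RSS
	 * redirect table setup relies on.
	 */
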
	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto failed;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res);

	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto failed;
	}

	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = IGB_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* Allocate RX/TX rings */
	error = igb_alloc_rings(sc);
	if (error)
		goto failed;

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX CPU offset
	 */
	if (sc->rx_ring_cnt == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.rxoff", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt != 0) {
			device_printf(dev, "invalid npoll.rxoff %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->rx_npoll_off = offset;

	/*
	 * NPOLLING TX CPU offset
	 */
	offset_def = sc->rx_npoll_off;
	offset = device_getenv_int(dev, "npoll.txoff", offset_def);
	if (offset >= ncpus2) {
		device_printf(dev, "invalid npoll.txoff %d, use %d\n",
		    offset, offset_def);
		offset = offset_def;
	}
	sc->tx_npoll_off = offset;
#endif

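	/*
	 * Example of the default offset scheme above (a sketch,
	 * assuming ncpus2 == 4 and 2 RX rings): unit 0 gets
	 * npoll.rxoff 0 (rings polled on CPU0/CPU1) while unit 1 gets
	 * (2 * 1) % 4 == 2 (rings polled on CPU2/CPU3), spreading the
	 * polling load of multiple adapters across the CPUs.
	 */
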
	/* Allocate interrupt */
	error = igb_alloc_intr(sc);
	if (error)
		goto failed;

	/*
	 * Setup serializers
	 */
	i = 0;
	sc->serializes[i++] = &sc->main_serialize;

	sc->tx_serialize = i;
	for (j = 0; j < sc->tx_ring_cnt; ++j)
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;

	sc->rx_serialize = i;
	for (j = 0; j < sc->rx_ring_cnt; ++j)
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;

	sc->serialize_cnt = i;
	KKASSERT(sc->serialize_cnt <= IGB_NSERIALIZE);

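	/*
	 * The serializes[] array is laid out in a fixed order:
	 * main_serialize first, then all TX ring serializers, then all
	 * RX ring serializers.  The tx_serialize/rx_serialize indices
	 * recorded above let igb_serialize()/igb_deserialize() pick
	 * out the per-ring subsets when only TX or only RX
	 * serialization is requested.
	 */
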
	/* Allocate the appropriate stats memory */
	if (sc->vf_ifp) {
		sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		igb_vf_init_stats(sc);
	} else {
		sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF,
		    M_WAITOK | M_ZERO);
	}

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Some adapter-specific advanced features */
	if (sc->hw.mac.type >= e1000_i350) {
#ifdef notyet
		igb_set_sysctl_value(adapter, "dma_coalesce",
		    "configure dma coalesce",
		    &adapter->dma_coalesce, igb_dma_coalesce);
		igb_set_sysctl_value(adapter, "eee_disabled",
		    "enable Energy Efficient Ethernet",
		    &adapter->hw.dev_spec._82575.eee_disable,
		    igb_eee_disabled);
#else
		sc->dma_coalesce = igb_dma_coalesce;
		sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled;
#endif
		e1000_set_eee_i350(&sc->hw);
	}

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto failed;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto failed;
	}
	if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto failed;
	}

	/* Setup OS specific network interface */
	igb_setup_ifp(sc);

	/* Add sysctl tree; must be after igb_setup_ifp() */
	igb_add_sysctl(sc);

	/* Now get a good starting state */
	igb_reset(sc);

	/* Initialize statistics */
	igb_update_stats_counters(sc);

	sc->hw.mac.get_link_status = 1;
	igb_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= IGB_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	/* APME bit in EEPROM is mapped to WUC.APME */
	eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
	if (eeprom_data)
		sc->wol = E1000_WUFC_MAG;
	/* XXX disable WOL */
	sc->wol = 0;

#ifdef notyet
	/* Register for VLAN events */
	adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
#endif

#ifdef notyet
	igb_add_hw_stats(adapter);
#endif

	error = igb_setup_intr(sc);
	if (error) {
		ether_ifdetach(&sc->arpcom.ac_if);
		goto failed;
	}
	return 0;

failed:
	igb_detach(dev);
	return error;
}

static int
igb_detach(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		igb_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		/* Give control back to firmware */
		igb_rel_mgmt(sc);
		igb_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			igb_enable_wol(dev);
		}

		igb_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->mem_res != NULL) {
		igb_rel_hw_control(sc);
	}
	bus_generic_detach(dev);

	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	igb_free_intr(sc);

	if (sc->msix_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid,
		    sc->msix_mem_res);
	}
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	}

	igb_free_rings(sc);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);
	if (sc->stats != NULL)
		kfree(sc->stats, M_DEVBUF);

	return 0;
}

static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		igb_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	if_devstart(ifp);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/*
		 * As the speed/duplex settings are being
		 * changed, we need to reset the PHY.
		 */
		sc->hw.phy.reset_disable = FALSE;

		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			if_printf(ifp, "Media change is "
			    "blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= IGB_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~IGB_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_RSS)
			ifp->if_capenable ^= IFCAP_RSS;
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			igb_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return error;
}

static void
igb_init(void *xsc)
{
	struct igb_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	boolean_t polling;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_stop(sc);

	/* Get the latest MAC address; the user may have set a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	igb_reset(sc);
	igb_update_link_status(sc);

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Configure for OS presence */
	igb_get_mgmt(sc);

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	/* Configure the RX/TX rings in use */
	igb_set_ring_inuse(sc, polling);

	/* Initialize interrupt */
	igb_init_intr(sc);

	/* Prepare transmit descriptors and buffers */
	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_init_tx_ring(&sc->tx_rings[i]);
	igb_init_tx_unit(sc);

	/* Setup Multicast table */
	igb_set_multi(sc);

#if 0
	/*
	 * Figure out the desired mbuf pool
	 * for doing jumbo/packetsplit
	 */
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;
#endif

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		int error;

		error = igb_init_rx_ring(&sc->rx_rings[i]);
		if (error) {
			if_printf(ifp, "Could not setup receive structures\n");
			igb_stop(sc);
			return;
		}
	}
	igb_init_rx_unit(sc);

	/* Enable VLAN support */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		igb_set_vlan(sc);

	/* Don't lose promiscuous settings */
	igb_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX)
		sc->timer_cpuid = 0; /* XXX fixed */
	else
		sc->timer_cpuid = rman_get_cpuid(sc->intr_res);
	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* This clears any pending interrupts */
	E1000_READ_REG(&sc->hw, E1000_ICR);

	/*
	 * Only enable interrupts if we are not polling; make sure
	 * they are off otherwise.
	 */
	if (polling) {
		igb_disable_intr(sc);
	} else {
		igb_enable_intr(sc);
		E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Set Energy Efficient Ethernet */
	e1000_set_eee_i350(&sc->hw);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy.reset_disable = TRUE;
}

static void
igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct igb_softc *sc = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;

		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

static int
igb_media_change(struct ifnet *ifp)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return EINVAL;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	igb_init(sc);

	return 0;
}

static void
igb_set_promisc(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_enabled);
		return;
	}

	reg = E1000_READ_REG(hw, E1000_RCTL);
	if (ifp->if_flags & IFF_PROMISC) {
		reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg |= E1000_RCTL_MPE;
		reg &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);
	}
}

static void
igb_disable_promisc(struct igb_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	uint32_t reg;

	if (sc->vf_ifp) {
		e1000_promisc_set_vf(hw, e1000_promisc_disabled);
		return;
	}
	reg = E1000_READ_REG(hw, E1000_RCTL);
	reg &= ~E1000_RCTL_UPE;
	reg &= ~E1000_RCTL_MPE;
	E1000_WRITE_REG(hw, E1000_RCTL, reg);
}

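/*
 * Build the hardware multicast filter from the interface's address
 * list.  If the list would overflow MAX_NUM_MULTICAST_ADDRESSES, the
 * filter is bypassed instead by setting RCTL.MPE, i.e. the chip then
 * accepts all multicast frames.
 */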
static void
igb_set_multi(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = sc->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta, mcnt);
	}
}

static void
igb_timer(void *xsc)
{
	struct igb_softc *sc = xsc;

	lwkt_serialize_enter(&sc->main_serialize);

	igb_update_link_status(sc);
	igb_update_stats_counters(sc);

	callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid);

	lwkt_serialize_exit(&sc->main_serialize);
}

static void
igb_update_link_status(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	uint32_t link_check, thstat, ctrl;

	link_check = thstat = ctrl = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = hw->mac.serdes_has_link;
		break;

	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* FALL THROUGH */
	default:
		break;
	}

	/* Check for thermal downshift or shutdown */
	if (hw->mac.type == e1000_i350) {
		thstat = E1000_READ_REG(hw, E1000_THSTAT);
		ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
	}

	/* Now we check if a transition has happened */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw,
		    &sc->link_speed, &sc->link_duplex);
		if (bootverbose) {
			if_printf(ifp, "Link is up %d Mbps %s\n",
			    sc->link_speed,
			    sc->link_duplex == FULL_DUPLEX ?
			    "Full Duplex" : "Half Duplex");
		}
		sc->link_active = 1;

		ifp->if_baudrate = sc->link_speed * 1000000;
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_LINK_THROTTLE))
			if_printf(ifp, "Link: thermal downshift\n");
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			if_printf(ifp, "Link is Down\n");
		if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
		    (thstat & E1000_THSTAT_PWR_DOWN))
			if_printf(ifp, "Link: thermal shutdown\n");
		sc->link_active = 0;
		/* This can sleep */
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
igb_stop(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	igb_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	e1000_led_off(&sc->hw);
	e1000_cleanup_led(&sc->hw);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		igb_free_tx_ring(&sc->tx_rings[i]);
	for (i = 0; i < sc->rx_ring_cnt; ++i)
		igb_free_rx_ring(&sc->rx_rings[i]);
}

static void
igb_reset(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct e1000_hw *hw = &sc->hw;
	struct e1000_fc_info *fc = &hw->fc;
	uint32_t pba = 0;
	uint16_t hwm;

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;

	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;

	case e1000_82580:
	case e1000_i350:
	case e1000_vfadapt_i350:
		pba = E1000_READ_REG(hw, E1000_RXPBS);
		pba = e1000_rxpbs_adjust_82580(pba);
		break;
		/* XXX pba = E1000_PBA_35K; */

	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) {
		uint32_t tx_space, min_tx, min_rx;

		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;

		min_tx = (sc->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;
		min_rx = sc->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;
		if (tx_space < min_tx && (min_tx - tx_space) < pba) {
			pba = pba - (min_tx - tx_space);
			/*
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * sc->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}
	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;

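	/*
	 * Worked example of the computation above (a sketch, assuming
	 * an 82575 where E1000_PBA_32K is 32, i.e. a 32KB RX buffer,
	 * and standard 1518-byte frames):
	 *   hwm = min(32768 * 9 / 10, 32768 - 2 * 1518) = 29491
	 * so high_water = 29491 & 0xFFF8 = 29488 and low_water = 29480.
	 */
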
	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	if (e1000_init_hw(hw) < 0)
		if_printf(ifp, "Hardware Initialization Failed\n");

	/* Setup DMA Coalescing */
	if (hw->mac.type == e1000_i350 && sc->dma_coalesce) {
		uint32_t reg;

		hwm = (pba - 4) << 10;
		reg = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT)
		    & E1000_DMACR_DMACTHR_MASK;

		/* transition to L0x or L1 if available..*/
		reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

		/* timer = +-1000 usec in 32usec intervals */
		reg |= (1000 >> 5);
		E1000_WRITE_REG(hw, E1000_DMACR, reg);

		/* No lower threshold */
		E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);

		/* set hwm to PBA - 2 * max frame size */
		E1000_WRITE_REG(hw, E1000_FCRTC, hwm);

		/* Set the interval before transition */
		reg = E1000_READ_REG(hw, E1000_DMCTLX);
		reg |= 0x800000FF; /* 255 usec */
		E1000_WRITE_REG(hw, E1000_DMCTLX, reg);

		/* free space in tx packet buffer to wake from DMA coal */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (20480 - (2 * sc->max_frame_size)) >> 6);

		/* make low power state decision controlled by DMA coal */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg | E1000_PCIEMISC_LX_DECISION);
		if_printf(ifp, "DMA Coalescing enabled\n");
	}

	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
}

static void
igb_setup_ifp(struct igb_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = igb_init;
	ifp->if_ioctl = igb_ioctl;
	ifp->if_start = igb_start;
	ifp->if_serialize = igb_serialize;
	ifp->if_deserialize = igb_deserialize;
	ifp->if_tryserialize = igb_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = igb_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = igb_npoll;
#endif
	ifp->if_watchdog = igb_watchdog;

	ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities =
	    IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO;
	if (IGB_ENABLE_HWRSS(sc))
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO;

	/*
	 * Tell the upper layer(s) we support long frames
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK, igb_media_change, igb_media_status);
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}

static void
igb_add_sysctl(struct igb_softc *sc)
{
	char node[32];
	int i;

	sysctl_ctx_init(&sc->sysctl_ctx);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		device_printf(sc->dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		SYSCTL_ADD_PROC(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree),
		    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, igb_sysctl_intr_rate, "I", "interrupt rate");
	} else {
		for (i = 0; i < sc->msix_cnt; ++i) {
			struct igb_msix_data *msix = &sc->msix_data[i];

			ksnprintf(node, sizeof(node), "msix%d_rate", i);
			SYSCTL_ADD_PROC(&sc->sysctl_ctx,
			    SYSCTL_CHILDREN(sc->sysctl_tree),
			    OID_AUTO, node, CTLTYPE_INT | CTLFLAG_RW,
			    msix, 0, igb_sysctl_msix_rate, "I",
			    msix->msix_rate_desc);
		}
	}

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# of segments per TX interrupt");

	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLFLAG_RW,
	    &sc->tx_rings[0].wreg_nsegs, 0,
	    "# of segments before write to hardware register");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif

#ifdef IGB_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0,
	    "RSS debug level");
#endif
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
#ifdef IGB_RSS_DEBUG
		ksnprintf(node, sizeof(node), "rx%d_pkt", i);
		SYSCTL_ADD_ULONG(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets");
#endif
		ksnprintf(node, sizeof(node), "rx%d_wreg", i);
		SYSCTL_ADD_INT(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node,
		    CTLFLAG_RW, &sc->rx_rings[i].rx_wreg, 0,
		    "# of segments before write to hardware register");
	}
}

static int
igb_alloc_rings(struct igb_softc *sc)
{
	int error, i;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &sc->parent_tag);
	if (error) {
		device_printf(sc->dev, "could not create top level DMA tag\n");
		return error;
	}

	/*
	 * Allocate TX descriptor rings and buffers
	 */
	sc->tx_rings = kmalloc_cachealign(
	    sizeof(struct igb_tx_ring) * sc->tx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct igb_tx_ring *txr = &sc->tx_rings[i];

		/* Set up some basics */
		txr->sc = sc;
		txr->me = i;
		lwkt_serialize_init(&txr->tx_serialize);

		error = igb_create_tx_ring(txr);
		if (error)
			return error;
	}

	/*
	 * Allocate RX descriptor rings and buffers
	 */
	sc->rx_rings = kmalloc_cachealign(
	    sizeof(struct igb_rx_ring) * sc->rx_ring_cnt,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		struct igb_rx_ring *rxr = &sc->rx_rings[i];

		/* Set up some basics */
		rxr->sc = sc;
		rxr->me = i;
		lwkt_serialize_init(&rxr->rx_serialize);

		error = igb_create_rx_ring(rxr);
		if (error)
			return error;
	}

	return 0;
}

static void
igb_free_rings(struct igb_softc *sc)
{
	int i;

	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			igb_destroy_tx_ring(txr, txr->num_tx_desc);
		}
		kfree(sc->tx_rings, M_DEVBUF);
	}

	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i) {
			struct igb_rx_ring *rxr = &sc->rx_rings[i];

			igb_destroy_rx_ring(rxr, rxr->num_rx_desc);
		}
		kfree(sc->rx_rings, M_DEVBUF);
	}
}

static int
igb_create_tx_ring(struct igb_tx_ring *txr)
{
	int tsize, error, i, ntxd;

	/*
	 * Validate number of transmit descriptors.  It must not exceed
	 * hardware maximum, and must be multiple of IGB_DBA_ALIGN.
	 */
	ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd);
	if ((ntxd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN != 0 ||
	    ntxd > IGB_MAX_TXD || ntxd < IGB_MIN_TXD) {
		device_printf(txr->sc->dev,
		    "Using %d TX descriptors instead of %d!\n",
		    IGB_DEFAULT_TXD, ntxd);
		txr->num_tx_desc = IGB_DEFAULT_TXD;
	} else {
		txr->num_tx_desc = ntxd;
	}

	/*
	 * Allocate TX descriptor ring
	 */
	tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc),
	    IGB_DBA_ALIGN);
	txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
	    &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr);
	if (txr->txdma.dma_vaddr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX Descriptor memory\n");
		return ENOMEM;
	}
	txr->tx_base = txr->txdma.dma_vaddr;
	bzero(txr->tx_base, tsize);

	tsize = __VM_CACHELINE_ALIGN(
	    sizeof(struct igb_tx_buf) * txr->num_tx_desc);
	txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Allocate TX head write-back buffer
	 */
	txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag,
	    __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK,
	    &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr);
	if (txr->tx_hdr == NULL) {
		device_printf(txr->sc->dev,
		    "Unable to allocate TX head write-back buffer\n");
		return ENOMEM;
	}

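	/*
	 * With head write-back enabled the NIC DMAs the TX consumer
	 * index into this cacheline-aligned buffer instead of setting
	 * per-descriptor done bits, so igb_txeof() can reap completed
	 * descriptors by polling a single memory word.
	 */
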
	/*
	 * Create DMA tag for TX buffers
	 */
	error = bus_dma_tag_create(txr->sc->parent_tag,
	    1, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    IGB_TSO_SIZE,	/* maxsize */
	    IGB_MAX_SCATTER,	/* nsegments */
	    PAGE_SIZE,		/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
	    BUS_DMA_ONEBPAGE,	/* flags */
	    &txr->tx_tag);
	if (error) {
		device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n");
		kfree(txr->tx_buf, M_DEVBUF);
		txr->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for TX buffers
	 */
	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		error = bus_dmamap_create(txr->tx_tag,
		    BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map);
		if (error) {
			device_printf(txr->sc->dev,
			    "Unable to create TX DMA map\n");
			igb_destroy_tx_ring(txr, i);
			return error;
		}
	}

	/*
	 * Initialize various watermarks
	 */
	txr->spare_desc = IGB_TX_SPARE;
	txr->intr_nsegs = txr->num_tx_desc / 16;
	txr->wreg_nsegs = 8;
	txr->oact_hi_desc = txr->num_tx_desc / 2;
	txr->oact_lo_desc = txr->num_tx_desc / 8;
	if (txr->oact_lo_desc > IGB_TX_OACTIVE_MAX)
		txr->oact_lo_desc = IGB_TX_OACTIVE_MAX;
	if (txr->oact_lo_desc < txr->spare_desc + IGB_TX_RESERVED)
		txr->oact_lo_desc = txr->spare_desc + IGB_TX_RESERVED;
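
	/*
	 * Sketch of the arithmetic above (assuming IGB_DEFAULT_TXD is
	 * 1024 and that oact_lo/oact_hi gate the OACTIVE flag): a TX
	 * completion interrupt is requested roughly every 64 queued
	 * segments (intr_nsegs), OACTIVE is asserted when fewer than
	 * 128 descriptors remain and cleared once 512 are free again.
	 */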

	return 0;
}

static void
igb_free_tx_ring(struct igb_tx_ring *txr)
{
	int i;

	for (i = 0; i < txr->num_tx_desc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		if (txbuf->m_head != NULL) {
			bus_dmamap_unload(txr->tx_tag, txbuf->map);
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
	}
}

static void
igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc)
{
	int i;

	if (txr->txdma.dma_vaddr != NULL) {
		bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map);
		bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr,
		    txr->txdma.dma_map);
		bus_dma_tag_destroy(txr->txdma.dma_tag);
		txr->txdma.dma_vaddr = NULL;
	}

	if (txr->tx_hdr != NULL) {
		bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap);
		bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr,
		    txr->tx_hdr_dmap);
		bus_dma_tag_destroy(txr->tx_hdr_dtag);
		txr->tx_hdr = NULL;
	}

	if (txr->tx_buf == NULL)
		return;

	for (i = 0; i < ndesc; ++i) {
		struct igb_tx_buf *txbuf = &txr->tx_buf[i];

		KKASSERT(txbuf->m_head == NULL);
		bus_dmamap_destroy(txr->tx_tag, txbuf->map);
	}
	bus_dma_tag_destroy(txr->tx_tag);

	kfree(txr->tx_buf, M_DEVBUF);
	txr->tx_buf = NULL;
}

static void
igb_init_tx_ring(struct igb_tx_ring *txr)
{
	/* Clear the old descriptor contents */
	bzero(txr->tx_base,
	    sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc);

	/* Clear TX head write-back buffer */
	*(txr->tx_hdr) = 0;

	/* Reset indices */
	txr->next_avail_desc = 0;
	txr->next_to_clean = 0;
	txr->tx_nsegs = 0;

	/* Set number of descriptors available */
	txr->tx_avail = txr->num_tx_desc;
}
1819
1820static void
1821igb_init_tx_unit(struct igb_softc *sc)
1822{
1823 struct e1000_hw *hw = &sc->hw;
1824 uint32_t tctl;
1825 int i;
1826
1827 /* Setup the Tx Descriptor Rings */
27866bf1 1828 for (i = 0; i < sc->tx_ring_cnt; ++i) {
1f7e3916
SZ
1829 struct igb_tx_ring *txr = &sc->tx_rings[i];
1830 uint64_t bus_addr = txr->txdma.dma_paddr;
c3162c4e 1831 uint64_t hdr_paddr = txr->tx_hdr_paddr;
1f7e3916 1832 uint32_t txdctl = 0;
b6220144 1833 uint32_t dca_txctrl;
1f7e3916
SZ
1834
1835 E1000_WRITE_REG(hw, E1000_TDLEN(i),
91b8700a 1836 txr->num_tx_desc * sizeof(struct e1000_tx_desc));
1f7e3916
SZ
1837 E1000_WRITE_REG(hw, E1000_TDBAH(i),
1838 (uint32_t)(bus_addr >> 32));
1839 E1000_WRITE_REG(hw, E1000_TDBAL(i),
1840 (uint32_t)bus_addr);
1841
1842 /* Setup the HW Tx Head and Tail descriptor pointers */
1843 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
1844 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
1845
b6220144
SZ
1846 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i));
1847 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1848 E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl);
1849
54691ff1
SZ
1850 /*
1851 * Don't set WB_on_EITR:
1852 * - 82575 does not have it
1853 * - It almost has no effect on 82576, see:
1854 * 82576 specification update errata #26
1855 * - It causes unnecessary bus traffic
1856 */
1857 E1000_WRITE_REG(hw, E1000_TDWBAH(i),
1858 (uint32_t)(hdr_paddr >> 32));
1859 E1000_WRITE_REG(hw, E1000_TDWBAL(i),
1860 ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE);
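		/*
		 * With head write-back enabled, the hardware DMAs the
		 * index of the last processed descriptor into *tx_hdr,
		 * so igb_txeof() can reap finished descriptors without
		 * an expensive register read of TDH.
		 */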
1861
1862 /*
1863 * WTHRESH is ignored by the hardware, since header
1864 * write back mode is used.
1865 */
1866 txdctl |= IGB_TX_PTHRESH;
1867 txdctl |= IGB_TX_HTHRESH << 8;
1868 txdctl |= IGB_TX_WTHRESH << 16;
1869 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1870 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
1871 }
1872
1873 if (sc->vf_ifp)
1874 return;
1875
1876 e1000_config_collision_dist(hw);
1877
1878 /* Program the Transmit Control Register */
1879 tctl = E1000_READ_REG(hw, E1000_TCTL);
1880 tctl &= ~E1000_TCTL_CT;
1881 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
1882 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
1883
1884 /* This write will effectively turn on the transmit unit. */
1885 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1886}
1887
1888static boolean_t
1889igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp)
1890{
1891 struct e1000_adv_tx_context_desc *TXD;
1892 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
1893 int ehdrlen, ctxd, ip_hlen = 0;
1894 boolean_t offload = TRUE;
1895
1896 if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0)
1897 offload = FALSE;
1898
1899 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
1900
1901 ctxd = txr->next_avail_desc;
1902 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];
1903
1904 /*
1905 * With advanced descriptors the VLAN tag must be placed in the
1906 * context descriptor, so one is needed even when VLAN tagging
1907 * is the only offload requested.
1908 */
1909 if (mp->m_flags & M_VLANTAG) {
1910 uint16_t vlantag;
1911
1912 vlantag = htole16(mp->m_pkthdr.ether_vlantag);
1913 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
1914 } else if (!offload) {
1915 return FALSE;
1916 }
1917
1918 ehdrlen = mp->m_pkthdr.csum_lhlen;
1919 KASSERT(ehdrlen > 0, ("invalid ether hlen"));
1920
1921 /* Set the ether header length */
1922 vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
1923 if (mp->m_pkthdr.csum_flags & CSUM_IP) {
1924 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
1925 ip_hlen = mp->m_pkthdr.csum_iphlen;
1926 KASSERT(ip_hlen > 0, ("invalid ip hlen"));
1927 }
1928 vlan_macip_lens |= ip_hlen;
1929
1930 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
1931 if (mp->m_pkthdr.csum_flags & CSUM_TCP)
1932 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
1933 else if (mp->m_pkthdr.csum_flags & CSUM_UDP)
1934 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
1935
1936 /* 82575 needs the queue index added */
1937 if (txr->sc->hw.mac.type == e1000_82575)
1938 mss_l4len_idx = txr->me << 4;
1939
1940 /* Now copy bits into descriptor */
1941 TXD->vlan_macip_lens = htole32(vlan_macip_lens);
1942 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
1943 TXD->seqnum_seed = htole32(0);
1944 TXD->mss_l4len_idx = htole32(mss_l4len_idx);
1945
1946 /* We've consumed the first desc, adjust counters */
1947 if (++ctxd == txr->num_tx_desc)
1948 ctxd = 0;
1949 txr->next_avail_desc = ctxd;
1950 --txr->tx_avail;
1951
1952 return offload;
1953}
1954
1955static void
1956igb_txeof(struct igb_tx_ring *txr)
1957{
1958 struct ifnet *ifp = &txr->sc->arpcom.ac_if;
1959 int first, hdr, avail;
1960
1961 if (txr->tx_avail == txr->num_tx_desc)
1962 return;
1963
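	/*
	 * *tx_hdr is the head write-back area armed in
	 * igb_init_tx_unit(); descriptors in [next_to_clean, hdr)
	 * have been processed by the hardware and their mbufs can
	 * be freed below.
	 */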
1964 first = txr->next_to_clean;
1965 hdr = *(txr->tx_hdr);
1966
1967 if (first == hdr)
1968 return;
1969
1970 avail = txr->tx_avail;
1971 while (first != hdr) {
1972 struct igb_tx_buf *txbuf = &txr->tx_buf[first];
1973
1974 ++avail;
1975 if (txbuf->m_head) {
1976 bus_dmamap_unload(txr->tx_tag, txbuf->map);
1977 m_freem(txbuf->m_head);
1978 txbuf->m_head = NULL;
1979 ++ifp->if_opackets;
1f7e3916 1980 }
1981 if (++first == txr->num_tx_desc)
1982 first = 0;
1983 }
1984 txr->next_to_clean = first;
1985 txr->tx_avail = avail;
1986
1987 /*
1988 * If we have a minimum free, clear IFF_OACTIVE
1989 * to tell the stack that it is OK to send packets.
1990 */
1991 if (IGB_IS_NOT_OACTIVE(txr)) {
1992 ifp->if_flags &= ~IFF_OACTIVE;
1993
1994 /*
1995 * We have enough TX descriptors, turn off
1996 * the watchdog. We allow a small number of
1997 * packets (roughly intr_nsegs) to remain
1998 * pending on the transmit ring.
1999 */
2000 ifp->if_timer = 0;
2001 }
2002}
2003
2004static int
2005igb_create_rx_ring(struct igb_rx_ring *rxr)
2006{
2007 int rsize, i, error, nrxd;
2008
2009 /*
2010 * Validate number of receive descriptors. It must not exceed
2011 * the hardware maximum, and must be a multiple of IGB_DBA_ALIGN.
2012 */
2013 nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd);
2014 if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 ||
2015 nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) {
2016 device_printf(rxr->sc->dev,
2017 "Using %d RX descriptors instead of %d!\n",
2018 IGB_DEFAULT_RXD, nrxd);
2019 rxr->num_rx_desc = IGB_DEFAULT_RXD;
2020 } else {
2021 rxr->num_rx_desc = nrxd;
2022 }
2023
2024 /*
2025 * Allocate RX descriptor ring
2026 */
2027 rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc),
2028 IGB_DBA_ALIGN);
2029 rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag,
2030 IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK,
2031 &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map,
2032 &rxr->rxdma.dma_paddr);
2033 if (rxr->rxdma.dma_vaddr == NULL) {
2034 device_printf(rxr->sc->dev,
2035 "Unable to allocate RxDescriptor memory\n");
2036 return ENOMEM;
2037 }
2038 rxr->rx_base = rxr->rxdma.dma_vaddr;
2039 bzero(rxr->rx_base, rsize);
2040
2041 rsize = __VM_CACHELINE_ALIGN(
2042 sizeof(struct igb_rx_buf) * rxr->num_rx_desc);
2043 rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO);
2044
2045 /*
2046 * Create DMA tag for RX buffers
2047 */
2048 error = bus_dma_tag_create(rxr->sc->parent_tag,
2049 1, 0, /* alignment, bounds */
2050 BUS_SPACE_MAXADDR, /* lowaddr */
2051 BUS_SPACE_MAXADDR, /* highaddr */
2052 NULL, NULL, /* filter, filterarg */
2053 MCLBYTES, /* maxsize */
2054 1, /* nsegments */
2055 MCLBYTES, /* maxsegsize */
2056 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
2057 &rxr->rx_tag);
2058 if (error) {
2059 device_printf(rxr->sc->dev,
2060 "Unable to create RX payload DMA tag\n");
2061 kfree(rxr->rx_buf, M_DEVBUF);
2062 rxr->rx_buf = NULL;
2063 return error;
2064 }
2065
2066 /*
2067 * Create spare DMA map for RX buffers
2068 */
2069 error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK,
2070 &rxr->rx_sparemap);
2071 if (error) {
2072 device_printf(rxr->sc->dev,
2073 "Unable to create spare RX DMA maps\n");
2074 bus_dma_tag_destroy(rxr->rx_tag);
2075 kfree(rxr->rx_buf, M_DEVBUF);
2076 rxr->rx_buf = NULL;
2077 return error;
2078 }
2079
2080 /*
2081 * Create DMA maps for RX buffers
2082 */
2083 for (i = 0; i < rxr->num_rx_desc; i++) {
2084 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2085
2086 error = bus_dmamap_create(rxr->rx_tag,
2087 BUS_DMA_WAITOK, &rxbuf->map);
2088 if (error) {
2089 device_printf(rxr->sc->dev,
2090 "Unable to create RX DMA maps\n");
2091 igb_destroy_rx_ring(rxr, i);
2092 return error;
2093 }
2094 }
2095
2096 /*
2097 * Initialize various watermarks
2098 */
2099 rxr->rx_wreg = 32;
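	/*
	 * rx_wreg controls how often igb_rxeof() writes RDT while
	 * refilling the ring: batching the tail updates, instead of
	 * one MMIO write per received packet, is what keeps tiny
	 * packet reception cheap on low frequency CPUs.
	 */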
2100
2101 return 0;
2102}
2103
2104static void
2105igb_free_rx_ring(struct igb_rx_ring *rxr)
2106{
2107 int i;
2108
2109 for (i = 0; i < rxr->num_rx_desc; ++i) {
2110 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2111
2112 if (rxbuf->m_head != NULL) {
2113 bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
2114 m_freem(rxbuf->m_head);
2115 rxbuf->m_head = NULL;
2116 }
2117 }
2118
2119 if (rxr->fmp != NULL)
2120 m_freem(rxr->fmp);
2121 rxr->fmp = NULL;
2122 rxr->lmp = NULL;
2123}
2124
2125static void
2126igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc)
2127{
2128 int i;
2129
2130 if (rxr->rxdma.dma_vaddr != NULL) {
2131 bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map);
2132 bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr,
2133 rxr->rxdma.dma_map);
2134 bus_dma_tag_destroy(rxr->rxdma.dma_tag);
2135 rxr->rxdma.dma_vaddr = NULL;
2136 }
2137
2138 if (rxr->rx_buf == NULL)
2139 return;
2140
2141 for (i = 0; i < ndesc; ++i) {
2142 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2143
2144 KKASSERT(rxbuf->m_head == NULL);
2145 bus_dmamap_destroy(rxr->rx_tag, rxbuf->map);
2146 }
2147 bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap);
2148 bus_dma_tag_destroy(rxr->rx_tag);
2149
2150 kfree(rxr->rx_buf, M_DEVBUF);
2151 rxr->rx_buf = NULL;
2152}
2153
2154static void
2155igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf)
2156{
2157 rxd->read.pkt_addr = htole64(rxbuf->paddr);
2158 rxd->wb.upper.status_error = 0;
2159}
2160
2161static int
2162igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait)
2163{
2164 struct mbuf *m;
2165 bus_dma_segment_t seg;
2166 bus_dmamap_t map;
2167 struct igb_rx_buf *rxbuf;
2168 int error, nseg;
2169
2170 m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2171 if (m == NULL) {
2172 if (wait) {
2173 if_printf(&rxr->sc->arpcom.ac_if,
2174 "Unable to allocate RX mbuf\n");
2175 }
2176 return ENOBUFS;
2177 }
2178 m->m_len = m->m_pkthdr.len = MCLBYTES;
2179
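	/*
	 * When the largest possible frame still fits after shifting,
	 * offset the data by ETHER_ALIGN (2 bytes) so the IP header
	 * following the 14-byte Ethernet header lands on a 32-bit
	 * boundary.
	 */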
2180 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN)
2181 m_adj(m, ETHER_ALIGN);
2182
2183 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag,
2184 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
2185 if (error) {
2186 m_freem(m);
2187 if (wait) {
2188 if_printf(&rxr->sc->arpcom.ac_if,
2189 "Unable to load RX mbuf\n");
2190 }
2191 return error;
2192 }
2193
2194 rxbuf = &rxr->rx_buf[i];
2195 if (rxbuf->m_head != NULL)
2196 bus_dmamap_unload(rxr->rx_tag, rxbuf->map);
2197
2198 map = rxbuf->map;
2199 rxbuf->map = rxr->rx_sparemap;
2200 rxr->rx_sparemap = map;
2201
2202 rxbuf->m_head = m;
2203 rxbuf->paddr = seg.ds_addr;
2204
2205 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf);
2206 return 0;
2207}
2208
2209static int
2210igb_init_rx_ring(struct igb_rx_ring *rxr)
2211{
2212 int i;
2213
2214 /* Clear the ring contents */
2215 bzero(rxr->rx_base,
2216 rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc));
2217
2218 /* Now replenish the ring mbufs */
2219 for (i = 0; i < rxr->num_rx_desc; ++i) {
2220 int error;
2221
2222 error = igb_newbuf(rxr, i, TRUE);
2223 if (error)
2224 return error;
2225 }
2226
2227 /* Setup our descriptor indices */
2228 rxr->next_to_check = 0;
2229
2230 rxr->fmp = NULL;
2231 rxr->lmp = NULL;
2232 rxr->discard = FALSE;
2233
2234 return 0;
2235}
2236
2237static void
2238igb_init_rx_unit(struct igb_softc *sc)
2239{
2240 struct ifnet *ifp = &sc->arpcom.ac_if;
2241 struct e1000_hw *hw = &sc->hw;
2242 uint32_t rctl, rxcsum, srrctl = 0;
2243 int i;
2244
2245 /*
2246 * Make sure receives are disabled while setting
2247 * up the descriptor ring
2248 */
2249 rctl = E1000_READ_REG(hw, E1000_RCTL);
2250 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2251
2252#if 0
2253 /*
2254 ** Set up for header split
2255 */
2256 if (igb_header_split) {
2257 /* Use a standard mbuf for the header */
2258 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2259 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2260 } else
2261#endif
2262 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2263
2264 /*
2265 ** Set up for jumbo frames
2266 */
2267 if (ifp->if_mtu > ETHERMTU) {
2268 rctl |= E1000_RCTL_LPE;
2269#if 0
2270 if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
2271 srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2272 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
2273 } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
2274 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2275 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
2276 }
2277 /* Set maximum packet len */
2278 psize = adapter->max_frame_size;
2279 /* are we on a vlan? */
2280 if (adapter->ifp->if_vlantrunk != NULL)
2281 psize += VLAN_TAG_SIZE;
2282 E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
2283#else
2284 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2285 rctl |= E1000_RCTL_SZ_2048;
2286#endif
2287 } else {
2288 rctl &= ~E1000_RCTL_LPE;
2289 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2290 rctl |= E1000_RCTL_SZ_2048;
2291 }
2292
2293 /* Setup the Base and Length of the Rx Descriptor Rings */
2294 for (i = 0; i < sc->rx_ring_inuse; ++i) {
2295 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2296 uint64_t bus_addr = rxr->rxdma.dma_paddr;
2297 uint32_t rxdctl;
2298
2299 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2300 rxr->num_rx_desc * sizeof(struct e1000_rx_desc));
2301 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2302 (uint32_t)(bus_addr >> 32));
2303 E1000_WRITE_REG(hw, E1000_RDBAL(i),
2304 (uint32_t)bus_addr);
2305 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2306 /* Enable this Queue */
2307 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2308 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2309 rxdctl &= 0xFFF00000;
2310 rxdctl |= IGB_RX_PTHRESH;
2311 rxdctl |= IGB_RX_HTHRESH << 8;
2312 /*
2313 * Don't set WTHRESH to a value above 1 on 82576, see:
2314 * 82576 specification update errata #26
2315 */
2316 rxdctl |= IGB_RX_WTHRESH << 16;
2317 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2318 }
2319
2320 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM);
2321 rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE);
2322
2323 /*
2324 * Receive Checksum Offload for TCP and UDP
2325 *
2326 * Checksum offloading is also enabled if multiple receive
2327 * queues are to be supported, since we need it to figure out
2328 * fragments.
2329 */
2330 if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) {
2331 /*
2332 * NOTE:
2333 * PCSD must be enabled to enable multiple
2334 * receive queues.
2335 */
2336 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2337 E1000_RXCSUM_PCSD;
2338 } else {
2339 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
2340 E1000_RXCSUM_PCSD);
2341 }
2342 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum);
2343
2344 if (IGB_ENABLE_HWRSS(sc)) {
2345 uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE];
2346 uint32_t reta_shift;
2347 int j, r;
2348
2349 /*
2350 * NOTE:
2351 * When we reach here, RSS has already been disabled
2352 * in igb_stop(), so we could safely configure RSS key
2353 * and redirect table.
2354 */
2355
2356 /*
2357 * Configure RSS key
2358 */
2359 toeplitz_get_key(key, sizeof(key));
2360 for (i = 0; i < IGB_NRSSRK; ++i) {
2361 uint32_t rssrk;
2362
2363 rssrk = IGB_RSSRK_VAL(key, i);
2364 IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk);
2365
2366 E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk);
2367 }
2368
2369 /*
2370 * Configure RSS redirect table in the following fashion:
2371 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
2372 */
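	/*
	 * Illustrative example: with rx_ring_inuse == 2 the entries
	 * written below cycle 0,1,0,1,..., so a hash whose low bits
	 * are 5 selects rdr_table[5] and is steered to ring 1.
	 */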
2373 reta_shift = IGB_RETA_SHIFT;
2374 if (hw->mac.type == e1000_82575)
2375 reta_shift = IGB_RETA_SHIFT_82575;
2376
2377 r = 0;
2378 for (j = 0; j < IGB_NRETA; ++j) {
2379 uint32_t reta = 0;
2380
2381 for (i = 0; i < IGB_RETA_SIZE; ++i) {
2382 uint32_t q;
2383
2384 q = (r % sc->rx_ring_inuse) << reta_shift;
2385 reta |= q << (8 * i);
2386 ++r;
2387 }
2388 IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta);
2389 E1000_WRITE_REG(hw, E1000_RETA(j), reta);
2390 }
2391
2392 /*
2393 * Enable multiple receive queues.
2394 * Enable IPv4 RSS standard hash functions.
2395 * Disable RSS interrupt on 82575
2396 */
2397 E1000_WRITE_REG(&sc->hw, E1000_MRQC,
2398 E1000_MRQC_ENABLE_RSS_4Q |
2399 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2400 E1000_MRQC_RSS_FIELD_IPV4);
2401 }
2402
2403 /* Setup the Receive Control Register */
2404 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2405 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2406 E1000_RCTL_RDMTS_HALF |
2407 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2408 /* Strip CRC bytes. */
2409 rctl |= E1000_RCTL_SECRC;
2410 /* Make sure VLAN Filters are off */
2411 rctl &= ~E1000_RCTL_VFE;
2412 /* Don't store bad packets */
2413 rctl &= ~E1000_RCTL_SBP;
2414
2415 /* Enable Receives */
2416 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2417
2418 /*
2419 * Setup the HW Rx Head and Tail Descriptor Pointers
2420 * - needs to be after enable
2421 */
2422 for (i = 0; i < sc->rx_ring_inuse; ++i) {
2423 struct igb_rx_ring *rxr = &sc->rx_rings[i];
2424
2425 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
2426 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1);
2427 }
2428}
2429
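/*
 * Hand refilled RX descriptors back to the hardware.  The caller
 * passes the index of the next descriptor it will inspect; RDT must
 * point at the last valid descriptor, hence the back-up-by-one with
 * ring wraparound.
 */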
2430static void
2431igb_rx_refresh(struct igb_rx_ring *rxr, int i)
2432{
2433 if (--i < 0)
2434 i = rxr->num_rx_desc - 1;
2435 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i);
2436}
2437
2438static void
2439igb_rxeof(struct igb_rx_ring *rxr, int count)
2440{
2441 struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
2442 union e1000_adv_rx_desc *cur;
2443 uint32_t staterr;
2444 int i, ncoll = 0;
2445
2446 i = rxr->next_to_check;
2447 cur = &rxr->rx_base[i];
2448 staterr = le32toh(cur->wb.upper.status_error);
2449
2450 if ((staterr & E1000_RXD_STAT_DD) == 0)
2451 return;
2452
2453 while ((staterr & E1000_RXD_STAT_DD) && count != 0) {
2454 struct pktinfo *pi = NULL, pi0;
2455 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i];
2456 struct mbuf *m = NULL;
2457 boolean_t eop;
2458
2459 eop = (staterr & E1000_RXD_STAT_EOP) ? TRUE : FALSE;
2460 if (eop)
2461 --count;
2462
2463 ++ncoll;
2464 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 &&
2465 !rxr->discard) {
2466 struct mbuf *mp = rxbuf->m_head;
2467 uint32_t hash, hashtype;
2468 uint16_t vlan;
2469 int len;
2470
2471 len = le16toh(cur->wb.upper.length);
2472 if (rxr->sc->hw.mac.type == e1000_i350 &&
2473 (staterr & E1000_RXDEXT_STATERR_LB))
2474 vlan = be16toh(cur->wb.upper.vlan);
2475 else
2476 vlan = le16toh(cur->wb.upper.vlan);
2477
2478 hash = le32toh(cur->wb.lower.hi_dword.rss);
2479 hashtype = le32toh(cur->wb.lower.lo_dword.data) &
2480 E1000_RXDADV_RSSTYPE_MASK;
2481
2482 IGB_RSS_DPRINTF(rxr->sc, 10,
2483 "ring%d, hash 0x%08x, hashtype %u\n",
2484 rxr->me, hash, hashtype);
2485
2486 bus_dmamap_sync(rxr->rx_tag, rxbuf->map,
2487 BUS_DMASYNC_POSTREAD);
2488
2489 if (igb_newbuf(rxr, i, FALSE) != 0) {
2490 ifp->if_iqdrops++;
2491 goto discard;
2492 }
2493
2494 mp->m_len = len;
2495 if (rxr->fmp == NULL) {
2496 mp->m_pkthdr.len = len;
2497 rxr->fmp = mp;
2498 rxr->lmp = mp;
2499 } else {
2500 rxr->lmp->m_next = mp;
2501 rxr->lmp = rxr->lmp->m_next;
2502 rxr->fmp->m_pkthdr.len += len;
2503 }
2504
2505 if (eop) {
2506 m = rxr->fmp;
2507 rxr->fmp = NULL;
2508 rxr->lmp = NULL;
2509
2510 m->m_pkthdr.rcvif = ifp;
2511 ifp->if_ipackets++;
2512
2513 if (ifp->if_capenable & IFCAP_RXCSUM)
2514 igb_rxcsum(staterr, m);
2515
2516 if (staterr & E1000_RXD_STAT_VP) {
2517 m->m_pkthdr.ether_vlantag = vlan;
2518 m->m_flags |= M_VLANTAG;
2519 }
2520
2521 if (ifp->if_capenable & IFCAP_RSS) {
2522 pi = igb_rssinfo(m, &pi0,
2523 hash, hashtype, staterr);
2524 }
2525#ifdef IGB_RSS_DEBUG
2526 rxr->rx_packets++;
2527#endif
2528 }
2529 } else {
2530 ifp->if_ierrors++;
2531discard:
2532 igb_setup_rxdesc(cur, rxbuf);
2533 if (!eop)
2534 rxr->discard = TRUE;
2535 else
2536 rxr->discard = FALSE;
2537 if (rxr->fmp != NULL) {
2538 m_freem(rxr->fmp);
2539 rxr->fmp = NULL;
2540 rxr->lmp = NULL;
2541 }
2542 m = NULL;
2543 }
2544
2545 if (m != NULL)
2546 ether_input_pkt(ifp, m, pi);
2547
2548 /* Advance our pointers to the next descriptor. */
2549 if (++i == rxr->num_rx_desc)
2550 i = 0;
2551
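		/* Batched RDT update; see rx_wreg in igb_create_rx_ring(). */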
2552 if (ncoll > rxr->rx_wreg) {
2553 igb_rx_refresh(rxr, i);
2554 ncoll = 0;
2555 }
2556
2557 cur = &rxr->rx_base[i];
2558 staterr = le32toh(cur->wb.upper.status_error);
2559 }
2560 rxr->next_to_check = i;
2561
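	/* Flush any refills that did not reach the rx_wreg threshold. */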
2562 if (ncoll > 0)
2563 igb_rx_refresh(rxr, i);
2564}
2565
2566
2567static void
2568igb_set_vlan(struct igb_softc *sc)
2569{
2570 struct e1000_hw *hw = &sc->hw;
2571 uint32_t reg;
2572#if 0
2573 struct ifnet *ifp = sc->arpcom.ac_if;
2574#endif
2575
2576 if (sc->vf_ifp) {
2577 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE);
2578 return;
2579 }
2580
2581 reg = E1000_READ_REG(hw, E1000_CTRL);
2582 reg |= E1000_CTRL_VME;
2583 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2584
2585#if 0
2586 /* Enable the Filter Table */
2587 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
2588 reg = E1000_READ_REG(hw, E1000_RCTL);
2589 reg &= ~E1000_RCTL_CFIEN;
2590 reg |= E1000_RCTL_VFE;
2591 E1000_WRITE_REG(hw, E1000_RCTL, reg);
2592 }
2593#endif
2594
2595 /* Update the frame size */
2596 E1000_WRITE_REG(&sc->hw, E1000_RLPML,
2597 sc->max_frame_size + VLAN_TAG_SIZE);
2598
2599#if 0
2600 /* Don't bother with table if no vlans */
2601 if ((adapter->num_vlans == 0) ||
2602 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
2603 return;
2604 /*
2605 ** A soft reset zero's out the VFTA, so
2606 ** we need to repopulate it now.
2607 */
2608 for (int i = 0; i < IGB_VFTA_SIZE; i++)
2609 if (adapter->shadow_vfta[i] != 0) {
2610 if (adapter->vf_ifp)
2611 e1000_vfta_set_vf(hw,
2612 adapter->shadow_vfta[i], TRUE);
2613 else
2614 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
2615 i, adapter->shadow_vfta[i]);
2616 }
2617#endif
2618}
2619
2620static void
2621igb_enable_intr(struct igb_softc *sc)
2622{
2623 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
2624 lwkt_serialize_handler_enable(&sc->main_serialize);
2625 } else {
2626 int i;
2627
2628 for (i = 0; i < sc->msix_cnt; ++i) {
2629 lwkt_serialize_handler_enable(
2630 sc->msix_data[i].msix_serialize);
2631 }
2632 }
2633
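	/*
	 * With a dedicated (non-shared) interrupt, program the extended
	 * interrupt registers: EIAC selects which EICR bits auto-clear
	 * on MSI-X delivery, EIAM which bits are auto-masked, and EIMS
	 * unmasks the vectors in intr_mask; IMS then separately enables
	 * link status change interrupts.
	 */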
2634 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
2635 if (sc->intr_type == PCI_INTR_TYPE_MSIX)
2636 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask);
2637 else
2638 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
2639 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask);
2640 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
2641 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
2642 } else {
2643 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
2644 }
2645 E1000_WRITE_FLUSH(&sc->hw);
2646}
2647
2648static void
2649igb_disable_intr(struct igb_softc *sc)
2650{
2651 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) {
2652 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff);
2653 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0);
2654 }
2655 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
2656 E1000_WRITE_FLUSH(&sc->hw);
2657
2658 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
2659 lwkt_serialize_handler_disable(&sc->main_serialize);
2660 } else {
2661 int i;
2662
2663 for (i = 0; i < sc->msix_cnt; ++i) {
2664 lwkt_serialize_handler_disable(
2665 sc->msix_data[i].msix_serialize);
2666 }
2667 }
2668}
2669
2670/*
2671 * Bit of a misnomer, what this really means is
2672 * to enable OS management of the system... aka
2673 * to disable special hardware management features
2674 */
2675static void
2676igb_get_mgmt(struct igb_softc *sc)
2677{
2678 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2679 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H);
2680 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2681
2682 /* disable hardware interception of ARP */
2683 manc &= ~E1000_MANC_ARP_EN;
2684
2685 /* enable receiving management packets to the host */
2686 manc |= E1000_MANC_EN_MNG2HOST;
2687 manc2h |= 1 << 5; /* Mng Port 623 */
2688 manc2h |= 1 << 6; /* Mng Port 664 */
2689 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h);
2690 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2691 }
2692}
2693
2694/*
2695 * Give control back to hardware management controller
2696 * if there is one.
2697 */
2698static void
2699igb_rel_mgmt(struct igb_softc *sc)
2700{
2701 if (sc->flags & IGB_FLAG_HAS_MGMT) {
2702 int manc = E1000_READ_REG(&sc->hw, E1000_MANC);
2703
2704 /* Re-enable hardware interception of ARP */
2705 manc |= E1000_MANC_ARP_EN;
2706 manc &= ~E1000_MANC_EN_MNG2HOST;
2707
2708 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc);
2709 }
2710}
2711
2712/*
2713 * Sets CTRL_EXT:DRV_LOAD bit.
2714 *
2715 * For ASF and Pass Through versions of f/w this means that
2716 * the driver is loaded.
2717 */
2718static void
2719igb_get_hw_control(struct igb_softc *sc)
2720{
2721 uint32_t ctrl_ext;
2722
2723 if (sc->vf_ifp)
2724 return;
2725
2726 /* Let firmware know the driver has taken over */
2727 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2728 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2729 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2730}
2731
2732/*
2733 * Resets CTRL_EXT:DRV_LOAD bit.
2734 *
2735 * For ASF and Pass Through versions of f/w this means that the
2736 * driver is no longer loaded.
2737 */
2738static void
2739igb_rel_hw_control(struct igb_softc *sc)
2740{
2741 uint32_t ctrl_ext;
2742
2743 if (sc->vf_ifp)
2744 return;
2745
2746 /* Let firmware take over control of h/w */
2747 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
2748 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT,
2749 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2750}
2751
2752static int
2753igb_is_valid_ether_addr(const uint8_t *addr)
2754{
2755 uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
2756
2757 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
2758 return FALSE;
2759 return TRUE;
2760}
2761
2762/*
2763 * Enable PCI Wake On Lan capability
2764 */
2765static void
2766igb_enable_wol(device_t dev)
2767{
2768 uint16_t cap, status;
2769 uint8_t id;
2770
2771 /* First find the capabilities pointer */
2772 cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
2773
2774 /* Read the PM Capabilities */
2775 id = pci_read_config(dev, cap, 1);
2776 if (id != PCIY_PMG) /* Something wrong */
2777 return;
2778
2779 /*
2780 * OK, we have the power capabilities,
2781 * so now get the status register
2782 */
2783 cap += PCIR_POWER_STATUS;
2784 status = pci_read_config(dev, cap, 2);
2785 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
2786 pci_write_config(dev, cap, status, 2);
2787}
2788
2789static void
2790igb_update_stats_counters(struct igb_softc *sc)
2791{
2792 struct e1000_hw *hw = &sc->hw;
2793 struct e1000_hw_stats *stats;
2794 struct ifnet *ifp = &sc->arpcom.ac_if;
2795
2796 /*
2797 * The virtual function adapter has only a
2798 * small controlled set of stats, do only
2799 * those and return.
2800 */
2801 if (sc->vf_ifp) {
2802 igb_update_vf_stats_counters(sc);
2803 return;
2804 }
2805 stats = sc->stats;
2806
2807 if (sc->hw.phy.media_type == e1000_media_type_copper ||
2808 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
2809 stats->symerrs +=
2810 E1000_READ_REG(hw, E1000_SYMERRS);
2811 stats->sec += E1000_READ_REG(hw, E1000_SEC);
2812 }
2813
2814 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
2815 stats->mpc += E1000_READ_REG(hw, E1000_MPC);
2816 stats->scc += E1000_READ_REG(hw, E1000_SCC);
2817 stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
2818
2819 stats->mcc += E1000_READ_REG(hw, E1000_MCC);
2820 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
2821 stats->colc += E1000_READ_REG(hw, E1000_COLC);
2822 stats->dc += E1000_READ_REG(hw, E1000_DC);
2823 stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
2824 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
2825 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
2826
2827 /*
2828 * For watchdog management we need to know if we have been
2829 * paused during the last interval, so capture that here.
2830 */
2831 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
2832 stats->xoffrxc += sc->pause_frames;
2833 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
2834 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
2835 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
2836 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
2837 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
2838 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
2839 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
2840 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
2841 stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
2842 stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
2843 stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
2844 stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
2845
2846 /* For the 64-bit byte counters the low dword must be read first. */
2847 /* Both registers clear on the read of the high dword */
2848
2849 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
2850 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
2851 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
2852 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
2853
2854 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
2855 stats->ruc += E1000_READ_REG(hw, E1000_RUC);
2856 stats->rfc += E1000_READ_REG(hw, E1000_RFC);
2857 stats->roc += E1000_READ_REG(hw, E1000_ROC);
2858 stats->rjc += E1000_READ_REG(hw, E1000_RJC);
2859
2860 stats->tor += E1000_READ_REG(hw, E1000_TORH);
2861 stats->tot += E1000_READ_REG(hw, E1000_TOTH);
2862
2863 stats->tpr += E1000_READ_REG(hw, E1000_TPR);
2864 stats->tpt += E1000_READ_REG(hw, E1000_TPT);
2865 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
2866 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
2867 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
2868 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
2869 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
2870 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
2871 stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
2872 stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
2873
2874 /* Interrupt Counts */
2875
2876 stats->iac += E1000_READ_REG(hw, E1000_IAC);
2877 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
2878 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
2879 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
2880 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
2881 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
2882 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
2883 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
2884 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
2885
2886 /* Host to Card Statistics */
2887
2888 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
2889 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
2890 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
2891 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
2892 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
2893 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
2894 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
2895 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
2896 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32));
2897 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
2898 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
2899 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
2900 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
2901 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
2902
2903 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
2904 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
2905 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
2906 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
2907 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
2908 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
2909
2910 ifp->if_collisions = stats->colc;
2911
2912 /* Rx Errors */
2913 ifp->if_ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc +
2914 stats->ruc + stats->roc + stats->mpc + stats->cexterr;
2915
2916 /* Tx Errors */
2917 ifp->if_oerrors = stats->ecol + stats->latecol + sc->watchdog_events;
2918
2919 /* Driver specific counters */
2920 sc->device_control = E1000_READ_REG(hw, E1000_CTRL);
2921 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL);
2922 sc->int_mask = E1000_READ_REG(hw, E1000_IMS);
2923 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
2924 sc->packet_buf_alloc_tx =
2925 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
2926 sc->packet_buf_alloc_rx =
2927 (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
2928}
2929
2930static void
2931igb_vf_init_stats(struct igb_softc *sc)
2932{
2933 struct e1000_hw *hw = &sc->hw;
2934 struct e1000_vf_stats *stats;
2935
2936 stats = sc->stats;
2937 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
2938 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
2939 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
2940 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
2941 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
2942}
2943
2944static void
2945igb_update_vf_stats_counters(struct igb_softc *sc)
2946{
2947 struct e1000_hw *hw = &sc->hw;
2948 struct e1000_vf_stats *stats;
2949
2950 if (sc->link_speed == 0)
2951 return;
2952
2953 stats = sc->stats;
2954 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc);
2955 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc);
2956 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc);
2957 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc);
2958 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc);
2959}
2960
2961#ifdef IFPOLL_ENABLE
2962
2963static void
2964igb_npoll_status(struct ifnet *ifp)
2965{
2966 struct igb_softc *sc = ifp->if_softc;
2967 uint32_t reg_icr;
2968
2969 ASSERT_SERIALIZED(&sc->main_serialize);
2970
2971 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
2972 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
2973 sc->hw.mac.get_link_status = 1;
2974 igb_update_link_status(sc);
2975 }
2976}
2977
2978static void
2979igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
2980{
2981 struct igb_tx_ring *txr = arg;
2982
2983 ASSERT_SERIALIZED(&txr->tx_serialize);
2984
2985 igb_txeof(txr);
2986 if (!ifq_is_empty(&ifp->if_snd))
2987 if_devstart(ifp);
2988}
2989
2990static void
2991igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
2992{
2993 struct igb_rx_ring *rxr = arg;
2994
2995 ASSERT_SERIALIZED(&rxr->rx_serialize);
2996
2997 igb_rxeof(rxr, cycle);
2998}
2999
3000static void
3001igb_npoll(struct ifnet *ifp, struct ifpoll_info *info)
3002{
3003 struct igb_softc *sc = ifp->if_softc;
3004
3005 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3006
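	/*
	 * A non-NULL info means polling is being enabled: register the
	 * status/TX/RX handlers.  tx_npoll_off/rx_npoll_off spread the
	 * rings across CPUs (ring i is polled on CPU i + offset; the
	 * sysctl handlers below keep the offsets under ncpus2).
	 */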
3007 if (info) {
3008 struct igb_tx_ring *txr;
3009 int i, off;
3010
3011 info->ifpi_status.status_func = igb_npoll_status;
3012 info->ifpi_status.serializer = &sc->main_serialize;
3013
3014 off = sc->tx_npoll_off;
3015 KKASSERT(off < ncpus2);
3016 txr = &sc->tx_rings[0];
3017 info->ifpi_tx[off].poll_func = igb_npoll_tx;
3018 info->ifpi_tx[off].arg = txr;
3019 info->ifpi_tx[off].serializer = &txr->tx_serialize;
3020
3021 off = sc->rx_npoll_off;
3022 for (i = 0; i < sc->rx_ring_cnt; ++i) {
3023 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3024 int idx = i + off;
3025
3026 KKASSERT(idx < ncpus2);
3027 info->ifpi_rx[idx].poll_func = igb_npoll_rx;
3028 info->ifpi_rx[idx].arg = rxr;
3029 info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
1f7e3916 3030 }
3031
3032 if (ifp->if_flags & IFF_RUNNING) {
3033 if (sc->rx_ring_inuse == sc->rx_ring_cnt)
3034 igb_disable_intr(sc);
3035 else
3036 igb_init(sc);
3037 }
3038 ifp->if_npoll_cpuid = sc->tx_npoll_off;
3039 } else {
3040 if (ifp->if_flags & IFF_RUNNING) {
3041 if (sc->rx_ring_inuse == sc->rx_ring_cnt)
3042 igb_enable_intr(sc);
3043 else
3044 igb_init(sc);
3045 }
3046 ifp->if_npoll_cpuid = -1;
3047 }
3048}
3049
3050#endif /* IFPOLL_ENABLE */
3051
3052static void
3053igb_intr(void *xsc)
3054{
3055 struct igb_softc *sc = xsc;
3056 struct ifnet *ifp = &sc->arpcom.ac_if;
3057 uint32_t eicr;
3058
3059 ASSERT_SERIALIZED(&sc->main_serialize);
3060
3061 eicr = E1000_READ_REG(&sc->hw, E1000_EICR);
3062
3063 if (eicr == 0)
3064 return;
3065
3066 if (ifp->if_flags & IFF_RUNNING) {
3067 struct igb_tx_ring *txr;
3068 int i;
3069
3070 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3071 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3072
3073 if (eicr & rxr->rx_intr_mask) {
3074 lwkt_serialize_enter(&rxr->rx_serialize);
3075 igb_rxeof(rxr, -1);
3076 lwkt_serialize_exit(&rxr->rx_serialize);
3077 }
3078 }
3079
3080 txr = &sc->tx_rings[0];
3081 if (eicr & txr->tx_intr_mask) {
3082 lwkt_serialize_enter(&txr->tx_serialize);
3083 igb_txeof(txr);
3084 if (!ifq_is_empty(&ifp->if_snd))
3085 if_devstart(ifp);
3086 lwkt_serialize_exit(&txr->tx_serialize);
3087 }
3088 }
3089
3090 if (eicr & E1000_EICR_OTHER) {
3091 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR);
3092
3093 /* Link status change */
3094 if (icr & E1000_ICR_LSC) {
3095 sc->hw.mac.get_link_status = 1;
3096 igb_update_link_status(sc);
3097 }
3098 }
3099
3100 /*
3101 * Reading EICR has the side effect to clear interrupt mask,
3102 * so all interrupts need to be enabled here.
3103 */
3104 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask);
3105}
3106
3107static void
3108igb_intr_shared(void *xsc)
3109{
3110 struct igb_softc *sc = xsc;
3111 struct ifnet *ifp = &sc->arpcom.ac_if;
3112 uint32_t reg_icr;
3113
3114 ASSERT_SERIALIZED(&sc->main_serialize);
3115
3116 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
3117
3118 /* Hot eject? */
3119 if (reg_icr == 0xffffffff)
3120 return;
3121
3122 /* Definitely not our interrupt. */
3123 if (reg_icr == 0x0)
3124 return;
3125
3126 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0)
3127 return;
3128
3129 if (ifp->if_flags & IFF_RUNNING) {
3130 if (reg_icr &
3131 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
3132 int i;
3133
3134 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3135 struct igb_rx_ring *rxr = &sc->rx_rings[i];
3136
3137 lwkt_serialize_enter(&rxr->rx_serialize);
3138 igb_rxeof(rxr, -1);
3139 lwkt_serialize_exit(&rxr->rx_serialize);
3140 }
3141 }
3142
3143 if (reg_icr & E1000_ICR_TXDW) {
3144 struct igb_tx_ring *txr = &sc->tx_rings[0];
3145
3146 lwkt_serialize_enter(&txr->tx_serialize);
3147 igb_txeof(txr);
3148 if (!ifq_is_empty(&ifp->if_snd))
3149 if_devstart(ifp);
3150 lwkt_serialize_exit(&txr->tx_serialize);
3151 }
3152 }
3153
3154 /* Link status change */
3155 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3156 sc->hw.mac.get_link_status = 1;
3157 igb_update_link_status(sc);
3158 }
3159
3160 if (reg_icr & E1000_ICR_RXO)
3161 sc->rx_overruns++;
3162}
3163
3164static int
3165igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp,
3166 int *segs_used, int *idx)
3167{
3168 bus_dma_segment_t segs[IGB_MAX_SCATTER];
3169 bus_dmamap_t map;
3170 struct igb_tx_buf *tx_buf, *tx_buf_mapped;
3171 union e1000_adv_tx_desc *txd = NULL;
3172 struct mbuf *m_head = *m_headp;
3173 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0;
3174 int maxsegs, nsegs, i, j, error, last = 0;
3175 uint32_t hdrlen = 0;
3176
3177 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3178 error = igb_tso_pullup(txr, m_headp);
3179 if (error)
3180 return error;
3181 m_head = *m_headp;
3182 }
3183
1f7e3916
SZ
3184 /* Set basic descriptor constants */
3185 cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
3186 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
3187 if (m_head->m_flags & M_VLANTAG)
3188 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3189
3190 /*
3191 * Map the packet for DMA.
3192 */
3193 tx_buf = &txr->tx_buf[txr->next_avail_desc];
3194 tx_buf_mapped = tx_buf;
3195 map = tx_buf->map;
3196
3197 maxsegs = txr->tx_avail - IGB_TX_RESERVED;
3198 KASSERT(maxsegs >= txr->spare_desc, ("not enough spare TX desc\n"));
3199 if (maxsegs > IGB_MAX_SCATTER)
3200 maxsegs = IGB_MAX_SCATTER;
3201
3202 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp,
3203 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
3204 if (error) {
3205 if (error == ENOBUFS)
3206 txr->sc->mbuf_defrag_failed++;
3207 else
3208 txr->sc->no_tx_dma_setup++;
3209
3210 m_freem(*m_headp);
3211 *m_headp = NULL;
3212 return error;
3213 }
3214 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE);
3215
3216 m_head = *m_headp;
3217
3218 /*
3219 * Set up the TX context descriptor, if any hardware offloading is
3220 * needed. This includes CSUM, VLAN, and TSO. It will consume one
3221 * TX descriptor.
3222 *
3223 * Unlike these chips' predecessors (em/emx), the TX context
3224 * descriptor will _not_ interfere with TX data fetch pipelining.
3225 */
3226 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3227 igb_tso_ctx(txr, m_head, &hdrlen);
3228 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3229 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3230 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3231 txr->tx_nsegs++;
3232 (*segs_used)++;
3233 } else if (igb_txcsum_ctx(txr, m_head)) {
3234 if (m_head->m_pkthdr.csum_flags & CSUM_IP)
3235 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8);
3236 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP))
3237 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8);
3238 txr->tx_nsegs++;
3239 (*segs_used)++;
3240 }
3241
3242 *segs_used += nsegs;
3243 txr->tx_nsegs += nsegs;
3244 if (txr->tx_nsegs >= txr->intr_nsegs) {
3245 /*
3246 * Report Status (RS) is turned on every intr_nsegs
3247 * descriptors (roughly).
3248 */
3249 txr->tx_nsegs = 0;
3250 cmd_rs = E1000_ADVTXD_DCMD_RS;
3251 }
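	/*
	 * Batching RS also limits how often the hardware must DMA the
	 * head write-back value; descriptors queued without RS are
	 * reaped once a later RS'd descriptor completes.
	 */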
3252
3253 /* Calculate payload length */
3254 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
3255 << E1000_ADVTXD_PAYLEN_SHIFT);
3256
3257 /* 82575 needs the queue index added */
3258 if (txr->sc->hw.mac.type == e1000_82575)
3259 olinfo_status |= txr->me << 4;
3260
3261 /* Set up our transmit descriptors */
3262 i = txr->next_avail_desc;
3263 for (j = 0; j < nsegs; j++) {
3264 bus_size_t seg_len;
3265 bus_addr_t seg_addr;
3266
3267 tx_buf = &txr->tx_buf[i];
3268 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
3269 seg_addr = segs[j].ds_addr;
3270 seg_len = segs[j].ds_len;
3271
3272 txd->read.buffer_addr = htole64(seg_addr);
3273 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
3274 txd->read.olinfo_status = htole32(olinfo_status);
3275 last = i;
3276 if (++i == txr->num_tx_desc)
3277 i = 0;
3278 tx_buf->m_head = NULL;
3279 }
3280
3281 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n"));
3282 txr->next_avail_desc = i;
3283 txr->tx_avail -= nsegs;
3284
3285 tx_buf->m_head = m_head;
3286 tx_buf_mapped->map = tx_buf->map;
3287 tx_buf->map = map;
3288
3289 /*
3290 * Last Descriptor of Packet needs End Of Packet (EOP)
3291 */
3292 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs);
3293
3294 /*
3295 * Hand the new tail index back to the caller through *idx; the TDT
3296 * write tells the E1000 that this frame is available to transmit.
3297 */
3298 *idx = i;
3299 ++txr->tx_packets;
3300
3301 return 0;
3302}
3303
3304static void
3305igb_start(struct ifnet *ifp)
3306{
3307 struct igb_softc *sc = ifp->if_softc;
3308 struct igb_tx_ring *txr = &sc->tx_rings[0];
3309 struct mbuf *m_head;
3310 int idx = -1, nsegs = 0;
3311
3312 ASSERT_SERIALIZED(&txr->tx_serialize);
3313
3314 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
3315 return;
3316
3317 if (!sc->link_active) {
3318 ifq_purge(&ifp->if_snd);
3319 return;
3320 }
3321
3322 if (!IGB_IS_NOT_OACTIVE(txr))
3323 igb_txeof(txr);
3324
3325 while (!ifq_is_empty(&ifp->if_snd)) {
3326 if (IGB_IS_OACTIVE(txr)) {
3327 ifp->if_flags |= IFF_OACTIVE;
3328 /* Set watchdog on */
3329 ifp->if_timer = 5;
3330 break;
3331 }
3332
3333 m_head = ifq_dequeue(&ifp->if_snd, NULL);
3334 if (m_head == NULL)
3335 break;
3336
3337 if (igb_encap(txr, &m_head, &nsegs, &idx)) {
3338 ifp->if_oerrors++;
3339 continue;
3340 }
3341
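		/*
		 * Batch the TDT doorbell: defer the register write until
		 * at least wreg_nsegs segments have been queued, mirroring
		 * the RX side's rx_wreg batching.
		 */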
3342 if (nsegs >= txr->wreg_nsegs) {
3343 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
3344 idx = -1;
3345 nsegs = 0;
3346 }
3347
3348 /* Send a copy of the frame to the BPF listener */
3349 ETHER_BPF_MTAP(ifp, m_head);
3350 }
3351 if (idx >= 0)
3352 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx);
3353}
3354
3355static void
3356igb_watchdog(struct ifnet *ifp)
3357{
3358 struct igb_softc *sc = ifp->if_softc;
3359 struct igb_tx_ring *txr = &sc->tx_rings[0];
3360
3361 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3362
3363 /*
3364 * If flow control has paused us since last checking
3365 * it invalidates the watchdog timing, so don't run it.
3366 */
3367 if (sc->pause_frames) {
3368 sc->pause_frames = 0;
3369 ifp->if_timer = 5;
3370 return;
3371 }
3372
3373 if_printf(ifp, "Watchdog timeout -- resetting\n");
3374 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
3375 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)),
3376 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me)));
3377 if_printf(ifp, "TX(%d) desc avail = %d, "
3378 "Next TX to Clean = %d\n",
3379 txr->me, txr->tx_avail, txr->next_to_clean);
3380
3381 ifp->if_oerrors++;
3382 sc->watchdog_events++;
3383
3384 igb_init(sc);
3385 if (!ifq_is_empty(&ifp->if_snd))
3386 if_devstart(ifp);
3387}
3388
3389static void
3390igb_set_eitr(struct igb_softc *sc, int idx, int rate)
3391{
3392 uint32_t eitr = 0;
3393
3394 if (rate > 0) {
3395 if (sc->hw.mac.type == e1000_82575) {
3396 eitr = 1000000000 / 256 / rate;
3397 /*
3398 * NOTE:
3399 * The datasheet is wrong about the 2-bit left shift
3400 */
3401 } else {
3402 eitr = 1000000 / rate;
3403 eitr <<= IGB_EITR_INTVL_SHIFT;
3404 }
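		/*
		 * Illustrative arithmetic: rate = 6000 intr/s gives
		 * 1000000000/256/6000 ~= 651 on 82575, and
		 * 1000000/6000 ~= 166 (then shifted into the interval
		 * field) on the later chips.
		 */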
3405
3406 if (eitr == 0) {
3407 /* Don't disable it */
3408 eitr = 1 << IGB_EITR_INTVL_SHIFT;
3409 } else if (eitr > IGB_EITR_INTVL_MASK) {
3410 /* Don't allow it to be too large */
3411 eitr = IGB_EITR_INTVL_MASK;
3412 }
3413 }
3414 if (sc->hw.mac.type == e1000_82575)
3415 eitr |= eitr << 16;
3416 else
3417 eitr |= E1000_EITR_CNT_IGNR;
3418 E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr);
3419}
3420
3421static int
3422igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
3423{
3424 struct igb_softc *sc = (void *)arg1;
3425 struct ifnet *ifp = &sc->arpcom.ac_if;
3426 int error, intr_rate;
3427
3428 intr_rate = sc->intr_rate;
3429 error = sysctl_handle_int(oidp, &intr_rate, 0, req);
3430 if (error || req->newptr == NULL)
3431 return error;
3432 if (intr_rate < 0)
3433 return EINVAL;
3434
3435 ifnet_serialize_all(ifp);
3436
3437 sc->intr_rate = intr_rate;
3438 if (ifp->if_flags & IFF_RUNNING)
3439 igb_set_eitr(sc, 0, sc->intr_rate);
3440
3441 if (bootverbose)
3442 if_printf(ifp, "interrupt rate set to %d/sec\n", sc->intr_rate);
3443
3444 ifnet_deserialize_all(ifp);
3445
3446 return 0;
3447}
3448
3449static int
3450igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS)
3451{
3452 struct igb_msix_data *msix = (void *)arg1;
3453 struct igb_softc *sc = msix->msix_sc;
3454 struct ifnet *ifp = &sc->arpcom.ac_if;
3455 int error, msix_rate;
3456
3457 msix_rate = msix->msix_rate;
3458 error = sysctl_handle_int(oidp, &msix_rate, 0, req);
3459 if (error || req->newptr == NULL)
3460 return error;
3461 if (msix_rate < 0)
3462 return EINVAL;
3463
3464 lwkt_serialize_enter(msix->msix_serialize);
3465
3466 msix->msix_rate = msix_rate;
3467 if (ifp->if_flags & IFF_RUNNING)
3468 igb_set_eitr(sc, msix->msix_vector, msix->msix_rate);
3469
3470 if (bootverbose) {
3471 if_printf(ifp, "%s set to %d/sec\n", msix->msix_rate_desc,
3472 msix->msix_rate);
3473 }
3474
3475 lwkt_serialize_exit(msix->msix_serialize);
3476
3477 return 0;
3478}
3479
3480static int
3481igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS)
3482{
3483 struct igb_softc *sc = (void *)arg1;
3484 struct ifnet *ifp = &sc->arpcom.ac_if;
3485 struct igb_tx_ring *txr = &sc->tx_rings[0];
3486 int error, nsegs;
3487
3488 nsegs = txr->intr_nsegs;
3489 error = sysctl_handle_int(oidp, &nsegs, 0, req);
3490 if (error || req->newptr == NULL)
3491 return error;
3492 if (nsegs <= 0)
3493 return EINVAL;
3494
3495 ifnet_serialize_all(ifp);
3496
3497 if (nsegs >= txr->num_tx_desc - txr->oact_lo_desc ||
3498 nsegs >= txr->oact_hi_desc - IGB_MAX_SCATTER) {
3499 error = EINVAL;
3500 } else {
3501 error = 0;
3502 txr->intr_nsegs = nsegs;
3503 }
3504
3505 ifnet_deserialize_all(ifp);
3506
3507 return error;
3508}
3509
3510#ifdef IFPOLL_ENABLE
3511
3512static int
3513igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS)
3514{
3515 struct igb_softc *sc = (void *)arg1;
3516 struct ifnet *ifp = &sc->arpcom.ac_if;
3517 int error, off;
3518
3519 off = sc->rx_npoll_off;
3520 error = sysctl_handle_int(oidp, &off, 0, req);
3521 if (error || req->newptr == NULL)
3522 return error;
3523 if (off < 0)
3524 return EINVAL;
3525
3526 ifnet_serialize_all(ifp);
3527 if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) {
3528 error = EINVAL;
3529 } else {
3530 error = 0;
3531 sc->rx_npoll_off = off;
3532 }
3533 ifnet_deserialize_all(ifp);
3534
3535 return error;
3536}
3537
3538static int
3539igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
3540{
3541 struct igb_softc *sc = (void *)arg1;
3542 struct ifnet *ifp = &sc->arpcom.ac_if;
3543 int error, off;
3544
3545 off = sc->tx_npoll_off;
3546 error = sysctl_handle_int(oidp, &off, 0, req);
3547 if (error || req->newptr == NULL)
3548 return error;
3549 if (off < 0)
3550 return EINVAL;
3551
3552 ifnet_serialize_all(ifp);
3553 if (off >= ncpus2) {
3554 error = EINVAL;
3555 } else {
3556 error = 0;
3557 sc->tx_npoll_off = off;
3558 }
3559 ifnet_deserialize_all(ifp);
3560
3561 return error;
3562}
3563
3564#endif /* IFPOLL_ENABLE */
3565
f6167a56
SZ
3566static void
3567igb_init_intr(struct igb_softc *sc)
3568{
3569 igb_set_intr_mask(sc);
3570
3571 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0)
3572 igb_init_unshared_intr(sc);
3573
3574 if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
3575 igb_set_eitr(sc, 0, sc->intr_rate);
3576 } else {
3577 int i;
3578
3579 for (i = 0; i < sc->msix_cnt; ++i)
3580 igb_set_eitr(sc, i, sc->msix_data[i].msix_rate);
3581 }
3582}
3583
3584static void
3585igb_init_unshared_intr(struct igb_softc *sc)
3586{
3587 struct e1000_hw *hw = &sc->hw;
3588 const struct igb_rx_ring *rxr;
3589 const struct igb_tx_ring *txr;
3590 uint32_t ivar, index;
3591 int i;
3592
3593 /*
3594 * Enable extended mode
3595 */
3596 if (sc->hw.mac.type != e1000_82575) {
3597 uint32_t gpie;
3598 int ivar_max;
3599
3600 gpie = E1000_GPIE_NSICR;
3601 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3602 gpie |= E1000_GPIE_MSIX_MODE |
3603 E1000_GPIE_EIAME |
3604 E1000_GPIE_PBA;
3605 }
3606 E1000_WRITE_REG(hw, E1000_GPIE, gpie);
3607
3608 /*
3609 * Clear IVARs
3610 */
3611 switch (sc->hw.mac.type) {
3612 case e1000_82580:
3613 ivar_max = IGB_MAX_IVAR_82580;
3614 break;
3615
3616 case e1000_i350:
3617 ivar_max = IGB_MAX_IVAR_I350;
3618 break;
3619
3620 case e1000_vfadapt:
3621 case e1000_vfadapt_i350:
3622 ivar_max = IGB_MAX_IVAR_VF;
3623 break;
3624
3625 case e1000_82576:
3626 ivar_max = IGB_MAX_IVAR_82576;
3627 break;
3628
3629 default:
3630 panic("unknown mac type %d\n", sc->hw.mac.type);
3631 }
3632 for (i = 0; i < ivar_max; ++i)
3633 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0);
3634 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0);
3635 } else {
3636 uint32_t tmp;
3637
3638 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX,
3639 ("82575 w/ MSI-X"));
3640 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
3641 tmp |= E1000_CTRL_EXT_IRCA;
3642 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
3643 }
3644
3645 /*
3646 * Map TX/RX interrupts to EICR
3647 */
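	/*
	 * Layout note, derived from the shifts below: each 32-bit IVAR
	 * register holds four 8-bit entries, and bit 7 of each entry
	 * (E1000_IVAR_VALID) marks it active.  82580/i350 pack one
	 * RX/TX queue pair per register (index = queue >> 1), while
	 * 82576 splits queues 0-7 and 8-15 between the low and high
	 * halves (index = queue & 0x7).
	 */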
3648 switch (sc->hw.mac.type) {
3649 case e1000_82580:
3650 case e1000_i350:
3651 case e1000_vfadapt:
3652 case e1000_vfadapt_i350:
3653 /* RX entries */
3654 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3655 rxr = &sc->rx_rings[i];
3656
3657 index = i >> 1;
3658 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3659
3660 if (i & 1) {
3661 ivar &= 0xff00ffff;
3662 ivar |=
3663 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
3664 } else {
3665 ivar &= 0xffffff00;
3666 ivar |=
3667 (rxr->rx_intr_bit | E1000_IVAR_VALID);
3668 }
3669 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3670 }
3671 /* TX entries */
27866bf1 3672 for (i = 0; i < sc->tx_ring_cnt; ++i) {
f6167a56
SZ
3673 txr = &sc->tx_rings[i];
3674
3675 index = i >> 1;
3676 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3677
3678 if (i & 1) {
3679 ivar &= 0x00ffffff;
3680 ivar |=
3681 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
3682 } else {
3683 ivar &= 0xffff00ff;
3684 ivar |=
3685 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
3686 }
3687 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3688 }
3689 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3690 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8;
3691 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
3692 }
3693 break;
3694
3695 case e1000_82576:
3696 /* RX entries */
3697 for (i = 0; i < sc->rx_ring_inuse; ++i) {
3698 rxr = &sc->rx_rings[i];
3699
3700 index = i & 0x7; /* Each IVAR has two entries */
3701 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3702
3703 if (i < 8) {
3704 ivar &= 0xffffff00;
3705 ivar |=
3706 (rxr->rx_intr_bit | E1000_IVAR_VALID);
3707 } else {
3708 ivar &= 0xff00ffff;
3709 ivar |=
3710 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16;
3711 }
3712 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3713 }
3714 /* TX entries */
3715 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3716 txr = &sc->tx_rings[i];
3717
3718 index = i & 0x7; /* Each IVAR has two entries */
3719 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
3720
3721 if (i < 8) {
3722 ivar &= 0xffff00ff;
3723 ivar |=
3724 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8;
3725 } else {
3726 ivar &= 0x00ffffff;
3727 ivar |=
3728 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24;
3729 }
3730 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
3731 }
3732 if (sc->intr_type == PCI_INTR_TYPE_MSIX) {
3733 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8;
3734 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
3735 }
3736 break;
3737
3738 case e1000_82575:
3739 /*
3740 * Enable necessary interrupt bits.
3741 *
3742 * The name of the register is confusing; in addition to
3743 * configuring the first vector of MSI-X, it also configures
3744 * which bits of EICR could be set by the hardware even when
3745 * MSI or line interrupt is used; it thus controls interrupt
3746 * generation. It MUST be configured explicitly; the default
3747 * value mentioned in the datasheet is wrong: RX queue0 and
3748 * TX queue0 are NOT enabled by default.
3749 */
3750 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask);
3751 break;
3752
3753 default:
3754 panic("unknown mac type %d\n", sc->hw.mac.type);
3755 }
3756}
3757
3758static int
3759igb_setup_intr(struct igb_softc *sc)
3760{
3761 struct ifnet *ifp = &sc->arpcom.ac_if;
3762 int error;
3763
3764 if (sc->intr_type == PCI_INTR_TYPE_MSIX)
3765 return igb_msix_setup(sc);
3766
3767 error = bus_setup_intr(sc->dev, sc->intr_res, INTR_MPSAFE,
3768 (sc->flags & IGB_FLAG_SHARED_INTR) ? igb_intr_shared : igb_intr,
3769 sc, &sc->intr_tag, &sc->main_serialize);
f6167a56
SZ
3770 if (error) {
3771 device_printf(sc->dev, "Failed to register interrupt handler\n");
3772 return error;
3773 }
3774
3775 ifp->if_cpuid = rman_get_cpuid(sc->intr_res);
3776 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
3777
3778 return 0;
3779}
3780
3781static void
3782igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_bit0, int intr_bitmax)
3783{
3784 if (txr->sc->hw.mac.type == e1000_82575) {
3785 txr->tx_intr_bit = 0; /* unused */
3786 switch (txr->me) {
3787 case 0:
3788 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0;
3789 break;
3790 case 1:
3791 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1;
3792 break;
3793 case 2:
3794 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2;
3795 break;
3796 case 3:
3797 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3;
3798 break;
3799 default:
3800 panic("unsupported # of TX ring, %d\n", txr->me);
3801 }
3802 } else {
3803 int intr_bit = *intr_bit0;
3804
3805 txr->tx_intr_bit = intr_bit % intr_bitmax;
3806 txr->tx_intr_mask = 1 << txr->tx_intr_bit;
3807
3808 *intr_bit0 = intr_bit + 1;
3809 }
3810}
3811
3812static void
3813igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_bit0, int intr_bitmax)
3814{
3815 if (rxr->sc->hw.mac.type == e1000_82575) {
3816 rxr->rx_intr_bit = 0; /* unused */
3817 switch (rxr->me) {
3818 case 0:
3819 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0;
3820 break;
3821 case 1:
3822 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1;
3823 break;
3824 case 2:
3825 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2;
3826 break;
3827 case 3:
3828 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3;
3829 break;
3830