em(4): Don't write ITR if the NIC is not running yet.
[dragonfly.git] / sys / dev / netif / em / if_em.c
78195a76 1/*
2 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
3 *
9c80d176 4 * Copyright (c) 2001-2008, Intel Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9c80d176 9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
9c80d176 12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
9c80d176 16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
9c80d176 20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 *
34 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
9c80d176 35 *
36 * This code is derived from software contributed to The DragonFly Project
37 * by Matthew Dillon <dillon@backplane.com>
9c80d176 38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
9c80d176 42 *
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * 3. Neither the name of The DragonFly Project nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific, prior written permission.
9c80d176 52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
56 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
57 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
59 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
60 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
61 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
62 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
63 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
9c80d176 65 *
a75a1559 66 * $DragonFly: src/sys/dev/netif/em/if_em.c,v 1.80 2008/09/17 08:51:29 sephe Exp $
67 */
68/*
69 * SERIALIZATION API RULES:
70 *
71 * - If the driver uses the same serializer for the interrupt as for the
72 * ifnet, most of the serialization will be done automatically for the
9c80d176 73 * driver.
74 *
75 * - ifmedia entry points will be serialized by the ifmedia code using the
76 * ifnet serializer.
77 *
78 * - if_* entry points except for if_input will be serialized by the IF
79 * and protocol layers.
80 *
81 * - The device driver must be sure to serialize access from timeout code
82 * installed by the device driver.
83 *
84 * - The device driver typically holds the serializer at the time it wishes
85 * to call if_input.
86 *
87 * - We must call lwkt_serialize_handler_enable() prior to enabling the
88 * hardware interrupt and lwkt_serialize_handler_disable() after disabling
89 * the hardware interrupt in order to avoid handler execution races from
90 * scheduled interrupt threads.
91 *
92 * NOTE! Since callers into the device driver hold the ifnet serializer,
93 * the device driver may be holding a serializer at the time it calls
94 * if_input even if it is not serializer-aware.
95 */
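/*
 * Illustrative sketch (not part of the original file): the handler
 * enable/disable rule above translates into an ordering like
 *
 *	lwkt_serialize_handler_enable(ifp->if_serializer);
 *	em_enable_intr(adapter);	(unmask the hardware interrupt)
 *	...
 *	em_disable_intr(adapter);	(mask the hardware interrupt)
 *	lwkt_serialize_handler_disable(ifp->if_serializer);
 *
 * i.e. the handler is made runnable before the hardware can interrupt,
 * and is torn down only after the hardware has been silenced.
 */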
96
97#include "opt_polling.h"
21fa6062 98#include "opt_serializer.h"
99
100#include <sys/param.h>
101#include <sys/bus.h>
102#include <sys/endian.h>
9db4b353 103#include <sys/interrupt.h>
104#include <sys/kernel.h>
105#include <sys/ktr.h>
106#include <sys/malloc.h>
107#include <sys/mbuf.h>
9c80d176 108#include <sys/proc.h>
109#include <sys/rman.h>
110#include <sys/serialize.h>
111#include <sys/socket.h>
112#include <sys/sockio.h>
113#include <sys/sysctl.h>
9c80d176 114#include <sys/systm.h>
115
116#include <net/bpf.h>
117#include <net/ethernet.h>
118#include <net/if.h>
119#include <net/if_arp.h>
120#include <net/if_dl.h>
121#include <net/if_media.h>
122#include <net/ifq_var.h>
123#include <net/vlan/if_vlan_var.h>
b637f170 124#include <net/vlan/if_vlan_ether.h>
87307ba1 125
87307ba1 126#include <netinet/in_systm.h>
9c80d176 127#include <netinet/in.h>
128#include <netinet/ip.h>
129#include <netinet/tcp.h>
130#include <netinet/udp.h>
984263bc 131
132#include <bus/pci/pcivar.h>
133#include <bus/pci/pcireg.h>
984263bc 134
135#include <dev/netif/ig_hal/e1000_api.h>
136#include <dev/netif/ig_hal/e1000_82571.h>
137#include <dev/netif/em/if_em.h>
984263bc 138
139#define EM_NAME "Intel(R) PRO/1000 Network Connection "
140#define EM_VER " 6.9.6"
141
142#define EM_DEVICE(id) \
143 { EM_VENDOR_ID, E1000_DEV_ID_##id, EM_NAME #id EM_VER }
144#define EM_DEVICE_NULL { 0, 0, NULL }
145
146static const struct em_vendor_info em_vendor_info_array[] = {
147 EM_DEVICE(82540EM),
148 EM_DEVICE(82540EM_LOM),
149 EM_DEVICE(82540EP),
150 EM_DEVICE(82540EP_LOM),
151 EM_DEVICE(82540EP_LP),
152
153 EM_DEVICE(82541EI),
154 EM_DEVICE(82541ER),
155 EM_DEVICE(82541ER_LOM),
156 EM_DEVICE(82541EI_MOBILE),
157 EM_DEVICE(82541GI),
158 EM_DEVICE(82541GI_LF),
159 EM_DEVICE(82541GI_MOBILE),
160
161 EM_DEVICE(82542),
162
163 EM_DEVICE(82543GC_FIBER),
164 EM_DEVICE(82543GC_COPPER),
165
166 EM_DEVICE(82544EI_COPPER),
167 EM_DEVICE(82544EI_FIBER),
168 EM_DEVICE(82544GC_COPPER),
169 EM_DEVICE(82544GC_LOM),
170
171 EM_DEVICE(82545EM_COPPER),
172 EM_DEVICE(82545EM_FIBER),
173 EM_DEVICE(82545GM_COPPER),
174 EM_DEVICE(82545GM_FIBER),
175 EM_DEVICE(82545GM_SERDES),
176
177 EM_DEVICE(82546EB_COPPER),
178 EM_DEVICE(82546EB_FIBER),
179 EM_DEVICE(82546EB_QUAD_COPPER),
180 EM_DEVICE(82546GB_COPPER),
181 EM_DEVICE(82546GB_FIBER),
182 EM_DEVICE(82546GB_SERDES),
183 EM_DEVICE(82546GB_PCIE),
184 EM_DEVICE(82546GB_QUAD_COPPER),
185 EM_DEVICE(82546GB_QUAD_COPPER_KSP3),
186
187 EM_DEVICE(82547EI),
188 EM_DEVICE(82547EI_MOBILE),
189 EM_DEVICE(82547GI),
190
191 EM_DEVICE(82571EB_COPPER),
192 EM_DEVICE(82571EB_FIBER),
193 EM_DEVICE(82571EB_SERDES),
194 EM_DEVICE(82571EB_SERDES_DUAL),
195 EM_DEVICE(82571EB_SERDES_QUAD),
196 EM_DEVICE(82571EB_QUAD_COPPER),
197 EM_DEVICE(82571EB_QUAD_COPPER_LP),
198 EM_DEVICE(82571EB_QUAD_FIBER),
199 EM_DEVICE(82571PT_QUAD_COPPER),
200
201 EM_DEVICE(82572EI_COPPER),
202 EM_DEVICE(82572EI_FIBER),
203 EM_DEVICE(82572EI_SERDES),
204 EM_DEVICE(82572EI),
205
206 EM_DEVICE(82573E),
207 EM_DEVICE(82573E_IAMT),
208 EM_DEVICE(82573L),
209
210 EM_DEVICE(80003ES2LAN_COPPER_SPT),
211 EM_DEVICE(80003ES2LAN_SERDES_SPT),
212 EM_DEVICE(80003ES2LAN_COPPER_DPT),
213 EM_DEVICE(80003ES2LAN_SERDES_DPT),
214
215 EM_DEVICE(ICH8_IGP_M_AMT),
216 EM_DEVICE(ICH8_IGP_AMT),
217 EM_DEVICE(ICH8_IGP_C),
218 EM_DEVICE(ICH8_IFE),
219 EM_DEVICE(ICH8_IFE_GT),
220 EM_DEVICE(ICH8_IFE_G),
221 EM_DEVICE(ICH8_IGP_M),
222
223 EM_DEVICE(ICH9_IGP_M_AMT),
224 EM_DEVICE(ICH9_IGP_AMT),
225 EM_DEVICE(ICH9_IGP_C),
226 EM_DEVICE(ICH9_IGP_M),
227 EM_DEVICE(ICH9_IGP_M_V),
228 EM_DEVICE(ICH9_IFE),
229 EM_DEVICE(ICH9_IFE_GT),
230 EM_DEVICE(ICH9_IFE_G),
231 EM_DEVICE(ICH9_BM),
232
233 EM_DEVICE(82574L),
234
235 EM_DEVICE(ICH10_R_BM_LM),
236 EM_DEVICE(ICH10_R_BM_LF),
237 EM_DEVICE(ICH10_R_BM_V),
238 EM_DEVICE(ICH10_D_BM_LM),
239 EM_DEVICE(ICH10_D_BM_LF),
984263bc 240
f647ad3d 241 /* required last entry */
9c80d176 242 EM_DEVICE_NULL
243};
244
245static int em_probe(device_t);
246static int em_attach(device_t);
247static int em_detach(device_t);
248static int em_shutdown(device_t);
249static int em_suspend(device_t);
250static int em_resume(device_t);
251
252static void em_init(void *);
253static void em_stop(struct adapter *);
f647ad3d 254static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
255static void em_start(struct ifnet *);
256#ifdef DEVICE_POLLING
257static void em_poll(struct ifnet *, enum poll_cmd, int);
258#endif
f647ad3d 259static void em_watchdog(struct ifnet *);
260static void em_media_status(struct ifnet *, struct ifmediareq *);
261static int em_media_change(struct ifnet *);
262static void em_timer(void *);
263
264static void em_intr(void *);
265static void em_rxeof(struct adapter *, int);
266static void em_txeof(struct adapter *);
9f60d74b 267static void em_tx_collect(struct adapter *);
9c80d176 268static void em_tx_purge(struct adapter *);
269static void em_enable_intr(struct adapter *);
270static void em_disable_intr(struct adapter *);
271
272static int em_dma_malloc(struct adapter *, bus_size_t,
273 struct em_dma_alloc *);
274static void em_dma_free(struct adapter *, struct em_dma_alloc *);
275static void em_init_tx_ring(struct adapter *);
276static int em_init_rx_ring(struct adapter *);
277static int em_create_tx_ring(struct adapter *);
278static int em_create_rx_ring(struct adapter *);
279static void em_destroy_tx_ring(struct adapter *, int);
280static void em_destroy_rx_ring(struct adapter *, int);
281static int em_newbuf(struct adapter *, int, int);
282static int em_encap(struct adapter *, struct mbuf **);
283static void em_rxcsum(struct adapter *, struct e1000_rx_desc *,
284 struct mbuf *);
002b3a05 285static int em_txcsum_pullup(struct adapter *, struct mbuf **);
9f60d74b 286static int em_txcsum(struct adapter *, struct mbuf *,
287 uint32_t *, uint32_t *);
288
289static int em_get_hw_info(struct adapter *);
290static int em_is_valid_eaddr(const uint8_t *);
291static int em_alloc_pci_res(struct adapter *);
292static void em_free_pci_res(struct adapter *);
293static int em_hw_init(struct adapter *);
294static void em_setup_ifp(struct adapter *);
295static void em_init_tx_unit(struct adapter *);
296static void em_init_rx_unit(struct adapter *);
297static void em_update_stats(struct adapter *);
298static void em_set_promisc(struct adapter *);
299static void em_disable_promisc(struct adapter *);
300static void em_set_multi(struct adapter *);
87307ba1 301static void em_update_link_status(struct adapter *);
f647ad3d 302static void em_smartspeed(struct adapter *);
303
304/* Hardware workarounds */
305static int em_82547_fifo_workaround(struct adapter *, int);
306static void em_82547_update_fifo_head(struct adapter *, int);
307static int em_82547_tx_fifo_reset(struct adapter *);
308static void em_82547_move_tail(void *);
309static void em_82547_move_tail_serialized(struct adapter *);
310static uint32_t em_82544_fill_desc(bus_addr_t, uint32_t, PDESC_ARRAY);
311
f647ad3d 312static void em_print_debug_info(struct adapter *);
313static void em_print_nvm_info(struct adapter *);
314static void em_print_hw_stats(struct adapter *);
315
316static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
317static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
d0870c72 318static int em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
9f60d74b 319static int em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
9c80d176 320static void em_add_sysctl(struct adapter *adapter);
984263bc 321
322/* Management and WOL Support */
323static void em_get_mgmt(struct adapter *);
324static void em_rel_mgmt(struct adapter *);
325static void em_get_hw_control(struct adapter *);
326static void em_rel_hw_control(struct adapter *);
327static void em_enable_wol(device_t);
328
329static device_method_t em_methods[] = {
330 /* Device interface */
331 DEVMETHOD(device_probe, em_probe),
332 DEVMETHOD(device_attach, em_attach),
333 DEVMETHOD(device_detach, em_detach),
334 DEVMETHOD(device_shutdown, em_shutdown),
335 DEVMETHOD(device_suspend, em_suspend),
336 DEVMETHOD(device_resume, em_resume),
337 { 0, 0 }
338};
339
340static driver_t em_driver = {
341 "em",
342 em_methods,
343 sizeof(struct adapter),
344};
345
346static devclass_t em_devclass;
347
348DECLARE_DUMMY_MODULE(if_em);
9c80d176 349MODULE_DEPEND(em, ig_hal, 1, 1, 1);
350DRIVER_MODULE(if_em, pci, em_driver, em_devclass, 0, 0);
351
352/*
353 * Tunables
354 */
355static int em_int_throttle_ceil = EM_DEFAULT_ITR;
356static int em_rxd = EM_DEFAULT_RXD;
357static int em_txd = EM_DEFAULT_TXD;
358static int em_smart_pwr_down = FALSE;
0d366ee7 359
360/* Controls whether promiscuous also shows bad packets */
361static int em_debug_sbp = FALSE;
0d366ee7 362
363static int em_82573_workaround = TRUE;
364
d0870c72 365TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
366TUNABLE_INT("hw.em.rxd", &em_rxd);
367TUNABLE_INT("hw.em.txd", &em_txd);
368TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
9c80d176 369TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
05580856 370TUNABLE_INT("hw.em.82573_workaround", &em_82573_workaround);
371
372/* Global used in WOL setup with multiport cards */
373static int em_global_quad_port_a = 0;
374
375/* Set this to one to display debug statistics */
376static int em_display_debug_stats = 0;
0d366ee7 377
378#if !defined(KTR_IF_EM)
379#define KTR_IF_EM KTR_ALL
380#endif
381KTR_INFO_MASTER(if_em);
382KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin", 0);
383KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end", 0);
384KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet", 0);
385KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet", 0);
386KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean", 0);
387#define logif(name) KTR_LOG(if_em_ ## name)
388
389static int
390em_probe(device_t dev)
391{
392 const struct em_vendor_info *ent;
393 uint16_t vid, did;
984263bc 394
395 vid = pci_get_vendor(dev);
396 did = pci_get_device(dev);
984263bc 397
398 for (ent = em_vendor_info_array; ent->desc != NULL; ++ent) {
399 if (vid == ent->vendor_id && did == ent->device_id) {
400 device_set_desc(dev, ent->desc);
dbcd0c9b 401 device_set_async_attach(dev, TRUE);
87307ba1 402 return (0);
984263bc 403 }
984263bc 404 }
87307ba1 405 return (ENXIO);
406}
407
408static int
409em_attach(device_t dev)
410{
411 struct adapter *adapter = device_get_softc(dev);
412 struct ifnet *ifp = &adapter->arpcom.ac_if;
413 int tsize, rsize;
414 int error = 0;
9c80d176 415 uint16_t eeprom_data, device_id;
984263bc 416
9c80d176 417 adapter->dev = adapter->osdep.dev = dev;
f647ad3d 418
419 callout_init(&adapter->timer);
420 callout_init(&adapter->tx_fifo_timer);
421
422 /* Determine hardware and mac info */
423 error = em_get_hw_info(adapter);
424 if (error) {
425 device_printf(dev, "Identify hardware failed\n");
426 goto fail;
427 }
428
429 /* Setup PCI resources */
430 error = em_alloc_pci_res(adapter);
431 if (error) {
432 device_printf(dev, "Allocation of PCI resources failed\n");
433 goto fail;
434 }
984263bc 435
436 /*
437 * For ICH8 and family we need to map the flash memory,
438 * and this must happen after the MAC is identified.
439 */
440 if (adapter->hw.mac.type == e1000_ich8lan ||
441 adapter->hw.mac.type == e1000_ich10lan ||
442 adapter->hw.mac.type == e1000_ich9lan) {
443 adapter->flash_rid = EM_BAR_FLASH;
444
445 adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
446 &adapter->flash_rid, RF_ACTIVE);
447 if (adapter->flash == NULL) {
448 device_printf(dev, "Mapping of Flash failed\n");
449 error = ENXIO;
450 goto fail;
451 }
452 adapter->osdep.flash_bus_space_tag =
453 rman_get_bustag(adapter->flash);
454 adapter->osdep.flash_bus_space_handle =
455 rman_get_bushandle(adapter->flash);
984263bc 456
457 /*
458 * This is used in the shared code
459 * XXX this goof is actually not used.
460 */
461 adapter->hw.flash_address = (uint8_t *)adapter->flash;
462 }
0d366ee7 463
464 /* Do Shared Code initialization */
465 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
466 device_printf(dev, "Setup of Shared code failed\n");
467 error = ENXIO;
468 goto fail;
f647ad3d 469 }
7ea52455 470
471 e1000_get_bus_info(&adapter->hw);
472
1eca7b82 473 /*
9c80d176 474 * Validate number of transmit and receive descriptors. It
1eca7b82 475 * must not exceed hardware maximum, and must be multiple
9c80d176 476 * of E1000_DBA_ALIGN.
1eca7b82 477 */
478 if ((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN != 0 ||
479 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
480 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
481 em_txd < EM_MIN_TXD) {
1eca7b82 482 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
9c80d176 483 EM_DEFAULT_TXD, em_txd);
484 adapter->num_tx_desc = EM_DEFAULT_TXD;
485 } else {
486 adapter->num_tx_desc = em_txd;
487 }
488 if ((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN != 0 ||
489 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
490 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
491 em_rxd < EM_MIN_RXD) {
1eca7b82 492 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
9c80d176 493 EM_DEFAULT_RXD, em_rxd);
494 adapter->num_rx_desc = EM_DEFAULT_RXD;
495 } else {
496 adapter->num_rx_desc = em_rxd;
497 }
498
499 adapter->hw.mac.autoneg = DO_AUTO_NEG;
500 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
501 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
502 adapter->rx_buffer_len = MCLBYTES;
e94c2bf4 503
504 /*
505 * Interrupt throttle rate
506 */
507 if (em_int_throttle_ceil == 0) {
508 adapter->int_throttle_ceil = 0;
509 } else {
510 int throttle = em_int_throttle_ceil;
f647ad3d 511
512 if (throttle < 0)
513 throttle = EM_DEFAULT_ITR;
0d366ee7 514
515 /* Recalculate the tunable value to get the exact frequency. */
516 throttle = 1000000000 / 256 / throttle;
517
518 /* Upper 16bits of ITR is reserved and should be zero */
519 if (throttle & 0xffff0000)
520 throttle = 1000000000 / 256 / EM_DEFAULT_ITR;
521
522 adapter->int_throttle_ceil = 1000000000 / 256 / throttle;
523 }
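	/*
	 * Worked example (illustrative, assuming EM_DEFAULT_ITR is
	 * 10000 interrupts/s): the conversion above gives
	 *	throttle = 1000000000 / 256 / 10000 = 390 (256ns units)
	 *	int_throttle_ceil = 1000000000 / 256 / 390 = 10016
	 * so the stored ceiling is the exact rate the ITR register
	 * granularity can express rather than the raw tunable value.
	 */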
984263bc 524
525 e1000_init_script_state_82541(&adapter->hw, TRUE);
526 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
527
528 /* Copper options */
529 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
530 adapter->hw.phy.mdix = AUTO_ALL_MODES;
531 adapter->hw.phy.disable_polarity_correction = FALSE;
532 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
533 }
534
535 /* Set the frame limits assuming standard ethernet sized frames. */
536 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
537 adapter->min_frame_size = ETH_ZLEN + ETHER_CRC_LEN;
984263bc 538
539 /* This controls when hardware reports transmit completion status. */
540 adapter->hw.mac.report_tx_early = 1;
984263bc 541
87307ba1 542 /*
9c80d176 543 * Create top level busdma tag
984263bc 544 */
545 error = bus_dma_tag_create(NULL, 1, 0,
546 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
547 NULL, NULL,
548 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
549 0, &adapter->parent_dtag);
550 if (error) {
551 device_printf(dev, "could not create top level DMA tag\n");
af82d4bb 552 goto fail;
9c80d176 553 }
af82d4bb 554
555 /*
556 * Allocate Transmit Descriptor ring
557 */
558 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
1eca7b82 559 EM_DBA_ALIGN);
560 error = em_dma_malloc(adapter, tsize, &adapter->txdma);
561 if (error) {
9c80d176 562 device_printf(dev, "Unable to allocate tx_desc memory\n");
af82d4bb 563 goto fail;
984263bc 564 }
9c80d176 565 adapter->tx_desc_base = adapter->txdma.dma_vaddr;
984263bc 566
567 /*
568 * Allocate Receive Descriptor ring
569 */
570 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
1eca7b82 571 EM_DBA_ALIGN);
572 error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
573 if (error) {
9ccd8c1f 574 device_printf(dev, "Unable to allocate rx_desc memory\n");
af82d4bb 575 goto fail;
984263bc 576 }
577 adapter->rx_desc_base = adapter->rxdma.dma_vaddr;
578
579 /* Make sure we have a good EEPROM before we read from it */
580 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
581 /*
582 * Some PCI-E parts fail the first check due to
 583 * the link being in sleep state, so call it again;
 584 * if it fails a second time, it's a real issue.
585 */
586 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
587 device_printf(dev,
588 "The EEPROM Checksum Is Not Valid\n");
589 error = EIO;
590 goto fail;
591 }
592 }
593
594 /* Initialize the hardware */
595 error = em_hw_init(adapter);
596 if (error) {
f647ad3d 597 device_printf(dev, "Unable to initialize the hardware\n");
af82d4bb 598 goto fail;
599 }
600
601 /* Copy the permanent MAC address out of the EEPROM */
602 if (e1000_read_mac_addr(&adapter->hw) < 0) {
603 device_printf(dev, "EEPROM read error while reading MAC"
604 " address\n");
984263bc 605 error = EIO;
af82d4bb 606 goto fail;
984263bc 607 }
9c80d176 608 if (!em_is_valid_eaddr(adapter->hw.mac.addr)) {
87307ba1 609 device_printf(dev, "Invalid MAC address\n");
984263bc 610 error = EIO;
af82d4bb 611 goto fail;
984263bc
MD
612 }
613
9c80d176
SZ
614 /* Allocate transmit descriptors and buffers */
615 error = em_create_tx_ring(adapter);
616 if (error) {
617 device_printf(dev, "Could not setup transmit structures\n");
618 goto fail;
619 }
620
621 /* Allocate receive descriptors and buffers */
622 error = em_create_rx_ring(adapter);
623 if (error) {
624 device_printf(dev, "Could not setup receive structures\n");
625 goto fail;
626 }
627
628 /* Manually turn off all interrupts */
629 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
630
984263bc 631 /* Setup OS specific network interface */
9c80d176
SZ
632 em_setup_ifp(adapter);
633
634 /* Add sysctl tree, must after em_setup_ifp() */
635 em_add_sysctl(adapter);
984263bc
MD
636
637 /* Initialize statistics */
9c80d176
SZ
638 em_update_stats(adapter);
639
640 adapter->hw.mac.get_link_status = 1;
87307ba1 641 em_update_link_status(adapter);
984263bc 642
1eca7b82 643 /* Indicate SOL/IDER usage */
9c80d176
SZ
644 if (e1000_check_reset_block(&adapter->hw)) {
645 device_printf(dev,
646 "PHY reset is blocked due to SOL/IDER session.\n");
647 }
648
649 /* Determine if we have to control management hardware */
650 adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
651
652 /*
653 * Setup Wake-on-Lan
654 */
655 switch (adapter->hw.mac.type) {
656 case e1000_82542:
657 case e1000_82543:
658 break;
659
660 case e1000_82546:
661 case e1000_82546_rev_3:
662 case e1000_82571:
663 case e1000_80003es2lan:
664 if (adapter->hw.bus.func == 1) {
665 e1000_read_nvm(&adapter->hw,
666 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
667 } else {
668 e1000_read_nvm(&adapter->hw,
669 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
670 }
671 eeprom_data &= EM_EEPROM_APME;
672 break;
673
674 default:
675 /* APME bit in EEPROM is mapped to WUC.APME */
676 eeprom_data =
677 E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME;
678 break;
679 }
680 if (eeprom_data)
681 adapter->wol = E1000_WUFC_MAG;
682 /*
683 * We have the eeprom settings, now apply the special cases
684 * where the eeprom may be wrong or the board won't support
685 * wake on lan on a particular port
686 */
687 device_id = pci_get_device(dev);
688 switch (device_id) {
689 case E1000_DEV_ID_82546GB_PCIE:
690 adapter->wol = 0;
691 break;
692
693 case E1000_DEV_ID_82546EB_FIBER:
694 case E1000_DEV_ID_82546GB_FIBER:
695 case E1000_DEV_ID_82571EB_FIBER:
696 /*
697 * Wake events only supported on port A for dual fiber
698 * regardless of eeprom setting
699 */
700 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
701 E1000_STATUS_FUNC_1)
702 adapter->wol = 0;
703 break;
704
705 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
706 case E1000_DEV_ID_82571EB_QUAD_COPPER:
707 case E1000_DEV_ID_82571EB_QUAD_FIBER:
708 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
709 /* if quad port adapter, disable WoL on all but port A */
710 if (em_global_quad_port_a != 0)
711 adapter->wol = 0;
712 /* Reset for multiple quad port adapters */
713 if (++em_global_quad_port_a == 4)
714 em_global_quad_port_a = 0;
715 break;
716 }
717
718 /* XXX disable wol */
719 adapter->wol = 0;
720
721 /* Do we need workaround for 82544 PCI-X adapter? */
722 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
723 adapter->hw.mac.type == e1000_82544)
f647ad3d 724 adapter->pcix_82544 = TRUE;
87307ba1 725 else
f647ad3d 726 adapter->pcix_82544 = FALSE;
af82d4bb 727
728 if (adapter->pcix_82544) {
729 /*
730 * 82544 on PCI-X may split one TX segment
731 * into two TX descs, so we double its number
732 * of spare TX desc here.
733 */
734 adapter->spare_tx_desc = 2 * EM_TX_SPARE;
735 } else {
736 adapter->spare_tx_desc = EM_TX_SPARE;
737 }
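	/*
	 * Background note (informational): on 82544/PCI-X a single DMA
	 * segment may have to be split into two TX descriptors to satisfy
	 * an address/length restriction (see the em_82544_fill_desc()
	 * handling in em_encap()), which is why the spare descriptor
	 * count is doubled above.
	 */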
738
739 /*
740 * Keep following relationship between spare_tx_desc, oact_tx_desc
741 * and tx_int_nsegs:
742 * (spare_tx_desc + EM_TX_RESERVED) <=
743 * oact_tx_desc <= EM_TX_OACTIVE_MAX <= tx_int_nsegs
744 */
745 adapter->oact_tx_desc = adapter->num_tx_desc / 8;
746 if (adapter->oact_tx_desc > EM_TX_OACTIVE_MAX)
747 adapter->oact_tx_desc = EM_TX_OACTIVE_MAX;
748 if (adapter->oact_tx_desc < adapter->spare_tx_desc + EM_TX_RESERVED)
749 adapter->oact_tx_desc = adapter->spare_tx_desc + EM_TX_RESERVED;
750
751 adapter->tx_int_nsegs = adapter->num_tx_desc / 16;
752 if (adapter->tx_int_nsegs < adapter->oact_tx_desc)
753 adapter->tx_int_nsegs = adapter->oact_tx_desc;
754
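	/*
	 * Illustrative example (assuming EM_DEFAULT_TXD is 256 and the
	 * non-82544 spare count): oact_tx_desc starts at 256 / 8 = 32 and
	 * is clamped into [spare_tx_desc + EM_TX_RESERVED,
	 * EM_TX_OACTIVE_MAX]; tx_int_nsegs starts at 256 / 16 = 16 and is
	 * then raised to oact_tx_desc, preserving the invariant documented
	 * above.
	 */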
755 error = bus_setup_intr(dev, adapter->intr_res, INTR_MPSAFE,
756 em_intr, adapter, &adapter->intr_tag,
757 ifp->if_serializer);
af82d4bb 758 if (error) {
9c80d176
SZ
759 device_printf(dev, "Failed to register interrupt handler");
760 ether_ifdetach(&adapter->arpcom.ac_if);
af82d4bb
JS
761 goto fail;
762 }
763
9c80d176 764 ifp->if_cpuid = ithread_cpuid(rman_get_start(adapter->intr_res));
9db4b353 765 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
9c80d176 766 return (0);
af82d4bb
JS
767fail:
768 em_detach(dev);
9c80d176 769 return (error);
984263bc
MD
770}
771
984263bc
MD
772static int
773em_detach(device_t dev)
774{
78195a76 775 struct adapter *adapter = device_get_softc(dev);
984263bc 776
af82d4bb 777 if (device_is_attached(dev)) {
9c80d176 778 struct ifnet *ifp = &adapter->arpcom.ac_if;
cdf89432
SZ
779
780 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 781
cdf89432 782 adapter->in_detach = 1;
af82d4bb 783 em_stop(adapter);
9c80d176
SZ
784
785 e1000_phy_hw_reset(&adapter->hw);
786
787 em_rel_mgmt(adapter);
788
789 if ((adapter->hw.mac.type == e1000_82573 ||
790 adapter->hw.mac.type == e1000_ich8lan ||
791 adapter->hw.mac.type == e1000_ich10lan ||
792 adapter->hw.mac.type == e1000_ich9lan) &&
793 e1000_check_mng_mode(&adapter->hw))
794 em_rel_hw_control(adapter);
795
796 if (adapter->wol) {
797 E1000_WRITE_REG(&adapter->hw, E1000_WUC,
798 E1000_WUC_PME_EN);
799 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
800 em_enable_wol(dev);
801 }
802
803 bus_teardown_intr(dev, adapter->intr_res, adapter->intr_tag);
804
cdf89432
SZ
805 lwkt_serialize_exit(ifp->if_serializer);
806
807 ether_ifdetach(ifp);
7ea52455 808 }
cdf89432
SZ
809 bus_generic_detach(dev);
810
9c80d176
SZ
811 em_free_pci_res(adapter);
812
813 em_destroy_tx_ring(adapter, adapter->num_tx_desc);
814 em_destroy_rx_ring(adapter, adapter->num_rx_desc);
af82d4bb 815
984263bc 816 /* Free Transmit Descriptor ring */
9c80d176 817 if (adapter->tx_desc_base)
9ccd8c1f 818 em_dma_free(adapter, &adapter->txdma);
984263bc 819
984263bc 820 /* Free Receive Descriptor ring */
9c80d176 821 if (adapter->rx_desc_base)
9ccd8c1f 822 em_dma_free(adapter, &adapter->rxdma);
9c80d176
SZ
823
824 /* Free top level busdma tag */
825 if (adapter->parent_dtag != NULL)
826 bus_dma_tag_destroy(adapter->parent_dtag);
984263bc 827
1eca7b82 828 /* Free sysctl tree */
9c80d176 829 if (adapter->sysctl_tree != NULL)
1eca7b82 830 sysctl_ctx_free(&adapter->sysctl_ctx);
984263bc 831
87307ba1 832 return (0);
984263bc
MD
833}
834
984263bc
MD
835static int
836em_shutdown(device_t dev)
837{
9c80d176 838 return em_suspend(dev);
87307ba1
SZ
839}
840
87307ba1
SZ
841static int
842em_suspend(device_t dev)
843{
844 struct adapter *adapter = device_get_softc(dev);
9c80d176 845 struct ifnet *ifp = &adapter->arpcom.ac_if;
87307ba1
SZ
846
847 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 848
87307ba1 849 em_stop(adapter);
9c80d176
SZ
850
851 em_rel_mgmt(adapter);
852
853 if ((adapter->hw.mac.type == e1000_82573 ||
854 adapter->hw.mac.type == e1000_ich8lan ||
855 adapter->hw.mac.type == e1000_ich10lan ||
856 adapter->hw.mac.type == e1000_ich9lan) &&
857 e1000_check_mng_mode(&adapter->hw))
858 em_rel_hw_control(adapter);
859
860 if (adapter->wol) {
861 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
862 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
863 em_enable_wol(dev);
864 }
865
87307ba1 866 lwkt_serialize_exit(ifp->if_serializer);
9c80d176
SZ
867
868 return bus_generic_suspend(dev);
87307ba1
SZ
869}
870
871static int
872em_resume(device_t dev)
873{
874 struct adapter *adapter = device_get_softc(dev);
9c80d176 875 struct ifnet *ifp = &adapter->arpcom.ac_if;
87307ba1
SZ
876
877 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 878
87307ba1 879 em_init(adapter);
9c80d176 880 em_get_mgmt(adapter);
9db4b353 881 if_devstart(ifp);
9c80d176 882
87307ba1
SZ
883 lwkt_serialize_exit(ifp->if_serializer);
884
885 return bus_generic_resume(dev);
984263bc
MD
886}
887
984263bc
MD
888static void
889em_start(struct ifnet *ifp)
890{
f647ad3d 891 struct adapter *adapter = ifp->if_softc;
9c80d176 892 struct mbuf *m_head;
984263bc 893
1eca7b82 894 ASSERT_SERIALIZED(ifp->if_serializer);
78195a76 895
87307ba1
SZ
896 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
897 return;
9c80d176 898
9db4b353
SZ
899 if (!adapter->link_active) {
900 ifq_purge(&ifp->if_snd);
f647ad3d 901 return;
9db4b353 902 }
9c80d176 903
e26dc3e9 904 while (!ifq_is_empty(&ifp->if_snd)) {
 905 /* Do we at least have the minimal number of free TX descriptors? */
906 if (EM_IS_OACTIVE(adapter)) {
907 em_tx_collect(adapter);
9c80d176 908 if (EM_IS_OACTIVE(adapter)) {
9c80d176 909 ifp->if_flags |= IFF_OACTIVE;
9f60d74b 910 adapter->no_tx_desc_avail1++;
9c80d176
SZ
911 break;
912 }
913 }
914
915 logif(pkt_txqueue);
9db4b353 916 m_head = ifq_dequeue(&ifp->if_snd, NULL);
f647ad3d
JS
917 if (m_head == NULL)
918 break;
984263bc 919
9c80d176 920 if (em_encap(adapter, &m_head)) {
002b3a05 921 ifp->if_oerrors++;
9f60d74b
SZ
922 em_tx_collect(adapter);
923 continue;
f647ad3d 924 }
984263bc
MD
925
926 /* Send a copy of the frame to the BPF listener */
b637f170 927 ETHER_BPF_MTAP(ifp, m_head);
87307ba1
SZ
928
929 /* Set timeout in case hardware has problems transmitting. */
930 ifp->if_timer = EM_TX_TIMEOUT;
f647ad3d 931 }
984263bc
MD
932}
933
984263bc 934static int
bd4539cc 935em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
984263bc 936{
f647ad3d 937 struct adapter *adapter = ifp->if_softc;
9c80d176 938 struct ifreq *ifr = (struct ifreq *)data;
1eca7b82 939 uint16_t eeprom_data = 0;
9c80d176
SZ
940 int max_frame_size, mask, reinit;
941 int error = 0;
0d366ee7 942
f647ad3d 943 if (adapter->in_detach)
9c80d176
SZ
944 return (error);
945
946 ASSERT_SERIALIZED(ifp->if_serializer);
0d366ee7 947
984263bc 948 switch (command) {
984263bc 949 case SIOCSIFMTU:
9c80d176
SZ
950 switch (adapter->hw.mac.type) {
951 case e1000_82573:
1eca7b82
SZ
952 /*
953 * 82573 only supports jumbo frames
954 * if ASPM is disabled.
955 */
9c80d176
SZ
956 e1000_read_nvm(&adapter->hw,
957 NVM_INIT_3GIO_3, 1, &eeprom_data);
958 if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1eca7b82
SZ
959 max_frame_size = ETHER_MAX_LEN;
960 break;
961 }
9c80d176
SZ
962 /* FALL THROUGH */
963
964 /* Limit Jumbo Frame size */
965 case e1000_82571:
966 case e1000_82572:
967 case e1000_ich9lan:
968 case e1000_ich10lan:
969 case e1000_82574:
970 case e1000_80003es2lan:
1eca7b82 971 max_frame_size = 9234;
7ea52455 972 break;
9c80d176
SZ
973
974 /* Adapters that do not support jumbo frames */
975 case e1000_82542:
976 case e1000_ich8lan:
7ea52455
SZ
977 max_frame_size = ETHER_MAX_LEN;
978 break;
9c80d176 979
7ea52455
SZ
980 default:
981 max_frame_size = MAX_JUMBO_FRAME_SIZE;
982 break;
983 }
9c80d176
SZ
984 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
985 ETHER_CRC_LEN) {
984263bc 986 error = EINVAL;
9c80d176 987 break;
984263bc 988 }
9c80d176
SZ
989
990 ifp->if_mtu = ifr->ifr_mtu;
991 adapter->max_frame_size =
992 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
993
994 if (ifp->if_flags & IFF_RUNNING)
995 em_init(adapter);
984263bc 996 break;
9c80d176 997
984263bc 998 case SIOCSIFFLAGS:
984263bc 999 if (ifp->if_flags & IFF_UP) {
9c80d176
SZ
1000 if ((ifp->if_flags & IFF_RUNNING)) {
1001 if ((ifp->if_flags ^ adapter->if_flags) &
1002 (IFF_PROMISC | IFF_ALLMULTI)) {
1003 em_disable_promisc(adapter);
1004 em_set_promisc(adapter);
1005 }
1006 } else {
78195a76 1007 em_init(adapter);
87307ba1 1008 }
9c80d176
SZ
1009 } else if (ifp->if_flags & IFF_RUNNING) {
1010 em_stop(adapter);
984263bc 1011 }
87307ba1 1012 adapter->if_flags = ifp->if_flags;
984263bc 1013 break;
9c80d176 1014
984263bc
MD
1015 case SIOCADDMULTI:
1016 case SIOCDELMULTI:
984263bc
MD
1017 if (ifp->if_flags & IFF_RUNNING) {
1018 em_disable_intr(adapter);
1019 em_set_multi(adapter);
9c80d176
SZ
1020 if (adapter->hw.mac.type == e1000_82542 &&
1021 adapter->hw.revision_id == E1000_REVISION_2)
1022 em_init_rx_unit(adapter);
1eca7b82 1023#ifdef DEVICE_POLLING
9c80d176 1024 if (!(ifp->if_flags & IFF_POLLING))
1eca7b82 1025#endif
9c80d176 1026 em_enable_intr(adapter);
984263bc
MD
1027 }
1028 break;
9c80d176 1029
984263bc 1030 case SIOCSIFMEDIA:
87307ba1 1031 /* Check SOL/IDER usage */
9c80d176
SZ
1032 if (e1000_check_reset_block(&adapter->hw)) {
1033 device_printf(adapter->dev, "Media change is"
1034 " blocked due to SOL/IDER session.\n");
87307ba1
SZ
1035 break;
1036 }
9c80d176
SZ
1037 /* FALL THROUGH */
1038
984263bc 1039 case SIOCGIFMEDIA:
984263bc
MD
1040 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1041 break;
9c80d176 1042
984263bc 1043 case SIOCSIFCAP:
9c80d176 1044 reinit = 0;
984263bc
MD
1045 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
1046 if (mask & IFCAP_HWCSUM) {
9c80d176 1047 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
1eca7b82 1048 reinit = 1;
984263bc 1049 }
1eca7b82
SZ
1050 if (mask & IFCAP_VLAN_HWTAGGING) {
1051 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1052 reinit = 1;
1053 }
9c80d176 1054 if (reinit && (ifp->if_flags & IFF_RUNNING))
1eca7b82 1055 em_init(adapter);
984263bc 1056 break;
9c80d176 1057
984263bc 1058 default:
1eca7b82
SZ
1059 error = ether_ioctl(ifp, command, data);
1060 break;
984263bc 1061 }
87307ba1 1062 return (error);
984263bc
MD
1063}
1064
984263bc
MD
1065static void
1066em_watchdog(struct ifnet *ifp)
1067{
1eca7b82 1068 struct adapter *adapter = ifp->if_softc;
984263bc 1069
9c80d176
SZ
1070 ASSERT_SERIALIZED(ifp->if_serializer);
1071
1072 /*
1073 * The timer is set to 5 every time start queues a packet.
1074 * Then txeof keeps resetting it as long as it cleans at
1075 * least one descriptor.
1076 * Finally, anytime all descriptors are clean the timer is
1077 * set to 0.
1078 */
1079
9f60d74b
SZ
1080 if (E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1081 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) {
1082 /*
1083 * If we reach here, all TX jobs are completed and
1084 * the TX engine should have been idled for some time.
1085 * We don't need to call if_devstart() here.
1086 */
1087 ifp->if_flags &= ~IFF_OACTIVE;
1088 ifp->if_timer = 0;
1089 return;
1090 }
1091
1eca7b82
SZ
1092 /*
1093 * If we are in this routine because of pause frames, then
984263bc
MD
1094 * don't reset the hardware.
1095 */
9c80d176
SZ
1096 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
1097 E1000_STATUS_TXOFF) {
984263bc
MD
1098 ifp->if_timer = EM_TX_TIMEOUT;
1099 return;
1100 }
1101
9c80d176 1102 if (e1000_check_for_link(&adapter->hw) == 0)
f647ad3d 1103 if_printf(ifp, "watchdog timeout -- resetting\n");
984263bc 1104
9c80d176
SZ
1105 ifp->if_oerrors++;
1106 adapter->watchdog_events++;
1107
984263bc
MD
1108 em_init(adapter);
1109
9c80d176
SZ
1110 if (!ifq_is_empty(&ifp->if_snd))
1111 if_devstart(ifp);
984263bc
MD
1112}
1113
984263bc 1114static void
9c80d176 1115em_init(void *xsc)
984263bc 1116{
9c80d176
SZ
1117 struct adapter *adapter = xsc;
1118 struct ifnet *ifp = &adapter->arpcom.ac_if;
1119 device_t dev = adapter->dev;
eac00e59 1120 uint32_t pba;
984263bc 1121
87307ba1
SZ
1122 ASSERT_SERIALIZED(ifp->if_serializer);
1123
984263bc
MD
1124 em_stop(adapter);
1125
eac00e59
SZ
1126 /*
1127 * Packet Buffer Allocation (PBA)
1128 * Writing PBA sets the receive portion of the buffer
1129 * the remainder is used for the transmit buffer.
1eca7b82
SZ
1130 *
1131 * Devices before the 82547 had a Packet Buffer of 64K.
1132 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1133 * After the 82547 the buffer was reduced to 40K.
1134 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1135 * Note: default does not leave enough room for Jumbo Frame >10k.
eac00e59 1136 */
9c80d176
SZ
1137 switch (adapter->hw.mac.type) {
1138 case e1000_82547:
1139 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1140 if (adapter->max_frame_size > 8192)
eac00e59 1141 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
7ea52455
SZ
1142 else
1143 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
eac00e59
SZ
1144 adapter->tx_fifo_head = 0;
1145 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1146 adapter->tx_fifo_size =
9c80d176 1147 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
7ea52455 1148 break;
9c80d176 1149
87307ba1 1150 /* Total Packet Buffer on these is 48K */
9c80d176
SZ
1151 case e1000_82571:
1152 case e1000_82572:
1153 case e1000_80003es2lan:
7ea52455
SZ
1154 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1155 break;
9c80d176
SZ
1156
1157 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
7ea52455
SZ
1158 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1159 break;
9c80d176
SZ
1160
1161 case e1000_82574:
1162 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1eca7b82 1163 break;
9c80d176
SZ
1164
1165 case e1000_ich9lan:
1166 case e1000_ich10lan:
1167#define E1000_PBA_10K 0x000A
b0ff1d56
MS
1168 pba = E1000_PBA_10K;
1169 break;
9c80d176
SZ
1170
1171 case e1000_ich8lan:
1172 pba = E1000_PBA_8K;
1173 break;
1174
7ea52455
SZ
1175 default:
1176 /* Devices before 82547 had a Packet Buffer of 64K. */
9c80d176 1177 if (adapter->max_frame_size > 8192)
7ea52455
SZ
1178 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1179 else
1180 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
eac00e59 1181 }
7ea52455 1182
9c80d176
SZ
1183 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
1184
0d366ee7 1185 /* Get the latest mac address, User can use a LAA */
9c80d176
SZ
1186 bcopy(IF_LLADDR(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN);
1187
1188 /* Put the address into the Receive Address Array */
1189 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1190
1191 /*
1192 * With the 82571 adapter, RAR[0] may be overwritten
1193 * when the other port is reset, we make a duplicate
1194 * in RAR[14] for that eventuality, this assures
1195 * the interface continues to function.
1196 */
1197 if (adapter->hw.mac.type == e1000_82571) {
1198 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1199 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1200 E1000_RAR_ENTRIES - 1);
1201 }
0d366ee7 1202
984263bc 1203 /* Initialize the hardware */
9c80d176
SZ
1204 if (em_hw_init(adapter)) {
1205 device_printf(dev, "Unable to initialize the hardware\n");
1206 /* XXX em_stop()? */
984263bc
MD
1207 return;
1208 }
87307ba1 1209 em_update_link_status(adapter);
984263bc 1210
9c80d176
SZ
1211 /* Setup VLAN support, basic and offload if available */
1212 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
984263bc 1213
9c80d176
SZ
1214 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1215 uint32_t ctrl;
1216
1217 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1218 ctrl |= E1000_CTRL_VME;
1219 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
87307ba1
SZ
1220 }
1221
9c80d176
SZ
1222 /* Set hardware offload abilities */
1223 if (ifp->if_capenable & IFCAP_TXCSUM)
1224 ifp->if_hwassist = EM_CSUM_FEATURES;
1225 else
1226 ifp->if_hwassist = 0;
1227
1228 /* Configure for OS presence */
1229 em_get_mgmt(adapter);
1230
984263bc 1231 /* Prepare transmit descriptors and buffers */
9c80d176
SZ
1232 em_init_tx_ring(adapter);
1233 em_init_tx_unit(adapter);
984263bc
MD
1234
1235 /* Setup Multicast table */
1236 em_set_multi(adapter);
1237
1238 /* Prepare receive descriptors and buffers */
9c80d176
SZ
1239 if (em_init_rx_ring(adapter)) {
1240 device_printf(dev, "Could not setup receive structures\n");
984263bc 1241 em_stop(adapter);
984263bc
MD
1242 return;
1243 }
9c80d176 1244 em_init_rx_unit(adapter);
7ea52455 1245
87307ba1 1246 /* Don't lose promiscuous settings */
0d366ee7 1247 em_set_promisc(adapter);
984263bc 1248
984263bc
MD
1249 ifp->if_flags |= IFF_RUNNING;
1250 ifp->if_flags &= ~IFF_OACTIVE;
1251
9c80d176
SZ
1252 callout_reset(&adapter->timer, hz, em_timer, adapter);
1253 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1254
1255 /* MSI/X configuration for 82574 */
1256 if (adapter->hw.mac.type == e1000_82574) {
1257 int tmp;
1258
1259 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1260 tmp |= E1000_CTRL_EXT_PBA_CLR;
1261 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1262 /*
1263 * Set the IVAR - interrupt vector routing.
1264 * Each nibble represents a vector, high bit
1265 * is enable, other 3 bits are the MSIX table
1266 * entry, we map RXQ0 to 0, TXQ0 to 1, and
1267 * Link (other) to 2, hence the magic number.
1268 */
1269 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
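		/*
		 * Decoding the constant (informational): reading 0x800A0908
		 * one nibble at a time from the low end gives 0x8 for RXQ0
		 * (enabled, MSI-X entry 0), 0x9 in the TXQ0 nibble (enabled,
		 * entry 1) and 0xA in the link/other nibble (enabled, entry
		 * 2), matching the mapping described above; the set bit in
		 * the top nibble is not covered by the comment and is left
		 * undecoded here.
		 */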
1270 }
1271
1272#ifdef DEVICE_POLLING
9c80d176
SZ
1273 /*
1274 * Only enable interrupts if we are not polling, make sure
1275 * they are off otherwise.
1276 */
1eca7b82
SZ
1277 if (ifp->if_flags & IFF_POLLING)
1278 em_disable_intr(adapter);
1279 else
9c80d176
SZ
1280#endif /* DEVICE_POLLING */
1281 em_enable_intr(adapter);
0d366ee7
MD
1282
1283 /* Don't reset the phy next time init gets called */
9c80d176 1284 adapter->hw.phy.reset_disable = TRUE;
984263bc
MD
1285}
1286
984263bc 1287#ifdef DEVICE_POLLING
f647ad3d
JS
1288
1289static void
984263bc
MD
1290em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1291{
f647ad3d
JS
1292 struct adapter *adapter = ifp->if_softc;
1293 uint32_t reg_icr;
984263bc 1294
78195a76
MD
1295 ASSERT_SERIALIZED(ifp->if_serializer);
1296
9c80d176 1297 switch (cmd) {
9c095379
MD
1298 case POLL_REGISTER:
1299 em_disable_intr(adapter);
1300 break;
9c80d176 1301
9c095379 1302 case POLL_DEREGISTER:
f647ad3d 1303 em_enable_intr(adapter);
9c095379 1304 break;
9c80d176 1305
9c095379 1306 case POLL_AND_CHECK_STATUS:
9c80d176 1307 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
f647ad3d 1308 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
9ccd8c1f 1309 callout_stop(&adapter->timer);
9c80d176 1310 adapter->hw.mac.get_link_status = 1;
87307ba1 1311 em_update_link_status(adapter);
9c80d176 1312 callout_reset(&adapter->timer, hz, em_timer, adapter);
f647ad3d 1313 }
9c80d176 1314 /* FALL THROUGH */
9c095379
MD
1315 case POLL_ONLY:
1316 if (ifp->if_flags & IFF_RUNNING) {
87307ba1
SZ
1317 em_rxeof(adapter, count);
1318 em_txeof(adapter);
1eca7b82 1319
9c095379 1320 if (!ifq_is_empty(&ifp->if_snd))
9db4b353 1321 if_devstart(ifp);
9c095379
MD
1322 }
1323 break;
f647ad3d 1324 }
984263bc 1325}
9c095379 1326
984263bc
MD
1327#endif /* DEVICE_POLLING */
1328
984263bc 1329static void
9c80d176 1330em_intr(void *xsc)
984263bc 1331{
9c80d176
SZ
1332 struct adapter *adapter = xsc;
1333 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 1334 uint32_t reg_icr;
984263bc 1335
07855a48 1336 logif(intr_beg);
78195a76
MD
1337 ASSERT_SERIALIZED(ifp->if_serializer);
1338
9c80d176
SZ
1339 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1340
1341 if ((adapter->hw.mac.type >= e1000_82571 &&
1eca7b82
SZ
1342 (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
1343 reg_icr == 0) {
07855a48 1344 logif(intr_end);
984263bc 1345 return;
07855a48 1346 }
984263bc 1347
87307ba1 1348 /*
9c80d176
SZ
1349 * XXX: some laptops trigger several spurious interrupts
1350 * on em(4) when in the resume cycle. The ICR register
1351 * reports all-ones value in this case. Processing such
1352 * interrupts would lead to a freeze. I don't know why.
87307ba1
SZ
1353 */
1354 if (reg_icr == 0xffffffff) {
1355 logif(intr_end);
1356 return;
984263bc
MD
1357 }
1358
79938e61 1359 if (ifp->if_flags & IFF_RUNNING) {
9f60d74b
SZ
1360 if (reg_icr &
1361 (E1000_IMS_RXT0 | E1000_IMS_RXDMT0 | E1000_ICR_RXO))
1362 em_rxeof(adapter, -1);
1363 if (reg_icr & E1000_IMS_TXDW) {
1364 em_txeof(adapter);
1365 if (!ifq_is_empty(&ifp->if_snd))
1366 if_devstart(ifp);
1367 }
f647ad3d 1368 }
984263bc 1369
87307ba1
SZ
1370 /* Link status change */
1371 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1372 callout_stop(&adapter->timer);
9c80d176 1373 adapter->hw.mac.get_link_status = 1;
87307ba1 1374 em_update_link_status(adapter);
9c80d176
SZ
1375
1376 /* Deal with TX cruft when link lost */
1377 em_tx_purge(adapter);
1378
1379 callout_reset(&adapter->timer, hz, em_timer, adapter);
87307ba1
SZ
1380 }
1381
1382 if (reg_icr & E1000_ICR_RXO)
1383 adapter->rx_overruns++;
1384
07855a48 1385 logif(intr_end);
984263bc
MD
1386}
1387
984263bc
MD
1388static void
1389em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1390{
87307ba1 1391 struct adapter *adapter = ifp->if_softc;
1eca7b82 1392 u_char fiber_type = IFM_1000_SX;
984263bc 1393
78195a76
MD
1394 ASSERT_SERIALIZED(ifp->if_serializer);
1395
87307ba1 1396 em_update_link_status(adapter);
984263bc
MD
1397
1398 ifmr->ifm_status = IFM_AVALID;
1399 ifmr->ifm_active = IFM_ETHER;
1400
1401 if (!adapter->link_active)
1402 return;
1403
1404 ifmr->ifm_status |= IFM_ACTIVE;
1405
9c80d176
SZ
1406 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
1407 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
1408 if (adapter->hw.mac.type == e1000_82545)
1eca7b82
SZ
1409 fiber_type = IFM_1000_LX;
1410 ifmr->ifm_active |= fiber_type | IFM_FDX;
984263bc
MD
1411 } else {
1412 switch (adapter->link_speed) {
1413 case 10:
1414 ifmr->ifm_active |= IFM_10_T;
1415 break;
1416 case 100:
1417 ifmr->ifm_active |= IFM_100_TX;
1418 break;
9c80d176 1419
984263bc 1420 case 1000:
7f259627 1421 ifmr->ifm_active |= IFM_1000_T;
984263bc
MD
1422 break;
1423 }
1424 if (adapter->link_duplex == FULL_DUPLEX)
1425 ifmr->ifm_active |= IFM_FDX;
1426 else
1427 ifmr->ifm_active |= IFM_HDX;
1428 }
984263bc
MD
1429}
1430
984263bc
MD
1431static int
1432em_media_change(struct ifnet *ifp)
1433{
87307ba1
SZ
1434 struct adapter *adapter = ifp->if_softc;
1435 struct ifmedia *ifm = &adapter->media;
984263bc 1436
78195a76 1437 ASSERT_SERIALIZED(ifp->if_serializer);
9c095379 1438
87307ba1
SZ
1439 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1440 return (EINVAL);
1441
984263bc
MD
1442 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1443 case IFM_AUTO:
9c80d176
SZ
1444 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1445 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
984263bc 1446 break;
9c80d176 1447
1eca7b82 1448 case IFM_1000_LX:
984263bc 1449 case IFM_1000_SX:
7f259627 1450 case IFM_1000_T:
9c80d176
SZ
1451 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1452 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
984263bc 1453 break;
9c80d176 1454
984263bc 1455 case IFM_100_TX:
9c80d176
SZ
1456 adapter->hw.mac.autoneg = FALSE;
1457 adapter->hw.phy.autoneg_advertised = 0;
984263bc 1458 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
9c80d176 1459 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
984263bc 1460 else
9c80d176 1461 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
984263bc 1462 break;
9c80d176 1463
984263bc 1464 case IFM_10_T:
9c80d176
SZ
1465 adapter->hw.mac.autoneg = FALSE;
1466 adapter->hw.phy.autoneg_advertised = 0;
984263bc 1467 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
9c80d176 1468 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
984263bc 1469 else
9c80d176 1470 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
984263bc 1471 break;
9c80d176 1472
984263bc 1473 default:
f647ad3d 1474 if_printf(ifp, "Unsupported media type\n");
9c80d176 1475 break;
984263bc 1476 }
9c80d176 1477
f647ad3d 1478 /*
9c80d176 1479 * As the speed/duplex settings may have changed, we need to
1480 * reset the PHY.
1481 */
9c80d176 1482 adapter->hw.phy.reset_disable = FALSE;
984263bc 1483
78195a76 1484 em_init(adapter);
984263bc 1485
9c80d176 1486 return (0);
9ccd8c1f
JS
1487}
1488
984263bc 1489static int
9c80d176 1490em_encap(struct adapter *adapter, struct mbuf **m_headp)
9ccd8c1f 1491{
9c80d176 1492 bus_dma_segment_t segs[EM_MAX_SCATTER];
1eca7b82 1493 bus_dmamap_t map;
9c80d176
SZ
1494 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1495 struct e1000_tx_desc *ctxd = NULL;
002b3a05 1496 struct mbuf *m_head = *m_headp;
9f60d74b 1497 uint32_t txd_upper, txd_lower, txd_used, cmd = 0;
9c80d176 1498 int maxsegs, nsegs, i, j, first, last = 0, error;
984263bc 1499
002b3a05
SZ
1500 if (__predict_false(m_head->m_len < EM_TXCSUM_MINHL) &&
1501 (m_head->m_flags & EM_CSUM_FEATURES)) {
1502 /*
1503 * Make sure that ethernet header and ip.ip_hl are in
1504 * contiguous memory, since if TXCSUM is enabled, later
1505 * TX context descriptor's setup need to access ip.ip_hl.
1506 */
1507 error = em_txcsum_pullup(adapter, m_headp);
1508 if (error) {
1509 KKASSERT(*m_headp == NULL);
1510 return error;
1511 }
1512 m_head = *m_headp;
1513 }
1514
9c80d176
SZ
1515 txd_upper = txd_lower = 0;
1516 txd_used = 0;
87307ba1
SZ
1517
1518 /*
9c80d176
SZ
 1519 * Capture the first descriptor index; this descriptor
1520 * will have the index of the EOP which is the only one
1521 * that now gets a DONE bit writeback.
87307ba1 1522 */
9c80d176
SZ
1523 first = adapter->next_avail_tx_desc;
1524 tx_buffer = &adapter->tx_buffer_area[first];
1525 tx_buffer_mapped = tx_buffer;
1526 map = tx_buffer->map;
87307ba1 1527
9c80d176
SZ
1528 maxsegs = adapter->num_tx_desc_avail - EM_TX_RESERVED;
1529 KASSERT(maxsegs >= adapter->spare_tx_desc,
1530 ("not enough spare TX desc\n"));
1531 if (adapter->pcix_82544) {
1532 /* Half it; see the comment in em_attach() */
1533 maxsegs >>= 1;
9ccd8c1f 1534 }
9c80d176
SZ
1535 if (maxsegs > EM_MAX_SCATTER)
1536 maxsegs = EM_MAX_SCATTER;
984263bc 1537
9c80d176
SZ
1538 error = bus_dmamap_load_mbuf_defrag(adapter->txtag, map, m_headp,
1539 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1540 if (error) {
1541 if (error == ENOBUFS)
1542 adapter->mbuf_alloc_failed++;
1543 else
1544 adapter->no_tx_dma_setup++;
984263bc 1545
9c80d176
SZ
1546 m_freem(*m_headp);
1547 *m_headp = NULL;
1548 return error;
7ea52455 1549 }
9c80d176 1550 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
984263bc 1551
9c80d176 1552 m_head = *m_headp;
9f60d74b 1553 adapter->tx_nsegs += nsegs;
9c80d176 1554
002b3a05 1555 if (m_head->m_pkthdr.csum_flags & EM_CSUM_FEATURES) {
9c80d176 1556 /* TX csum offloading will consume one TX desc */
9f60d74b
SZ
1557 adapter->tx_nsegs += em_txcsum(adapter, m_head,
1558 &txd_upper, &txd_lower);
9c80d176 1559 }
984263bc 1560 i = adapter->next_avail_tx_desc;
87307ba1
SZ
1561
1562 /* Set up our transmit descriptors */
9c80d176 1563 for (j = 0; j < nsegs; j++) {
9ccd8c1f
JS
1564 /* If adapter is 82544 and on PCIX bus */
1565 if(adapter->pcix_82544) {
87307ba1
SZ
1566 DESC_ARRAY desc_array;
1567 uint32_t array_elements, counter;
1568
9c80d176 1569 /*
f647ad3d
JS
1570 * Check the Address and Length combination and
1571 * split the data accordingly
9ccd8c1f 1572 */
9c80d176
SZ
1573 array_elements = em_82544_fill_desc(segs[j].ds_addr,
1574 segs[j].ds_len, &desc_array);
9ccd8c1f 1575 for (counter = 0; counter < array_elements; counter++) {
9c80d176
SZ
1576 KKASSERT(txd_used < adapter->num_tx_desc_avail);
1577
9ccd8c1f 1578 tx_buffer = &adapter->tx_buffer_area[i];
9c80d176
SZ
1579 ctxd = &adapter->tx_desc_base[i];
1580
1581 ctxd->buffer_addr = htole64(
1582 desc_array.descriptor[counter].address);
1583 ctxd->lower.data = htole32(
2af74b85 1584 E1000_TXD_CMD_IFCS | txd_lower |
9c80d176
SZ
1585 desc_array.descriptor[counter].length);
1586 ctxd->upper.data = htole32(txd_upper);
87307ba1
SZ
1587
1588 last = i;
9ccd8c1f
JS
1589 if (++i == adapter->num_tx_desc)
1590 i = 0;
1591
1592 tx_buffer->m_head = NULL;
1593 txd_used++;
9c80d176 1594 }
9ccd8c1f 1595 } else {
0d366ee7 1596 tx_buffer = &adapter->tx_buffer_area[i];
9c80d176 1597 ctxd = &adapter->tx_desc_base[i];
9ccd8c1f 1598
9c80d176 1599 ctxd->buffer_addr = htole64(segs[j].ds_addr);
2af74b85 1600 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
9c80d176
SZ
1601 txd_lower | segs[j].ds_len);
1602 ctxd->upper.data = htole32(txd_upper);
984263bc 1603
87307ba1 1604 last = i;
0d366ee7
MD
1605 if (++i == adapter->num_tx_desc)
1606 i = 0;
984263bc 1607
0d366ee7 1608 tx_buffer->m_head = NULL;
0d366ee7 1609 }
984263bc 1610 }
9ccd8c1f 1611
984263bc 1612 adapter->next_avail_tx_desc = i;
9c80d176
SZ
1613 if (adapter->pcix_82544) {
1614 KKASSERT(adapter->num_tx_desc_avail > txd_used);
9ccd8c1f 1615 adapter->num_tx_desc_avail -= txd_used;
9c80d176
SZ
1616 } else {
1617 KKASSERT(adapter->num_tx_desc_avail > nsegs);
1618 adapter->num_tx_desc_avail -= nsegs;
1619 }
984263bc 1620
9c80d176 1621 /* Handle VLAN tag */
83790f85 1622 if (m_head->m_flags & M_VLANTAG) {
9c80d176
SZ
1623 /* Set the vlan id. */
1624 ctxd->upper.fields.special =
1625 htole16(m_head->m_pkthdr.ether_vlantag);
9ccd8c1f 1626
f647ad3d 1627 /* Tell hardware to add tag */
9c80d176 1628 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
f647ad3d 1629 }
984263bc
MD
1630
1631 tx_buffer->m_head = m_head;
9c80d176 1632 tx_buffer_mapped->map = tx_buffer->map;
1eca7b82 1633 tx_buffer->map = map;
9ccd8c1f 1634
9f60d74b
SZ
1635 if (adapter->tx_nsegs >= adapter->tx_int_nsegs) {
1636 adapter->tx_nsegs = 0;
1637 cmd = E1000_TXD_CMD_RS;
1638
1639 adapter->tx_dd[adapter->tx_dd_tail] = last;
1640 EM_INC_TXDD_IDX(adapter->tx_dd_tail);
1641 KKASSERT(adapter->tx_dd_tail != adapter->tx_dd_head);
1642 }
1643
9ccd8c1f 1644 /*
984263bc 1645 * Last Descriptor of Packet needs End Of Packet (EOP)
87307ba1
SZ
1646 * and Report Status (RS)
1647 */
9f60d74b 1648 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);
87307ba1
SZ
1649
1650 /*
1651 * Keep track in the first buffer which descriptor will be
9c80d176 1652 * written back
984263bc 1653 */
9c80d176 1654 tx_buffer = &adapter->tx_buffer_area[first];
afa68aa1 1655
9c80d176
SZ
1656 /*
1657 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
984263bc
MD
1658 * that this frame is available to transmit.
1659 */
9c80d176 1660 if (adapter->hw.mac.type == e1000_82547 &&
984263bc 1661 adapter->link_duplex == HALF_DUPLEX) {
cfefda96 1662 em_82547_move_tail_serialized(adapter);
9ccd8c1f 1663 } else {
9c80d176
SZ
1664 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1665 if (adapter->hw.mac.type == e1000_82547) {
cfefda96 1666 em_82547_update_fifo_head(adapter,
9c80d176 1667 m_head->m_pkthdr.len);
984263bc
MD
1668 }
1669 }
87307ba1 1670 return (0);
984263bc
MD
1671}
1672
9c80d176 1673/*
984263bc 1674 * 82547 workaround to avoid controller hang in half-duplex environment.
87307ba1 1675 * The workaround is to avoid queuing a large packet that would span
9c80d176
SZ
1676 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 1677 * in this case. We do that only when the FIFO is quiescent.
1678 */
9c095379 1679static void
1eca7b82 1680em_82547_move_tail_serialized(struct adapter *adapter)
9c095379 1681{
9c80d176
SZ
1682 struct e1000_tx_desc *tx_desc;
1683 uint16_t hw_tdt, sw_tdt, length = 0;
1684 bool eop = 0;
984263bc 1685
9c80d176
SZ
1686 ASSERT_SERIALIZED(adapter->arpcom.ac_if.if_serializer);
1687
1688 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
984263bc 1689 sw_tdt = adapter->next_avail_tx_desc;
f647ad3d 1690
984263bc
MD
1691 while (hw_tdt != sw_tdt) {
1692 tx_desc = &adapter->tx_desc_base[hw_tdt];
1693 length += tx_desc->lower.flags.length;
1694 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
87307ba1 1695 if (++hw_tdt == adapter->num_tx_desc)
984263bc
MD
1696 hw_tdt = 0;
1697
87307ba1 1698 if (eop) {
984263bc 1699 if (em_82547_fifo_workaround(adapter, length)) {
eac00e59 1700 adapter->tx_fifo_wrk_cnt++;
9ccd8c1f
JS
1701 callout_reset(&adapter->tx_fifo_timer, 1,
1702 em_82547_move_tail, adapter);
1703 break;
984263bc 1704 }
9c80d176 1705 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
9ccd8c1f
JS
1706 em_82547_update_fifo_head(adapter, length);
1707 length = 0;
984263bc 1708 }
9c80d176
SZ
1709 }
1710}
1711
1712static void
1713em_82547_move_tail(void *xsc)
1714{
1715 struct adapter *adapter = xsc;
1716 struct ifnet *ifp = &adapter->arpcom.ac_if;
1717
1718 lwkt_serialize_enter(ifp->if_serializer);
1719 em_82547_move_tail_serialized(adapter);
1720 lwkt_serialize_exit(ifp->if_serializer);
984263bc
MD
1721}
1722
1723static int
1724em_82547_fifo_workaround(struct adapter *adapter, int len)
1725{
1726 int fifo_space, fifo_pkt_len;
1727
1eca7b82 1728 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
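	/*
	 * Worked example (illustrative, assuming EM_FIFO_HDR is 16 bytes):
	 * a 1000 byte frame consumes roundup2(1000 + 16, 16) = 1024 bytes
	 * of the internal Tx FIFO.
	 */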
984263bc
MD
1729
1730 if (adapter->link_duplex == HALF_DUPLEX) {
eac00e59 1731 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
984263bc
MD
1732
1733 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
f647ad3d 1734 if (em_82547_tx_fifo_reset(adapter))
87307ba1 1735 return (0);
f647ad3d 1736 else
87307ba1 1737 return (1);
984263bc
MD
1738 }
1739 }
87307ba1 1740 return (0);
984263bc
MD
1741}
1742
1743static void
1744em_82547_update_fifo_head(struct adapter *adapter, int len)
1745{
1eca7b82 1746 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
f647ad3d 1747
984263bc
MD
1748 /* tx_fifo_head is always 16 byte aligned */
1749 adapter->tx_fifo_head += fifo_pkt_len;
eac00e59
SZ
1750 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1751 adapter->tx_fifo_head -= adapter->tx_fifo_size;
984263bc
MD
1752}
1753
984263bc
MD
1754static int
1755em_82547_tx_fifo_reset(struct adapter *adapter)
7ea52455 1756{
984263bc
MD
1757 uint32_t tctl;
1758
9c80d176
SZ
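	/*
	 * Only reset when the Tx path looks quiescent: the descriptor
	 * head and tail match (TDH == TDT), the internal FIFO head and
	 * tail match (TDFH == TDFT), their saved copies match
	 * (TDFHS == TDFTS), and no packets are pending in the FIFO
	 * (TDFPC == 0).
	 */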
1759 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1760 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1761 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1762 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1763 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1764 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1765 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
984263bc 1766 /* Disable TX unit */
9c80d176
SZ
1767 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1768 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1769 tctl & ~E1000_TCTL_EN);
984263bc
MD
1770
1771 /* Reset FIFO pointers */
9c80d176
SZ
1772 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1773 adapter->tx_head_addr);
1774 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1775 adapter->tx_head_addr);
1776 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1777 adapter->tx_head_addr);
1778 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1779 adapter->tx_head_addr);
984263bc
MD
1780
1781 /* Re-enable TX unit */
9c80d176 1782 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
984263bc
MD
1783 E1000_WRITE_FLUSH(&adapter->hw);
1784
1785 adapter->tx_fifo_head = 0;
eac00e59 1786 adapter->tx_fifo_reset_cnt++;
984263bc 1787
87307ba1 1788 return (TRUE);
eac00e59 1789 } else {
87307ba1 1790 return (FALSE);
984263bc
MD
1791 }
1792}
1793
1794static void
f647ad3d 1795em_set_promisc(struct adapter *adapter)
984263bc 1796{
9c80d176 1797 struct ifnet *ifp = &adapter->arpcom.ac_if;
1eca7b82 1798 uint32_t reg_rctl;
984263bc 1799
9c80d176 1800 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
984263bc
MD
1801
1802 if (ifp->if_flags & IFF_PROMISC) {
1803 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
9c80d176
SZ
1804 /* Turn this on if you want to see bad packets */
1805 if (em_debug_sbp)
1806 reg_rctl |= E1000_RCTL_SBP;
1807 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
984263bc
MD
1808 } else if (ifp->if_flags & IFF_ALLMULTI) {
1809 reg_rctl |= E1000_RCTL_MPE;
1810 reg_rctl &= ~E1000_RCTL_UPE;
9c80d176 1811 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
984263bc 1812 }
984263bc
MD
1813}
1814
1815static void
f647ad3d 1816em_disable_promisc(struct adapter *adapter)
984263bc 1817{
f647ad3d 1818 uint32_t reg_rctl;
984263bc 1819
9c80d176 1820 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
984263bc 1821
9c80d176
SZ
1822 reg_rctl &= ~E1000_RCTL_UPE;
1823 reg_rctl &= ~E1000_RCTL_MPE;
1824 reg_rctl &= ~E1000_RCTL_SBP;
1825 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
984263bc
MD
1826}
1827
984263bc 1828static void
f647ad3d 1829em_set_multi(struct adapter *adapter)
984263bc 1830{
9c80d176 1831 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 1832 struct ifmultiaddr *ifma;
9c80d176
SZ
1833 uint32_t reg_rctl = 0;
1834 uint8_t mta[512]; /* Largest MTS is 4096 bits */
f647ad3d 1835 int mcnt = 0;
f647ad3d 1836
9c80d176
SZ
1837 if (adapter->hw.mac.type == e1000_82542 &&
1838 adapter->hw.revision_id == E1000_REVISION_2) {
1839 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1840 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1841 e1000_pci_clear_mwi(&adapter->hw);
f647ad3d 1842 reg_rctl |= E1000_RCTL_RST;
9c80d176 1843 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
f647ad3d
JS
1844 msec_delay(5);
1845 }
984263bc 1846
f647ad3d
JS
1847 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1848 if (ifma->ifma_addr->sa_family != AF_LINK)
1849 continue;
1850
1851 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1852 break;
984263bc 1853
f647ad3d 1854 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
9c80d176 1855 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
f647ad3d
JS
1856 mcnt++;
1857 }
1858
1859 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
9c80d176 1860 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
f647ad3d 1861 reg_rctl |= E1000_RCTL_MPE;
9c80d176 1862 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
7ea52455 1863 } else {
9c80d176
SZ
1864 e1000_update_mc_addr_list(&adapter->hw, mta,
1865 mcnt, 1, adapter->hw.mac.rar_entry_count);
7ea52455 1866 }
f647ad3d 1867
9c80d176
SZ
1868 if (adapter->hw.mac.type == e1000_82542 &&
1869 adapter->hw.revision_id == E1000_REVISION_2) {
1870 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
f647ad3d 1871 reg_rctl &= ~E1000_RCTL_RST;
9c80d176 1872 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
f647ad3d 1873 msec_delay(5);
9c80d176
SZ
1874 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1875 e1000_pci_set_mwi(&adapter->hw);
f647ad3d
JS
1876 }
1877}
984263bc 1878
9c80d176
SZ
1879/*
1880 * This routine checks for link status and updates statistics.
1881 */
984263bc 1882static void
9c80d176 1883em_timer(void *xsc)
984263bc 1884{
9c80d176
SZ
1885 struct adapter *adapter = xsc;
1886 struct ifnet *ifp = &adapter->arpcom.ac_if;
984263bc 1887
78195a76 1888 lwkt_serialize_enter(ifp->if_serializer);
984263bc 1889
87307ba1 1890 em_update_link_status(adapter);
9c80d176
SZ
1891 em_update_stats(adapter);
1892
1893 /* Reset LAA into RAR[0] on 82571 */
1894 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
1895 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1896
1897 if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
984263bc 1898 em_print_hw_stats(adapter);
9c80d176 1899
984263bc
MD
1900 em_smartspeed(adapter);
1901
9c80d176 1902 callout_reset(&adapter->timer, hz, em_timer, adapter);
984263bc 1903
78195a76 1904 lwkt_serialize_exit(ifp->if_serializer);
984263bc
MD
1905}
1906
1907static void
87307ba1 1908em_update_link_status(struct adapter *adapter)
984263bc 1909{
9c80d176
SZ
1910 struct e1000_hw *hw = &adapter->hw;
1911 struct ifnet *ifp = &adapter->arpcom.ac_if;
1912 device_t dev = adapter->dev;
1913 uint32_t link_check = 0;
1914
1915 /* Get the cached link value or read phy for real */
1916 switch (hw->phy.media_type) {
1917 case e1000_media_type_copper:
1918 if (hw->mac.get_link_status) {
1919 /* Do the work to read phy */
1920 e1000_check_for_link(hw);
1921 link_check = !hw->mac.get_link_status;
1922 if (link_check) /* ESB2 fix */
1923 e1000_cfg_on_link_up(hw);
1924 } else {
1925 link_check = TRUE;
984263bc 1926 }
9c80d176
SZ
1927 break;
1928
1929 case e1000_media_type_fiber:
1930 e1000_check_for_link(hw);
1931 link_check =
1932 E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
1933 break;
1934
1935 case e1000_media_type_internal_serdes:
1936 e1000_check_for_link(hw);
1937 link_check = adapter->hw.mac.serdes_has_link;
1938 break;
1939
1940 case e1000_media_type_unknown:
1941 default:
1942 break;
1943 }
1944
1945 /* Now check for a transition */
1946 if (link_check && adapter->link_active == 0) {
1947 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
1948 &adapter->link_duplex);
cb5a6be6
SZ
1949
1950 /*
1951 * Check if we should enable/disable SPEED_MODE bit on
1952 * 82571/82572
1953 */
1954 if (hw->mac.type == e1000_82571 ||
1955 hw->mac.type == e1000_82572) {
9c80d176
SZ
1956 int tarc0;
1957
1958 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
cb5a6be6
SZ
1959 if (adapter->link_speed != SPEED_1000)
1960 tarc0 &= ~SPEED_MODE_BIT;
1961 else
1962 tarc0 |= SPEED_MODE_BIT;
9c80d176 1963 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
984263bc 1964 }
9c80d176
SZ
1965 if (bootverbose) {
1966 device_printf(dev, "Link is up %d Mbps %s\n",
1967 adapter->link_speed,
1968 ((adapter->link_duplex == FULL_DUPLEX) ?
1969 "Full Duplex" : "Half Duplex"));
1970 }
1971 adapter->link_active = 1;
1972 adapter->smartspeed = 0;
1973 ifp->if_baudrate = adapter->link_speed * 1000000;
1974 ifp->if_link_state = LINK_STATE_UP;
1975 if_link_state_change(ifp);
1976 } else if (!link_check && adapter->link_active == 1) {
1977 ifp->if_baudrate = adapter->link_speed = 0;
1978 adapter->link_duplex = 0;
1979 if (bootverbose)
1980 device_printf(dev, "Link is Down\n");
1981 adapter->link_active = 0;
1982#if 0
1983 /* Link down, disable watchdog */
 1984		ifp->if_timer = 0;
1985#endif
1986 ifp->if_link_state = LINK_STATE_DOWN;
1987 if_link_state_change(ifp);
984263bc 1988 }
984263bc
MD
1989}
1990
984263bc 1991static void
9c80d176 1992em_stop(struct adapter *adapter)
984263bc 1993{
9c80d176
SZ
1994 struct ifnet *ifp = &adapter->arpcom.ac_if;
1995 int i;
984263bc 1996
1eca7b82
SZ
1997 ASSERT_SERIALIZED(ifp->if_serializer);
1998
984263bc 1999 em_disable_intr(adapter);
9c80d176 2000
9ccd8c1f
JS
2001 callout_stop(&adapter->timer);
2002 callout_stop(&adapter->tx_fifo_timer);
984263bc 2003
984263bc 2004 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
af82d4bb 2005 ifp->if_timer = 0;
9c80d176
SZ
2006
2007 e1000_reset_hw(&adapter->hw);
2008 if (adapter->hw.mac.type >= e1000_82544)
2009 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2010
2011 for (i = 0; i < adapter->num_tx_desc; i++) {
2012 struct em_buffer *tx_buffer = &adapter->tx_buffer_area[i];
2013
2014 if (tx_buffer->m_head != NULL) {
2015 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2016 m_freem(tx_buffer->m_head);
2017 tx_buffer->m_head = NULL;
2018 }
9c80d176
SZ
2019 }
2020
2021 for (i = 0; i < adapter->num_rx_desc; i++) {
2022 struct em_buffer *rx_buffer = &adapter->rx_buffer_area[i];
2023
2024 if (rx_buffer->m_head != NULL) {
2025 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2026 m_freem(rx_buffer->m_head);
2027 rx_buffer->m_head = NULL;
2028 }
2029 }
c9ff32cc
SZ
2030
2031 if (adapter->fmp != NULL)
2032 m_freem(adapter->fmp);
2033 adapter->fmp = NULL;
2034 adapter->lmp = NULL;
51e6819f
SZ
2035
2036 adapter->csum_flags = 0;
2037 adapter->csum_ehlen = 0;
2038 adapter->csum_iphlen = 0;
9f60d74b
SZ
2039
2040 adapter->tx_dd_head = 0;
2041 adapter->tx_dd_tail = 0;
2042 adapter->tx_nsegs = 0;
984263bc
MD
2043}
2044
9c80d176
SZ
2045static int
2046em_get_hw_info(struct adapter *adapter)
984263bc
MD
2047{
2048 device_t dev = adapter->dev;
2049
984263bc
MD
2050 /* Save off the information about this board */
2051 adapter->hw.vendor_id = pci_get_vendor(dev);
2052 adapter->hw.device_id = pci_get_device(dev);
f647ad3d
JS
2053 adapter->hw.revision_id = pci_get_revid(dev);
2054 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
9c80d176 2055 adapter->hw.subsystem_device_id = pci_get_subdevice(dev);
984263bc 2056
9c80d176
SZ
2057 /* Do Shared Code Init and Setup */
2058 if (e1000_set_mac_type(&adapter->hw))
2059 return ENXIO;
2060 return 0;
984263bc
MD
2061}
2062
1eca7b82 2063static int
9c80d176 2064em_alloc_pci_res(struct adapter *adapter)
1eca7b82 2065{
9c80d176
SZ
2066 device_t dev = adapter->dev;
2067 int val, rid, error = E1000_SUCCESS;
2068
2069 /* Enable bus mastering */
2070 pci_enable_busmaster(dev);
1eca7b82 2071
9c80d176
SZ
2072 adapter->memory_rid = EM_BAR_MEM;
2073 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2074 &adapter->memory_rid, RF_ACTIVE);
2075 if (adapter->memory == NULL) {
1eca7b82 2076 device_printf(dev, "Unable to allocate bus resource: memory\n");
9c80d176 2077 return (ENXIO);
1eca7b82
SZ
2078 }
2079 adapter->osdep.mem_bus_space_tag =
9c80d176 2080 rman_get_bustag(adapter->memory);
1eca7b82 2081 adapter->osdep.mem_bus_space_handle =
9c80d176
SZ
2082 rman_get_bushandle(adapter->memory);
2083
 2084	/* XXX This is quite goofy; it is not actually used */
1eca7b82
SZ
2085 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
2086
9c80d176
SZ
2087 /* Only older adapters use IO mapping */
2088 if (adapter->hw.mac.type > e1000_82543 &&
2089 adapter->hw.mac.type < e1000_82571) {
1eca7b82 2090		/* Figure out where our IO BAR is */
9c80d176 2091 for (rid = PCIR_BAR(0); rid < PCIR_CARDBUSCIS;) {
1eca7b82 2092 val = pci_read_config(dev, rid, 4);
87307ba1 2093 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
1eca7b82
SZ
2094 adapter->io_rid = rid;
2095 break;
2096 }
2097 rid += 4;
87307ba1
SZ
2098 /* check for 64bit BAR */
2099 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2100 rid += 4;
1eca7b82 2101 }
9c80d176 2102 if (rid >= PCIR_CARDBUSCIS) {
87307ba1
SZ
2103 device_printf(dev, "Unable to locate IO BAR\n");
2104 return (ENXIO);
9c80d176
SZ
2105 }
2106 adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
2107 &adapter->io_rid, RF_ACTIVE);
2108 if (adapter->ioport == NULL) {
1eca7b82 2109 device_printf(dev, "Unable to allocate bus resource: "
9c80d176
SZ
2110 "ioport\n");
2111 return (ENXIO);
1eca7b82 2112 }
87307ba1
SZ
2113 adapter->hw.io_base = 0;
2114 adapter->osdep.io_bus_space_tag =
9c80d176 2115 rman_get_bustag(adapter->ioport);
87307ba1 2116 adapter->osdep.io_bus_space_handle =
9c80d176 2117 rman_get_bushandle(adapter->ioport);
1eca7b82
SZ
2118 }
2119
9c80d176
SZ
2120 adapter->intr_rid = 0;
2121 adapter->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
2122 &adapter->intr_rid,
2123 RF_SHAREABLE | RF_ACTIVE);
2124 if (adapter->intr_res == NULL) {
1eca7b82 2125 device_printf(dev, "Unable to allocate bus resource: "
9c80d176
SZ
2126 "interrupt\n");
2127 return (ENXIO);
1eca7b82
SZ
2128 }
2129
9c80d176 2130 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1eca7b82 2131 adapter->hw.back = &adapter->osdep;
9c80d176 2132 return (error);
1eca7b82
SZ
2133}
2134
2135static void
9c80d176 2136em_free_pci_res(struct adapter *adapter)
1eca7b82 2137{
9c80d176 2138 device_t dev = adapter->dev;
1eca7b82 2139
9c80d176
SZ
2140 if (adapter->intr_res != NULL) {
2141 bus_release_resource(dev, SYS_RES_IRQ,
2142 adapter->intr_rid, adapter->intr_res);
1eca7b82 2143 }
9c80d176
SZ
2144
2145 if (adapter->memory != NULL) {
2146 bus_release_resource(dev, SYS_RES_MEMORY,
2147 adapter->memory_rid, adapter->memory);
1eca7b82
SZ
2148 }
2149
9c80d176
SZ
2150 if (adapter->flash != NULL) {
2151 bus_release_resource(dev, SYS_RES_MEMORY,
2152 adapter->flash_rid, adapter->flash);
1eca7b82
SZ
2153 }
2154
9c80d176
SZ
2155 if (adapter->ioport != NULL) {
2156 bus_release_resource(dev, SYS_RES_IOPORT,
2157 adapter->io_rid, adapter->ioport);
1eca7b82
SZ
2158 }
2159}
2160
984263bc 2161static int
9c80d176 2162em_hw_init(struct adapter *adapter)
984263bc 2163{
9c80d176
SZ
2164 device_t dev = adapter->dev;
2165 uint16_t rx_buffer_size;
7ea52455 2166
984263bc 2167 /* Issue a global reset */
9c80d176
SZ
2168 e1000_reset_hw(&adapter->hw);
2169
2170 /* Get control from any management/hw control */
2171 if ((adapter->hw.mac.type == e1000_82573 ||
2172 adapter->hw.mac.type == e1000_ich8lan ||
2173 adapter->hw.mac.type == e1000_ich10lan ||
2174 adapter->hw.mac.type == e1000_ich9lan) &&
2175 e1000_check_mng_mode(&adapter->hw))
2176 em_get_hw_control(adapter);
984263bc
MD
2177
2178 /* When hardware is reset, fifo_head is also reset */
2179 adapter->tx_fifo_head = 0;
2180
87307ba1 2181 /* Set up smart power down as default off on newer adapters. */
1eca7b82 2182 if (!em_smart_pwr_down &&
9c80d176
SZ
2183 (adapter->hw.mac.type == e1000_82571 ||
2184 adapter->hw.mac.type == e1000_82572)) {
1eca7b82
SZ
2185 uint16_t phy_tmp = 0;
2186
87307ba1 2187 /* Speed up time to link by disabling smart power down. */
9c80d176
SZ
2188 e1000_read_phy_reg(&adapter->hw,
2189 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1eca7b82 2190 phy_tmp &= ~IGP02E1000_PM_SPD;
9c80d176
SZ
2191 e1000_write_phy_reg(&adapter->hw,
2192 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
1eca7b82
SZ
2193 }
2194
7ea52455 2195 /*
87307ba1
SZ
2196 * These parameters control the automatic generation (Tx) and
2197 * response (Rx) to Ethernet PAUSE frames.
7ea52455
SZ
2198 * - High water mark should allow for at least two frames to be
2199 * received after sending an XOFF.
2200 * - Low water mark works best when it is very near the high water mark.
2201 * This allows the receiver to restart by sending XON when it has
9c80d176
SZ
 2202	 * drained a bit. Here we use an arbitrary value of 1500 which will
2203 * restart after one full frame is pulled from the buffer. There
7ea52455
SZ
2204 * could be several smaller frames in the buffer and if so they will
2205 * not trigger the XON until their total number reduces the buffer
2206 * by 1500.
2207 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2208 */
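	/*
	 * Illustrative example with hypothetical numbers: a 32KB Rx packet
	 * buffer (PBA) and a 1518 byte max frame give
	 * high_water = 32768 - roundup2(1518, 1024) = 30720 and
	 * low_water = 30720 - 1500 = 29220 bytes.
	 */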
9c80d176
SZ
2209 rx_buffer_size =
2210 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) << 10;
7ea52455 2211
9c80d176
SZ
2212 adapter->hw.fc.high_water = rx_buffer_size -
2213 roundup2(adapter->max_frame_size, 1024);
2214 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2215
2216 if (adapter->hw.mac.type == e1000_80003es2lan)
2217 adapter->hw.fc.pause_time = 0xFFFF;
1eca7b82 2218 else
9c80d176
SZ
2219 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2220 adapter->hw.fc.send_xon = TRUE;
2221 adapter->hw.fc.requested_mode = e1000_fc_full;
7ea52455 2222
9c80d176
SZ
2223 if (e1000_init_hw(&adapter->hw) < 0) {
2224 device_printf(dev, "Hardware Initialization Failed\n");
87307ba1 2225 return (EIO);
984263bc
MD
2226 }
2227
9c80d176 2228 e1000_check_for_link(&adapter->hw);
984263bc 2229
87307ba1 2230 return (0);
984263bc
MD
2231}
2232
984263bc 2233static void
9c80d176 2234em_setup_ifp(struct adapter *adapter)
984263bc 2235{
9c80d176 2236 struct ifnet *ifp = &adapter->arpcom.ac_if;
984263bc 2237
9c80d176
SZ
2238 if_initname(ifp, device_get_name(adapter->dev),
2239 device_get_unit(adapter->dev));
984263bc
MD
2240 ifp->if_softc = adapter;
2241 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
9c80d176 2242 ifp->if_init = em_init;
984263bc
MD
2243 ifp->if_ioctl = em_ioctl;
2244 ifp->if_start = em_start;
9c095379
MD
2245#ifdef DEVICE_POLLING
2246 ifp->if_poll = em_poll;
2247#endif
984263bc 2248 ifp->if_watchdog = em_watchdog;
e26dc3e9 2249 ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
19b1d5b8 2250 ifq_set_ready(&ifp->if_snd);
984263bc 2251
9c80d176 2252 ether_ifattach(ifp, adapter->hw.mac.addr, NULL);
984263bc 2253
9c80d176
SZ
2254 if (adapter->hw.mac.type >= e1000_82543)
2255 ifp->if_capabilities = IFCAP_HWCSUM;
e095c7aa 2256
9c80d176
SZ
2257 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2258 ifp->if_capenable = ifp->if_capabilities;
984263bc 2259
9c80d176
SZ
2260 if (ifp->if_capenable & IFCAP_TXCSUM)
2261 ifp->if_hwassist = EM_CSUM_FEATURES;
21fa6062 2262
f647ad3d
JS
2263 /*
2264 * Tell the upper layer(s) we support long frames.
2265 */
2266 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
984263bc 2267
87307ba1 2268 /*
984263bc
MD
2269 * Specify the media types supported by this adapter and register
2270 * callbacks to update media and link information
2271 */
9c80d176
SZ
2272 ifmedia_init(&adapter->media, IFM_IMASK,
2273 em_media_change, em_media_status);
2274 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
2275 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
2276 u_char fiber_type = IFM_1000_SX; /* default type */
2277
2278 if (adapter->hw.mac.type == e1000_82545)
1eca7b82
SZ
2279 fiber_type = IFM_1000_LX;
2280 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
984263bc 2281 0, NULL);
87307ba1 2282 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
984263bc
MD
2283 } else {
2284 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
87307ba1 2285 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
984263bc 2286 0, NULL);
87307ba1 2287 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
984263bc 2288 0, NULL);
87307ba1 2289 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
984263bc 2290 0, NULL);
9c80d176
SZ
2291 if (adapter->hw.phy.type != e1000_phy_ife) {
2292 ifmedia_add(&adapter->media,
2293 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2294 ifmedia_add(&adapter->media,
2295 IFM_ETHER | IFM_1000_T, 0, NULL);
2296 }
984263bc
MD
2297 }
2298 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2299 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
984263bc
MD
2300}
2301
9c80d176
SZ
2302
2303/*
2304 * Workaround for SmartSpeed on 82541 and 82547 controllers
2305 */
984263bc
MD
2306static void
2307em_smartspeed(struct adapter *adapter)
2308{
f647ad3d
JS
2309 uint16_t phy_tmp;
2310
9c80d176
SZ
2311 if (adapter->link_active || adapter->hw.phy.type != e1000_phy_igp ||
2312 adapter->hw.mac.autoneg == 0 ||
2313 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
984263bc
MD
2314 return;
2315
f647ad3d
JS
2316 if (adapter->smartspeed == 0) {
2317 /*
2318 * If Master/Slave config fault is asserted twice,
9c80d176 2319 * we assume back-to-back
f647ad3d 2320 */
9c80d176 2321 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
f647ad3d
JS
2322 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2323 return;
9c80d176 2324 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
f647ad3d 2325 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
9c80d176
SZ
2326 e1000_read_phy_reg(&adapter->hw,
2327 PHY_1000T_CTRL, &phy_tmp);
f647ad3d
JS
2328 if (phy_tmp & CR_1000T_MS_ENABLE) {
2329 phy_tmp &= ~CR_1000T_MS_ENABLE;
9c80d176
SZ
2330 e1000_write_phy_reg(&adapter->hw,
2331 PHY_1000T_CTRL, phy_tmp);
f647ad3d 2332 adapter->smartspeed++;
9c80d176
SZ
2333 if (adapter->hw.mac.autoneg &&
2334 !e1000_phy_setup_autoneg(&adapter->hw) &&
2335 !e1000_read_phy_reg(&adapter->hw,
2336 PHY_CONTROL, &phy_tmp)) {
2337 phy_tmp |= MII_CR_AUTO_NEG_EN |
2338 MII_CR_RESTART_AUTO_NEG;
2339 e1000_write_phy_reg(&adapter->hw,
2340 PHY_CONTROL, phy_tmp);
f647ad3d
JS
2341 }
2342 }
2343 }
87307ba1 2344 return;
f647ad3d
JS
2345 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2346 /* If still no link, perhaps using 2/3 pair cable */
9c80d176 2347 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
f647ad3d 2348 phy_tmp |= CR_1000T_MS_ENABLE;
9c80d176
SZ
2349 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2350 if (adapter->hw.mac.autoneg &&
2351 !e1000_phy_setup_autoneg(&adapter->hw) &&
2352 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2353 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
2354 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
f647ad3d
JS
2355 }
2356 }
9c80d176 2357
f647ad3d
JS
2358 /* Restart process after EM_SMARTSPEED_MAX iterations */
2359 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2360 adapter->smartspeed = 0;
984263bc
MD
2361}
2362
9ccd8c1f
JS
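/*
 * Allocate a coherent DMA memory block (used here for the TX/RX
 * descriptor rings), aligned to EM_DBA_ALIGN; returns ENOMEM when the
 * allocation fails, 0 otherwise.
 */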
2363static int
2364em_dma_malloc(struct adapter *adapter, bus_size_t size,
87307ba1 2365 struct em_dma_alloc *dma)
9ccd8c1f 2366{
9c80d176
SZ
2367 dma->dma_vaddr = bus_dmamem_coherent_any(adapter->parent_dtag,
2368 EM_DBA_ALIGN, size, BUS_DMA_WAITOK,
2369 &dma->dma_tag, &dma->dma_map,
2370 &dma->dma_paddr);
2371 if (dma->dma_vaddr == NULL)
2372 return ENOMEM;
2373 else
2374 return 0;
9ccd8c1f
JS
2375}
2376
2377static void
2378em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2379{
9c80d176
SZ
2380 if (dma->dma_tag == NULL)
2381 return;
2382 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2383 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2384 bus_dma_tag_destroy(dma->dma_tag);
984263bc
MD
2385}
2386
984263bc 2387static int
9c80d176 2388em_create_tx_ring(struct adapter *adapter)
984263bc 2389{
9c80d176 2390 device_t dev = adapter->dev;
1eca7b82 2391 struct em_buffer *tx_buffer;
1eca7b82
SZ
2392 int error, i;
2393
87307ba1
SZ
2394 adapter->tx_buffer_area =
2395 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc,
2396 M_DEVBUF, M_WAITOK | M_ZERO);
984263bc 2397
9c80d176
SZ
2398 /*
2399 * Create DMA tags for tx buffers
2400 */
2401 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */
2402 1, 0, /* alignment, bounds */
2403 BUS_SPACE_MAXADDR, /* lowaddr */
2404 BUS_SPACE_MAXADDR, /* highaddr */
2405 NULL, NULL, /* filter, filterarg */
2406 EM_TSO_SIZE, /* maxsize */
2407 EM_MAX_SCATTER, /* nsegments */
2408 EM_MAX_SEGSIZE, /* maxsegsize */
2409 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
2410 BUS_DMA_ONEBPAGE, /* flags */
2411 &adapter->txtag);
2412 if (error) {
2413 device_printf(dev, "Unable to allocate TX DMA tag\n");
2414 kfree(adapter->tx_buffer_area, M_DEVBUF);
2415 adapter->tx_buffer_area = NULL;
2416 return error;
2417 }
2418
2419 /*
2420 * Create DMA maps for tx buffers
2421 */
1eca7b82 2422 for (i = 0; i < adapter->num_tx_desc; i++) {
9c80d176
SZ
2423 tx_buffer = &adapter->tx_buffer_area[i];
2424
2425 error = bus_dmamap_create(adapter->txtag,
2426 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2427 &tx_buffer->map);
1eca7b82 2428 if (error) {
9c80d176
SZ
2429 device_printf(dev, "Unable to create TX DMA map\n");
2430 em_destroy_tx_ring(adapter, i);
2431 return error;
1eca7b82 2432 }
1eca7b82 2433 }
9c80d176
SZ
2434 return (0);
2435}
9ccd8c1f 2436
9c80d176
SZ
2437static void
2438em_init_tx_ring(struct adapter *adapter)
2439{
2440 /* Clear the old ring contents */
2441 bzero(adapter->tx_desc_base,
2442 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2443
2444 /* Reset state */
87307ba1
SZ
2445 adapter->next_avail_tx_desc = 0;
2446 adapter->next_tx_to_clean = 0;
984263bc 2447 adapter->num_tx_desc_avail = adapter->num_tx_desc;
984263bc
MD
2448}
2449
984263bc 2450static void
9c80d176 2451em_init_tx_unit(struct adapter *adapter)
984263bc 2452{
9c80d176 2453 uint32_t tctl, tarc, tipg = 0;
9ccd8c1f
JS
2454 uint64_t bus_addr;
2455
984263bc 2456 /* Setup the Base and Length of the Tx Descriptor Ring */
9ccd8c1f 2457 bus_addr = adapter->txdma.dma_paddr;
9c80d176
SZ
2458 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2459 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2460 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2461 (uint32_t)(bus_addr >> 32));
2462 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2463 (uint32_t)bus_addr);
984263bc 2464 /* Setup the HW Tx Head and Tail descriptor pointers */
9c80d176
SZ
2465 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2466 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
984263bc 2467
984263bc 2468 /* Set the default values for the Tx Inter Packet Gap timer */
9c80d176
SZ
2469 switch (adapter->hw.mac.type) {
2470 case e1000_82542:
2471 tipg = DEFAULT_82542_TIPG_IPGT;
2472 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2473 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
984263bc 2474 break;
9c80d176
SZ
2475
2476 case e1000_80003es2lan:
2477 tipg = DEFAULT_82543_TIPG_IPGR1;
2478 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
2479 E1000_TIPG_IPGR2_SHIFT;
1eca7b82 2480 break;
9c80d176 2481
984263bc 2482 default:
9c80d176
SZ
2483 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
2484 adapter->hw.phy.media_type ==
2485 e1000_media_type_internal_serdes)
2486 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
984263bc 2487 else
9c80d176
SZ
2488 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2489 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2490 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2491 break;
2492 }
2493
2494 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
91e8debf
SZ
2495
2496 /* NOTE: 0 is not allowed for TIDV */
2497 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, 1);
 2498	if (adapter->hw.mac.type >= e1000_82540)
2499 E1000_WRITE_REG(&adapter->hw, E1000_TADV, 0);
984263bc 2500
9c80d176
SZ
2501 if (adapter->hw.mac.type == e1000_82571 ||
2502 adapter->hw.mac.type == e1000_82572) {
2503 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
2504 tarc |= SPEED_MODE_BIT;
2505 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2506 } else if (adapter->hw.mac.type == e1000_80003es2lan) {
2507 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
2508 tarc |= 1;
2509 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2510 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
2511 tarc |= 1;
2512 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
1eca7b82
SZ
2513 }
2514
984263bc 2515 /* Program the Transmit Control Register */
9c80d176
SZ
2516 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2517 tctl &= ~E1000_TCTL_CT;
2518 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2519 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2520
2521 if (adapter->hw.mac.type >= e1000_82571)
2522 tctl |= E1000_TCTL_MULR;
1eca7b82 2523
87307ba1 2524 /* This write will effectively turn on the transmit unit. */
9c80d176 2525 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
984263bc
MD
2526}
2527
984263bc 2528static void
9c80d176 2529em_destroy_tx_ring(struct adapter *adapter, int ndesc)
984263bc 2530{
f647ad3d
JS
2531 struct em_buffer *tx_buffer;
2532 int i;
984263bc 2533
9c80d176
SZ
2534 if (adapter->tx_buffer_area == NULL)
2535 return;
984263bc 2536
9c80d176
SZ
2537 for (i = 0; i < ndesc; i++) {
2538 tx_buffer = &adapter->tx_buffer_area[i];
1eca7b82 2539
9c80d176
SZ
2540 KKASSERT(tx_buffer->m_head == NULL);
2541 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
9ccd8c1f 2542 }
9c80d176
SZ
2543 bus_dma_tag_destroy(adapter->txtag);
2544
2545 kfree(adapter->tx_buffer_area, M_DEVBUF);
2546 adapter->tx_buffer_area = NULL;
984263bc
MD
2547}
2548
9c80d176
SZ
2549/*
2550 * The offload context needs to be set when we transfer the first
2551 * packet of a particular protocol (TCP/UDP). This routine has been
002b3a05 2552 * enhanced to deal with inserted VLAN headers.
51e6819f
SZ
2553 *
 2554 * If the new packet's ether header length, ip header length and
 2555 * csum offloading type are the same as the previous packet's, we
 2556 * avoid allocating a new csum context descriptor, mainly to take
 2557 * advantage of the pipeline effect of the TX data read request.
9f60d74b
SZ
2558 *
 2559 * This function returns the number of TX descriptors allocated
 2560 * for the csum context.
9c80d176 2561 */
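/*
 * For example (illustrative): a burst of TCP/IPv4 segments that all carry
 * a 14 byte Ethernet header and a 20 byte IP header reuses the cached
 * context, so only the first segment consumes a context descriptor
 * (return value 1); the following segments return 0.
 */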
9f60d74b 2562static int
9c80d176
SZ
2563em_txcsum(struct adapter *adapter, struct mbuf *mp,
2564 uint32_t *txd_upper, uint32_t *txd_lower)
984263bc 2565{
9c80d176 2566 struct e1000_context_desc *TXD;
984263bc 2567 struct em_buffer *tx_buffer;
9c80d176 2568 struct ether_vlan_header *eh;
51e6819f
SZ
2569 struct ip *ip;
2570 int curr_txd, ehdrlen, csum_flags;
9c80d176
SZ
2571 uint32_t cmd, hdr_len, ip_hlen;
2572 uint16_t etype;
9c80d176 2573
9c80d176
SZ
2574 /*
2575 * Determine where frame payload starts.
2576 * Jump over vlan headers if already present,
2577 * helpful for QinQ too.
2578 */
252dfd0d
SZ
2579 KASSERT(mp->m_len >= ETHER_HDR_LEN,
2580 ("em_txcsum_pullup is not called (eh)?\n"));
9c80d176
SZ
2581 eh = mtod(mp, struct ether_vlan_header *);
2582 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
252dfd0d
SZ
2583 KASSERT(mp->m_len >= ETHER_HDR_LEN + EVL_ENCAPLEN,
2584 ("em_txcsum_pullup is not called (evh)?\n"));
9c80d176
SZ
2585 etype = ntohs(eh->evl_proto);
2586 ehdrlen = ETHER_HDR_LEN + EVL_ENCAPLEN;
984263bc 2587 } else {
9c80d176
SZ
2588 etype = ntohs(eh->evl_encap_proto);
2589 ehdrlen = ETHER_HDR_LEN;
984263bc
MD
2590 }
2591
1eca7b82 2592 /*
002b3a05 2593 * We only support TCP/UDP for IPv4 for the moment.
9c80d176 2594 * TODO: Support SCTP too when it hits the tree.
984263bc 2595 */
51e6819f 2596 if (etype != ETHERTYPE_IP)
9f60d74b 2597 return 0;
002b3a05 2598
51e6819f 2599 KASSERT(mp->m_len >= ehdrlen + EM_IPVHL_SIZE,
252dfd0d 2600 ("em_txcsum_pullup is not called (eh+ip_vhl)?\n"));
9c80d176 2601
51e6819f
SZ
2602 /* NOTE: We could only safely access ip.ip_vhl part */
2603 ip = (struct ip *)(mp->m_data + ehdrlen);
2604 ip_hlen = ip->ip_hl << 2;
984263bc 2605
51e6819f
SZ
2606 csum_flags = mp->m_pkthdr.csum_flags & EM_CSUM_FEATURES;
2607
2608 if (adapter->csum_ehlen == ehdrlen &&
2609 adapter->csum_iphlen == ip_hlen &&
2610 adapter->csum_flags == csum_flags) {
2611 /*
2612 * Same csum offload context as the previous packets;
2613 * just return.
2614 */
2615 *txd_upper = adapter->csum_txd_upper;
2616 *txd_lower = adapter->csum_txd_lower;
9f60d74b 2617 return 0;
984263bc
MD
2618 }
2619
51e6819f
SZ
2620 /*
2621 * Setup a new csum offload context.
2622 */
2623
2624 curr_txd = adapter->next_avail_tx_desc;
2625 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2626 TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd];
2627
2628 cmd = 0;
2629
2630 /* Setup of IP header checksum. */
2631 if (csum_flags & CSUM_IP) {
2632 /*
2633 * Start offset for header checksum calculation.
2634 * End offset for header checksum calculation.
2635 * Offset of place to put the checksum.
2636 */
2637 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2638 TXD->lower_setup.ip_fields.ipcse =
2639 htole16(ehdrlen + ip_hlen - 1);
2640 TXD->lower_setup.ip_fields.ipcso =
2641 ehdrlen + offsetof(struct ip, ip_sum);
2642 cmd |= E1000_TXD_CMD_IP;
2643 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2644 }
2645 hdr_len = ehdrlen + ip_hlen;
2646
2647 if (csum_flags & CSUM_TCP) {
002b3a05
SZ
2648 /*
2649 * Start offset for payload checksum calculation.
2650 * End offset for payload checksum calculation.
2651 * Offset of place to put the checksum.
2652 */
2653 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2654 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2655 TXD->upper_setup.tcp_fields.tucso =
2656 hdr_len + offsetof(struct tcphdr, th_sum);
2657 cmd |= E1000_TXD_CMD_TCP;
2658 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
51e6819f 2659 } else if (csum_flags & CSUM_UDP) {
002b3a05
SZ
2660 /*
2661 * Start offset for header checksum calculation.
2662 * End offset for header checksum calculation.
2663 * Offset of place to put the checksum.
2664 */
2665 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2666 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2667 TXD->upper_setup.tcp_fields.tucso =
2668 hdr_len + offsetof(struct udphdr, uh_sum);
2669 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
9c80d176
SZ
2670 }
2671
2672 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
2673 E1000_TXD_DTYP_D; /* Data descr */
51e6819f
SZ
2674
2675 /* Save the information for this csum offloading context */
2676 adapter->csum_ehlen = ehdrlen;
2677 adapter->csum_iphlen = ip_hlen;
2678 adapter->csum_flags = csum_flags;
2679 adapter->csum_txd_upper = *txd_upper;
2680 adapter->csum_txd_lower = *txd_lower;
2681
9c80d176
SZ
2682 TXD->tcp_seg_setup.data = htole32(0);
2683 TXD->cmd_and_length =
2af74b85 2684 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);
984263bc
MD
2685 tx_buffer->m_head = NULL;
2686
2687 if (++curr_txd == adapter->num_tx_desc)
2688 curr_txd = 0;
2689
9c80d176 2690 KKASSERT(adapter->num_tx_desc_avail > 0);
984263bc 2691 adapter->num_tx_desc_avail--;
9c80d176 2692
984263bc 2693 adapter->next_avail_tx_desc = curr_txd;
9f60d74b 2694 return 1;
984263bc
MD
2695}
2696
002b3a05
SZ
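/*
 * Ensure the headers that em_txcsum() inspects (ETHER_HDR_LEN +
 * EM_IPVHL_SIZE, plus EVL_ENCAPLEN for VLAN frames) are contiguous in
 * the first mbuf; pull them up when the mbuf is writable, otherwise
 * drop the packet and return ENOBUFS.
 */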
2697static int
2698em_txcsum_pullup(struct adapter *adapter, struct mbuf **m0)
2699{
2700 struct mbuf *m = *m0;
2701 struct ether_header *eh;
2702 int len;
2703
2704 adapter->tx_csum_try_pullup++;
2705
2706 len = ETHER_HDR_LEN + EM_IPVHL_SIZE;
2707
2708 if (__predict_false(!M_WRITABLE(m))) {
2709 if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
2710 adapter->tx_csum_drop1++;
2711 m_freem(m);
2712 *m0 = NULL;
2713 return ENOBUFS;
2714 }
2715 eh = mtod(m, struct ether_header *);
2716
2717 if (eh->ether_type == htons(ETHERTYPE_VLAN))
2718 len += EVL_ENCAPLEN;
2719
2720 if (__predict_false(m->m_len < len)) {
2721 adapter->tx_csum_drop2++;
2722 m_freem(m);
2723 *m0 = NULL;
2724 return ENOBUFS;
2725 }
2726 return 0;
2727 }
2728
2729 if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
2730 adapter->tx_csum_pullup1++;
2731 m = m_pullup(m, ETHER_HDR_LEN);
2732 if (m == NULL) {
2733 adapter->tx_csum_pullup1_failed++;
2734 *m0 = NULL;
2735 return ENOBUFS;
2736 }
2737 *m0 = m;
2738 }
2739 eh = mtod(m, struct ether_header *);
2740
2741 if (eh->ether_type == htons(ETHERTYPE_VLAN))
2742 len += EVL_ENCAPLEN;
2743
2744 if (__predict_false(m->m_len < len)) {
2745 adapter->tx_csum_pullup2++;
2746 m = m_pullup(m, len);
2747 if (m == NULL) {
2748 adapter->tx_csum_pullup2_failed++;
2749 *m0 = NULL;
2750 return ENOBUFS;
2751 }
2752 *m0 = m;
2753 }
2754 return 0;
2755}
2756
984263bc 2757static void
87307ba1 2758em_txeof(struct adapter *adapter)
984263bc 2759{
9c80d176 2760 struct ifnet *ifp = &adapter->arpcom.ac_if;
9f60d74b
SZ
2761 struct e1000_tx_desc *tx_desc;
2762 struct em_buffer *tx_buffer;
2763 int first, num_avail;
2764
2765 if (adapter->tx_dd_head == adapter->tx_dd_tail)
2766 return;
984263bc 2767
f647ad3d
JS
2768 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2769 return;
984263bc 2770
9c80d176 2771 num_avail = adapter->num_tx_desc_avail;
87307ba1 2772 first = adapter->next_tx_to_clean;
9c80d176 2773
9f60d74b
SZ
2774 while (adapter->tx_dd_head != adapter->tx_dd_tail) {
2775 int dd_idx = adapter->tx_dd[adapter->tx_dd_head];
984263bc 2776
9f60d74b 2777 tx_desc = &adapter->tx_desc_base[dd_idx];
79938e61 2778
9f60d74b
SZ
2779 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2780 EM_INC_TXDD_IDX(adapter->tx_dd_head);
984263bc 2781
9f60d74b
SZ
2782 if (++dd_idx == adapter->num_tx_desc)
2783 dd_idx = 0;
9c80d176 2784
9f60d74b
SZ
2785 while (first != dd_idx) {
2786 tx_buffer = &adapter->tx_buffer_area[first];
2787 tx_desc = &adapter->tx_desc_base[first];
9c80d176 2788
9f60d74b
SZ
2789 tx_desc->upper.data = 0;
2790 tx_desc->lower.data = 0;
2791 tx_desc->buffer_addr = 0;
2792 num_avail++;
2793
2794 if (tx_buffer->m_head) {
2795 ifp->if_opackets++;
2796 bus_dmamap_unload(adapter->txtag,
2797 tx_buffer->map);
2798 m_freem(tx_buffer->m_head);
2799 tx_buffer->m_head = NULL;
2800 }
2801
2802 if (++first == adapter->num_tx_desc)
2803 first = 0;
2804 }
87307ba1
SZ
2805 } else {
2806 break;
2807 }
f647ad3d 2808 }
9f60d74b
SZ
2809 adapter->next_tx_to_clean = first;
2810 adapter->num_tx_desc_avail = num_avail;
2811
2812 if (adapter->tx_dd_head == adapter->tx_dd_tail) {
2813 adapter->tx_dd_head = 0;
2814 adapter->tx_dd_tail = 0;
2815 }
2816
2817 if (!EM_IS_OACTIVE(adapter)) {
2818 ifp->if_flags &= ~IFF_OACTIVE;
2819
2820 /* All clean, turn off the timer */
2821 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2822 ifp->if_timer = 0;
2823 }
2824}
2825
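/*
 * Best-effort Tx reclamation that walks the ring up to the hardware
 * head (TDH) instead of waiting for DD write-backs; em_tx_purge() uses
 * it to drain pending work, e.g. after the link has been lost.
 */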
2826static void
2827em_tx_collect(struct adapter *adapter)
2828{
2829 struct ifnet *ifp = &adapter->arpcom.ac_if;
2830 struct e1000_tx_desc *tx_desc;
2831 struct em_buffer *tx_buffer;
2832 int tdh, first, num_avail, dd_idx = -1;
2833
2834 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2835 return;
2836
2837 tdh = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
2838 if (tdh == adapter->next_tx_to_clean)
2839 return;
2840
2841 if (adapter->tx_dd_head != adapter->tx_dd_tail)
2842 dd_idx = adapter->tx_dd[adapter->tx_dd_head];
2843
2844 num_avail = adapter->num_tx_desc_avail;
2845 first = adapter->next_tx_to_clean;
2846
2847 while (first != tdh) {
2848 tx_buffer = &adapter->tx_buffer_area[first];
2849 tx_desc = &adapter->tx_desc_base[first];
2850
2851 tx_desc->upper.data = 0;
2852 tx_desc->lower.data = 0;
2853 tx_desc->buffer_addr = 0;
2854 num_avail++;
2855
2856 if (tx_buffer->m_head) {
2857 ifp->if_opackets++;
2858 bus_dmamap_unload(adapter->txtag,
2859 tx_buffer->map);
2860 m_freem(tx_buffer->m_head);
2861 tx_buffer->m_head = NULL;
2862 }
2863
2864 if (first == dd_idx) {
2865 EM_INC_TXDD_IDX(adapter->tx_dd_head);
2866 if (adapter->tx_dd_head == adapter->tx_dd_tail) {
2867 adapter->tx_dd_head = 0;
2868 adapter->tx_dd_tail = 0;
2869 dd_idx = -1;
2870 } else {
2871 dd_idx = adapter->tx_dd[adapter->tx_dd_head];
2872 }
2873 }
2874
2875 if (++first == adapter->num_tx_desc)
2876 first = 0;
2877 }
2878 adapter->next_tx_to_clean = first;
9c80d176 2879 adapter->num_tx_desc_avail = num_avail;
984263bc 2880
9f60d74b 2881 if (!EM_IS_OACTIVE(adapter)) {
9c80d176 2882 ifp->if_flags &= ~IFF_OACTIVE;
afa68aa1 2883
9c80d176
SZ
2884 /* All clean, turn off the timer */
2885 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2886 ifp->if_timer = 0;
2887 }
2888}
984263bc 2889
9c80d176
SZ
2890/*
 2891 * When link is lost there is sometimes work still left in the TX ring,
 2892 * which would result in a watchdog; rather than allow that, do an
 2893 * attempted cleanup here and then reinit. Note that this has been
 2894 * seen mostly with fiber adapters.
2895 */
2896static void
2897em_tx_purge(struct adapter *adapter)
2898{
2899 struct ifnet *ifp = &adapter->arpcom.ac_if;
2900
2901 if (!adapter->link_active && ifp->if_timer) {
9f60d74b 2902 em_tx_collect(adapter);
9c80d176
SZ
2903 if (ifp->if_timer) {
2904 if_printf(ifp, "Link lost, TX pending, reinit\n");
f647ad3d 2905 ifp->if_timer = 0;
9c80d176
SZ
2906 em_init(adapter);
2907 }
f647ad3d 2908 }
984263bc
MD
2909}
2910
984263bc 2911static int
9c80d176 2912em_newbuf(struct adapter *adapter, int i, int init)
984263bc 2913{
9c80d176
SZ
2914 struct mbuf *m;
2915 bus_dma_segment_t seg;
2916 bus_dmamap_t map;
9ccd8c1f 2917 struct em_buffer *rx_buffer;
9c80d176
SZ
2918 int error, nseg;
2919
2920 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2921 if (m == NULL) {
2922 adapter->mbuf_cluster_failed++;
2923 if (init) {
2924 if_printf(&adapter->arpcom.ac_if,
2925 "Unable to allocate RX mbuf\n");
984263bc 2926 }
9c80d176 2927 return (ENOBUFS);
984263bc 2928 }
9c80d176 2929 m->m_len = m->m_pkthdr.len = MCLBYTES;
87307ba1 2930
9c80d176
SZ
2931 if (adapter->max_frame_size <= MCLBYTES - ETHER_ALIGN)
2932 m_adj(m, ETHER_ALIGN);
9ccd8c1f 2933
9c80d176
SZ
2934 error = bus_dmamap_load_mbuf_segment(adapter->rxtag,
2935 adapter->rx_sparemap, m,
2936 &seg, 1, &nseg, BUS_DMA_NOWAIT);
9ccd8c1f 2937 if (error) {
9c80d176
SZ
2938 m_freem(m);
2939 if (init) {
2940 if_printf(&adapter->arpcom.ac_if,
2941 "Unable to load RX mbuf\n");
2942 }
87307ba1 2943 return (error);
9ccd8c1f 2944 }
984263bc 2945
9c80d176
SZ
2946 rx_buffer = &adapter->rx_buffer_area[i];
2947 if (rx_buffer->m_head != NULL)
2948 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2949
2950 map = rx_buffer->map;
2951 rx_buffer->map = adapter->rx_sparemap;
2952 adapter->rx_sparemap = map;
2953
2954 rx_buffer->m_head = m;
2955
2956 adapter->rx_desc_base[i].buffer_addr = htole64(seg.ds_addr);
87307ba1 2957 return (0);
984263bc
MD
2958}
2959
984263bc 2960static int
9c80d176 2961em_create_rx_ring(struct adapter *adapter)
984263bc 2962{
9c80d176 2963 device_t dev = adapter->dev;
9ccd8c1f 2964 struct em_buffer *rx_buffer;
9c80d176
SZ
2965 int i, error;
2966
2967 adapter->rx_buffer_area =
2968 kmalloc(sizeof(struct em_buffer) * adapter->num_rx_desc,
2969 M_DEVBUF, M_WAITOK | M_ZERO);
9ccd8c1f 2970
9c80d176
SZ
2971 /*
2972 * Create DMA tag for rx buffers
2973 */
2974 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */
2975 1, 0, /* alignment, bounds */
2976 BUS_SPACE_MAXADDR, /* lowaddr */
2977 BUS_SPACE_MAXADDR, /* highaddr */
2978 NULL, NULL, /* filter, filterarg */
2979 MCLBYTES, /* maxsize */
2980 1, /* nsegments */
2981 MCLBYTES, /* maxsegsize */
2982 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
2983 &adapter->rxtag);
87307ba1 2984 if (error) {
9c80d176
SZ
2985 device_printf(dev, "Unable to allocate RX DMA tag\n");
2986 kfree(adapter->rx_buffer_area, M_DEVBUF);
2987 adapter->rx_buffer_area = NULL;
2988 return error;
2989 }
2990
2991 /*
2992 * Create spare DMA map for rx buffers
2993 */
2994 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK,
2995 &adapter->rx_sparemap);
2996 if (error) {
2997 device_printf(dev, "Unable to create spare RX DMA map\n");
2998 bus_dma_tag_destroy(adapter->rxtag);
2999 kfree(adapter->rx_buffer_area, M_DEVBUF);
3000 adapter->rx_buffer_area = NULL;
3001 return error;
9ccd8c1f 3002 }
9c80d176
SZ
3003
3004 /*
3005 * Create DMA maps for rx buffers
3006 */
3007 for (i = 0; i < adapter->num_rx_desc; i++) {
3008 rx_buffer = &adapter->rx_buffer_area[i];
3009
3010 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK,
9ccd8c1f 3011 &rx_buffer->map);
87307ba1 3012 if (error) {
9c80d176
SZ
3013 device_printf(dev, "Unable to create RX DMA map\n");
3014 em_destroy_rx_ring(adapter, i);
3015 return error;
9ccd8c1f 3016 }
984263bc 3017 }
87307ba1 3018 return (0);
984263bc
MD
3019}
3020
984263bc 3021static int
9c80d176 3022em_init_rx_ring(struct adapter *adapter)
984263bc 3023{
9c80d176 3024 int i, error;
984263bc 3025
9c80d176 3026 /* Reset descriptor ring */
87307ba1 3027 bzero(adapter->rx_desc_base,
9c80d176 3028 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
87307ba1 3029
9c80d176
SZ
3030 /* Allocate new ones. */
3031 for (i = 0; i < adapter->num_rx_desc; i++) {
3032 error = em_newbuf(adapter, i, 1);
3033 if (error)
3034 return (error);
3035 }
984263bc
MD
3036
3037 /* Setup our descriptor pointers */
f647ad3d 3038 adapter->next_rx_desc_to_check = 0;
87307ba1
SZ
3039
3040 return (0);
984263bc
MD
3041}
3042
984263bc 3043static void
9c80d176 3044em_init_rx_unit(struct adapter *adapter)
984263bc 3045{
9c80d176 3046 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 3047 uint64_t bus_addr;
9c80d176 3048 uint32_t rctl, rxcsum;
984263bc 3049
87307ba1
SZ
3050 /*
3051 * Make sure receives are disabled while setting
3052 * up the descriptor ring
3053 */
9c80d176
SZ
3054 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3055 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
984263bc 3056
9c80d176 3057 if (adapter->hw.mac.type >= e1000_82540) {
9c80d176
SZ
3058 /*
3059 * Set the interrupt throttling rate. Value is calculated
3060 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
3061 */
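		/*
		 * Illustrative example: a ceiling of 10000 interrupts/s
		 * programs ITR to 1000000000 / 256 / 10000 = 390 (in
		 * 256ns units), i.e. roughly one interrupt every 100us.
		 */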
3062 if (adapter->int_throttle_ceil) {
3063 E1000_WRITE_REG(&adapter->hw, E1000_ITR,
3064 1000000000 / 256 / adapter->int_throttle_ceil);
d0870c72 3065 } else {
9c80d176 3066 E1000_WRITE_REG(&adapter->hw, E1000_ITR, 0);
d0870c72 3067 }
f647ad3d 3068 }
984263bc 3069
9c80d176
SZ
 3070	/* Disable accelerated acknowledgement */
3071 if (adapter->hw.mac.type == e1000_82574) {
3072 E1000_WRITE_REG(&adapter->hw,
3073 E1000_RFCTL, E1000_RFCTL_ACK_DIS);
3074 }
3075
984263bc 3076 /* Setup the Base and Length of the Rx Descriptor Ring */
9ccd8c1f 3077 bus_addr = adapter->rxdma.dma_paddr;
9c80d176
SZ
3078 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3079 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3080 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3081 (uint32_t)(bus_addr >> 32));
3082 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3083 (uint32_t)bus_addr);
984263bc 3084
984263bc 3085 /* Setup the Receive Control Register */
9c80d176
SZ
3086 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3087 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3088 E1000_RCTL_RDMTS_HALF |
3089 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
984263bc 3090
9c80d176
SZ
3091 /* Make sure VLAN Filters are off */
3092 rctl &= ~E1000_RCTL_VFE;
3093
3094 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3095 rctl |= E1000_RCTL_SBP;
3096 else
3097 rctl &= ~E1000_RCTL_SBP;
984263bc 3098
984263bc
MD
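	/*
	 * Map the configured rx buffer size onto RCTL's size encoding;
	 * sizes above 2048 additionally need the buffer size extension
	 * (BSEX) and long packet enable (LPE) bits, as set below.
	 */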
3099 switch (adapter->rx_buffer_len) {
3100 default:
9c80d176
SZ
3101 case 2048:
3102 rctl |= E1000_RCTL_SZ_2048;
3103 break;
3104
3105 case 4096:
3106 rctl |= E1000_RCTL_SZ_4096 |
3107 E1000_RCTL_BSEX | E1000_RCTL_LPE;
984263bc 3108 break;
9c80d176
SZ
3109
3110 case 8192:
3111 rctl |= E1000_RCTL_SZ_8192 |
3112 E1000_RCTL_BSEX | E1000_RCTL_LPE;
984263bc 3113 break;
9c80d176
SZ
3114
3115 case 16384:
3116 rctl |= E1000_RCTL_SZ_16384 |
3117 E1000_RCTL_BSEX | E1000_RCTL_LPE;
984263bc
MD
3118 break;
3119 }
3120
3121 if (ifp->if_mtu > ETHERMTU)
9c80d176
SZ
3122 rctl |= E1000_RCTL_LPE;
3123 else
3124 rctl &= ~E1000_RCTL_LPE;
984263bc 3125
9c80d176
SZ
3126 /* Receive Checksum Offload for TCP and UDP */
3127 if (ifp->if_capenable & IFCAP_RXCSUM) {
3128 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3129 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3130 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
984263bc
MD
3131 }
3132
9c80d176
SZ
3133 /*
 3134 * XXX TEMPORARY WORKAROUND: on some systems with 82573, such as
 3135 * the Lenovo X60, long latencies are observed. This
3136 * change eliminates the problem, but since having positive
3137 * values in RDTR is a known source of problems on other
3138 * platforms another solution is being sought.
3139 */
17e92400 3140 if (em_82573_workaround && adapter->hw.mac.type == e1000_82573) {
2cc36323
SZ
3141 E1000_WRITE_REG(&adapter->hw, E1000_RADV, EM_RADV_82573);
3142 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, EM_RDTR_82573);
17e92400 3143 }
b7f8f318 3144
984263bc 3145 /* Enable Receives */
9c80d176 3146 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
87307ba1 3147
9c80d176
SZ
3148 /*
3149 * Setup the HW Rx Head and Tail Descriptor Pointers
3150 */
3151 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
3152 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
984263bc
MD
3153}
3154
984263bc 3155static void
9c80d176 3156em_destroy_rx_ring(struct adapter *adapter, int ndesc)
984263bc 3157{
f647ad3d
JS
3158 struct em_buffer *rx_buffer;
3159 int i;
984263bc 3160
9c80d176
SZ
3161 if (adapter->rx_buffer_area == NULL)
3162 return;
984263bc 3163
9c80d176
SZ
3164 for (i = 0; i < ndesc; i++) {
3165 rx_buffer = &adapter->rx_buffer_area[i];
3166
3167 KKASSERT(rx_buffer->m_head == NULL);
3168 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
9ccd8c1f 3169 }
9c80d176
SZ
3170 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3171 bus_dma_tag_destroy(adapter->rxtag);
3172
3173 kfree(adapter->rx_buffer_area, M_DEVBUF);
3174 adapter->rx_buffer_area = NULL;
984263bc
MD
3175}
3176
984263bc 3177static void
87307ba1 3178em_rxeof(struct adapter *adapter, int count)
984263bc 3179{
9c80d176
SZ
3180 struct ifnet *ifp = &adapter->arpcom.ac_if;
3181 uint8_t status, accept_frame = 0, eop = 0;
f647ad3d 3182 uint16_t len, desc_len, prev_len_adj;
9c80d176
SZ
3183 struct e1000_rx_desc *current_desc;
3184 struct mbuf *mp;
f647ad3d 3185 int i;
68b67450 3186 struct mbuf_chain chain[MAXCPU];
984263bc 3187
984263bc 3188 i = adapter->next_rx_desc_to_check;
f647ad3d 3189 current_desc = &adapter->rx_desc_base[i];
984263bc 3190
87307ba1 3191 if (!(current_desc->status & E1000_RXD_STAT_DD))
984263bc 3192 return;
7ea52455 3193
57ccf5a2 3194 ether_input_chain_init(chain);
68b67450 3195
87307ba1 3196 while ((current_desc->status & E1000_RXD_STAT_DD) && count != 0) {
9c80d176
SZ
3197 struct mbuf *m = NULL;
3198
07855a48 3199 logif(pkt_receive);
9c80d176 3200
984263bc 3201 mp = adapter->rx_buffer_area[i].m_head;
9c80d176
SZ
3202
3203 /*
3204 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
3205 * needs to access the last received byte in the mbuf.
3206 */
9ccd8c1f
JS
3207 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
3208 BUS_DMASYNC_POSTREAD);
984263bc
MD
3209
3210 accept_frame = 1;
0d366ee7 3211 prev_len_adj = 0;
9ccd8c1f 3212 desc_len = le16toh(current_desc->length);
9c80d176
SZ
3213 status = current_desc->status;
3214 if (status & E1000_RXD_STAT_EOP) {
984263bc
MD
3215 count--;
3216 eop = 1;
0d366ee7
MD
3217 if (desc_len < ETHER_CRC_LEN) {
3218 len = 0;
3219 prev_len_adj = ETHER_CRC_LEN - desc_len;
7ea52455 3220 } else {
0d366ee7
MD
3221 len = desc_len - ETHER_CRC_LEN;
3222 }
984263bc
MD
3223 } else {
3224 eop = 0;
9ccd8c1f 3225 len = desc_len;
984263bc
MD
3226 }
3227
3228 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
9c80d176 3229 uint8_t last_byte;
9ccd8c1f 3230 uint32_t pkt_len = desc_len;
984263bc
MD
3231
3232 if (adapter->fmp != NULL)
9c80d176 3233 pkt_len += adapter->fmp->m_pkthdr.len;
f647ad3d 3234
9ccd8c1f 3235 last_byte = *(mtod(mp, caddr_t) + desc_len - 1);
9c80d176
SZ
3236 if (TBI_ACCEPT(&adapter->hw, status,
3237 current_desc->errors, pkt_len, last_byte,
3238 adapter->min_frame_size, adapter->max_frame_size)) {
3239 e1000_tbi_adjust_stats_82543(&adapter->hw,
3240 &adapter->stats, pkt_len,
3241 adapter->hw.mac.addr,
3242 adapter->max_frame_size);
f647ad3d
JS
3243 if (len > 0)
3244 len--;
7ea52455 3245 } else {
984263bc
MD
3246 accept_frame = 0;
3247 }
3248 }
3249
3250 if (accept_frame) {
9c80d176
SZ
3251 if (em_newbuf(adapter, i, 0) != 0) {
3252 ifp->if_iqdrops++;
3253 goto discard;
984263bc
MD
3254 }
3255
3256 /* Assign correct length to the current fragment */
3257 mp->m_len = len;
3258
3259 if (adapter->fmp == NULL) {
3260 mp->m_pkthdr.len = len;
9c80d176 3261 adapter->fmp = mp; /* Store the first mbuf */
984263bc
MD
3262 adapter->lmp = mp;
3263 } else {
9c80d176
SZ
3264 /*
3265 * Chain mbuf's together
3266 */
3267
3268 /*
87307ba1
SZ
3269 * Adjust length of previous mbuf in chain if
3270 * we received less than 4 bytes in the last
3271 * descriptor.
0d366ee7
MD
3272 */
3273 if (prev_len_adj > 0) {
3274 adapter->lmp->m_len -= prev_len_adj;
9c80d176
SZ
3275 adapter->fmp->m_pkthdr.len -=
3276 prev_len_adj;
0d366ee7 3277 }
984263bc
MD
3278 adapter->lmp->m_next = mp;
3279<