em: Changing TXCSUM does not require reinitializing the hardware
[dragonfly.git] / sys / dev / netif / em / if_em.c
78195a76 1/*
78195a76
MD
2 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
3 *
9c80d176 4 * Copyright (c) 2001-2008, Intel Corporation
78195a76
MD
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9c80d176 9 *
78195a76
MD
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
9c80d176 12 *
78195a76
MD
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
9c80d176 16 *
78195a76
MD
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
9c80d176 20 *
78195a76
MD
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 *
34 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
9c80d176 35 *
78195a76
MD
36 * This code is derived from software contributed to The DragonFly Project
37 * by Matthew Dillon <dillon@backplane.com>
9c80d176 38 *
78195a76
MD
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
9c80d176 42 *
78195a76
MD
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * 3. Neither the name of The DragonFly Project nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific, prior written permission.
9c80d176 52 *
78195a76
MD
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
56 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
57 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
59 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
60 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
61 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
62 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
63 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
9c80d176 65 *
78195a76
MD
66 */
67/*
68 * SERIALIZATION API RULES:
69 *
70 * - If the driver uses the same serializer for the interrupt as for the
71 * ifnet, most of the serialization will be done automatically for the
9c80d176 72 * driver.
78195a76
MD
73 *
74 * - ifmedia entry points will be serialized by the ifmedia code using the
75 * ifnet serializer.
76 *
77 * - if_* entry points except for if_input will be serialized by the IF
78 * and protocol layers.
79 *
80 * - The device driver must be sure to serialize access from timeout code
81 * installed by the device driver.
82 *
83 * - The device driver typically holds the serializer at the time it wishes
9c80d176
SZ
84 * to call if_input.
85 *
86 * - We must call lwkt_serialize_handler_enable() prior to enabling the
87 * hardware interrupt and lwkt_serialize_handler_disable() after disabling
88 * the hardware interrupt in order to avoid handler execution races from
89 * scheduled interrupt threads.
78195a76
MD
90 *
91 * NOTE! Since callers into the device driver hold the ifnet serializer,
92 * the device driver may be holding a serializer at the time it calls
93 * if_input even if it is not serializer-aware.
94 */
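/*
 * Illustrative sketch (not code from this driver) of the handler
 * enable/disable ordering described above; the softc layout and the
 * IMS/IMC register macros below are placeholders.
 */
#if 0
static void
example_enable_hw_intr(struct example_softc *sc)
{
	/* Allow scheduled interrupt threads to run the handler again... */
	lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);
	/* ...and only then unmask the interrupt in the hardware. */
	EXAMPLE_WRITE_IMS(sc, EXAMPLE_INTR_MASK);
}

static void
example_disable_hw_intr(struct example_softc *sc)
{
	/* Mask the interrupt in the hardware first... */
	EXAMPLE_WRITE_IMC(sc, EXAMPLE_INTR_MASK);
	/*
	 * ...then block the handler, so an interrupt thread that was
	 * scheduled before the mask took effect cannot race us.
	 */
	lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
}
#endif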
2b71c8f1
SZ
95
96#include "opt_polling.h"
87307ba1
SZ
97
98#include <sys/param.h>
99#include <sys/bus.h>
100#include <sys/endian.h>
9db4b353 101#include <sys/interrupt.h>
87307ba1
SZ
102#include <sys/kernel.h>
103#include <sys/ktr.h>
104#include <sys/malloc.h>
105#include <sys/mbuf.h>
9c80d176 106#include <sys/proc.h>
87307ba1
SZ
107#include <sys/rman.h>
108#include <sys/serialize.h>
109#include <sys/socket.h>
110#include <sys/sockio.h>
111#include <sys/sysctl.h>
9c80d176 112#include <sys/systm.h>
87307ba1
SZ
113
114#include <net/bpf.h>
115#include <net/ethernet.h>
116#include <net/if.h>
117#include <net/if_arp.h>
118#include <net/if_dl.h>
119#include <net/if_media.h>
87307ba1
SZ
120#include <net/ifq_var.h>
121#include <net/vlan/if_vlan_var.h>
b637f170 122#include <net/vlan/if_vlan_ether.h>
87307ba1 123
87307ba1 124#include <netinet/in_systm.h>
9c80d176 125#include <netinet/in.h>
87307ba1
SZ
126#include <netinet/ip.h>
127#include <netinet/tcp.h>
128#include <netinet/udp.h>
984263bc 129
9c80d176
SZ
130#include <bus/pci/pcivar.h>
131#include <bus/pci/pcireg.h>
984263bc 132
9c80d176
SZ
133#include <dev/netif/ig_hal/e1000_api.h>
134#include <dev/netif/ig_hal/e1000_82571.h>
135#include <dev/netif/em/if_em.h>
984263bc 136
9c80d176 137#define EM_NAME "Intel(R) PRO/1000 Network Connection "
6d5e2922 138#define EM_VER " 7.2.4"
9c80d176 139
96ced48a
SZ
140#define _EM_DEVICE(id, ret) \
141 { EM_VENDOR_ID, E1000_DEV_ID_##id, ret, EM_NAME #id EM_VER }
142#define EM_EMX_DEVICE(id) _EM_DEVICE(id, -100)
143#define EM_DEVICE(id) _EM_DEVICE(id, 0)
144#define EM_DEVICE_NULL { 0, 0, 0, NULL }
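/*
 * For illustration, EM_EMX_DEVICE(82574L) expands to
 *	{ EM_VENDOR_ID, E1000_DEV_ID_82574L, -100,
 *	  "Intel(R) PRO/1000 Network Connection 82574L 7.2.4" }
 * The third field is handed back to the bus by em_probe(), so entries
 * generated with EM_EMX_DEVICE() bid with a lower probe priority (-100)
 * and can be claimed instead by the emx(4) driver when it is present,
 * while plain EM_DEVICE() entries bid with 0.
 */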
9c80d176
SZ
145
146static const struct em_vendor_info em_vendor_info_array[] = {
147 EM_DEVICE(82540EM),
148 EM_DEVICE(82540EM_LOM),
149 EM_DEVICE(82540EP),
150 EM_DEVICE(82540EP_LOM),
151 EM_DEVICE(82540EP_LP),
152
153 EM_DEVICE(82541EI),
154 EM_DEVICE(82541ER),
155 EM_DEVICE(82541ER_LOM),
156 EM_DEVICE(82541EI_MOBILE),
157 EM_DEVICE(82541GI),
158 EM_DEVICE(82541GI_LF),
159 EM_DEVICE(82541GI_MOBILE),
160
161 EM_DEVICE(82542),
162
163 EM_DEVICE(82543GC_FIBER),
164 EM_DEVICE(82543GC_COPPER),
165
166 EM_DEVICE(82544EI_COPPER),
167 EM_DEVICE(82544EI_FIBER),
168 EM_DEVICE(82544GC_COPPER),
169 EM_DEVICE(82544GC_LOM),
170
171 EM_DEVICE(82545EM_COPPER),
172 EM_DEVICE(82545EM_FIBER),
173 EM_DEVICE(82545GM_COPPER),
174 EM_DEVICE(82545GM_FIBER),
175 EM_DEVICE(82545GM_SERDES),
176
177 EM_DEVICE(82546EB_COPPER),
178 EM_DEVICE(82546EB_FIBER),
179 EM_DEVICE(82546EB_QUAD_COPPER),
180 EM_DEVICE(82546GB_COPPER),
181 EM_DEVICE(82546GB_FIBER),
182 EM_DEVICE(82546GB_SERDES),
183 EM_DEVICE(82546GB_PCIE),
184 EM_DEVICE(82546GB_QUAD_COPPER),
185 EM_DEVICE(82546GB_QUAD_COPPER_KSP3),
186
187 EM_DEVICE(82547EI),
188 EM_DEVICE(82547EI_MOBILE),
189 EM_DEVICE(82547GI),
190
96ced48a
SZ
191 EM_EMX_DEVICE(82571EB_COPPER),
192 EM_EMX_DEVICE(82571EB_FIBER),
193 EM_EMX_DEVICE(82571EB_SERDES),
194 EM_EMX_DEVICE(82571EB_SERDES_DUAL),
195 EM_EMX_DEVICE(82571EB_SERDES_QUAD),
196 EM_EMX_DEVICE(82571EB_QUAD_COPPER),
75a5634e 197 EM_EMX_DEVICE(82571EB_QUAD_COPPER_BP),
96ced48a
SZ
198 EM_EMX_DEVICE(82571EB_QUAD_COPPER_LP),
199 EM_EMX_DEVICE(82571EB_QUAD_FIBER),
200 EM_EMX_DEVICE(82571PT_QUAD_COPPER),
201
202 EM_EMX_DEVICE(82572EI_COPPER),
203 EM_EMX_DEVICE(82572EI_FIBER),
204 EM_EMX_DEVICE(82572EI_SERDES),
205 EM_EMX_DEVICE(82572EI),
206
207 EM_EMX_DEVICE(82573E),
208 EM_EMX_DEVICE(82573E_IAMT),
209 EM_EMX_DEVICE(82573L),
210
2d0e5700
SZ
211 EM_DEVICE(82583V),
212
96ced48a
SZ
213 EM_EMX_DEVICE(80003ES2LAN_COPPER_SPT),
214 EM_EMX_DEVICE(80003ES2LAN_SERDES_SPT),
215 EM_EMX_DEVICE(80003ES2LAN_COPPER_DPT),
216 EM_EMX_DEVICE(80003ES2LAN_SERDES_DPT),
9c80d176
SZ
217
218 EM_DEVICE(ICH8_IGP_M_AMT),
219 EM_DEVICE(ICH8_IGP_AMT),
220 EM_DEVICE(ICH8_IGP_C),
221 EM_DEVICE(ICH8_IFE),
222 EM_DEVICE(ICH8_IFE_GT),
223 EM_DEVICE(ICH8_IFE_G),
224 EM_DEVICE(ICH8_IGP_M),
2d0e5700 225 EM_DEVICE(ICH8_82567V_3),
9c80d176
SZ
226
227 EM_DEVICE(ICH9_IGP_M_AMT),
228 EM_DEVICE(ICH9_IGP_AMT),
229 EM_DEVICE(ICH9_IGP_C),
230 EM_DEVICE(ICH9_IGP_M),
231 EM_DEVICE(ICH9_IGP_M_V),
232 EM_DEVICE(ICH9_IFE),
233 EM_DEVICE(ICH9_IFE_GT),
234 EM_DEVICE(ICH9_IFE_G),
235 EM_DEVICE(ICH9_BM),
236
96ced48a 237 EM_EMX_DEVICE(82574L),
2d0e5700 238 EM_EMX_DEVICE(82574LA),
9c80d176
SZ
239
240 EM_DEVICE(ICH10_R_BM_LM),
241 EM_DEVICE(ICH10_R_BM_LF),
242 EM_DEVICE(ICH10_R_BM_V),
243 EM_DEVICE(ICH10_D_BM_LM),
244 EM_DEVICE(ICH10_D_BM_LF),
2d0e5700
SZ
245 EM_DEVICE(ICH10_D_BM_V),
246
247 EM_DEVICE(PCH_M_HV_LM),
248 EM_DEVICE(PCH_M_HV_LC),
249 EM_DEVICE(PCH_D_HV_DM),
250 EM_DEVICE(PCH_D_HV_DC),
251
252 EM_DEVICE(PCH2_LV_LM),
253 EM_DEVICE(PCH2_LV_V),
984263bc 254
f647ad3d 255 /* required last entry */
9c80d176 256 EM_DEVICE_NULL
984263bc
MD
257};
258
f647ad3d
JS
259static int em_probe(device_t);
260static int em_attach(device_t);
261static int em_detach(device_t);
262static int em_shutdown(device_t);
87307ba1
SZ
263static int em_suspend(device_t);
264static int em_resume(device_t);
9c80d176
SZ
265
266static void em_init(void *);
267static void em_stop(struct adapter *);
f647ad3d 268static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
9c80d176
SZ
269static void em_start(struct ifnet *);
270#ifdef DEVICE_POLLING
271static void em_poll(struct ifnet *, enum poll_cmd, int);
272#endif
f647ad3d 273static void em_watchdog(struct ifnet *);
f647ad3d
JS
274static void em_media_status(struct ifnet *, struct ifmediareq *);
275static int em_media_change(struct ifnet *);
9c80d176
SZ
276static void em_timer(void *);
277
278static void em_intr(void *);
87ab432b
SZ
279static void em_intr_mask(void *);
280static void em_intr_body(struct adapter *, boolean_t);
9c80d176
SZ
281static void em_rxeof(struct adapter *, int);
282static void em_txeof(struct adapter *);
9f60d74b 283static void em_tx_collect(struct adapter *);
9c80d176 284static void em_tx_purge(struct adapter *);
f647ad3d
JS
285static void em_enable_intr(struct adapter *);
286static void em_disable_intr(struct adapter *);
9c80d176
SZ
287
288static int em_dma_malloc(struct adapter *, bus_size_t,
289 struct em_dma_alloc *);
290static void em_dma_free(struct adapter *, struct em_dma_alloc *);
291static void em_init_tx_ring(struct adapter *);
292static int em_init_rx_ring(struct adapter *);
293static int em_create_tx_ring(struct adapter *);
294static int em_create_rx_ring(struct adapter *);
295static void em_destroy_tx_ring(struct adapter *, int);
296static void em_destroy_rx_ring(struct adapter *, int);
297static int em_newbuf(struct adapter *, int, int);
298static int em_encap(struct adapter *, struct mbuf **);
299static void em_rxcsum(struct adapter *, struct e1000_rx_desc *,
300 struct mbuf *);
002b3a05 301static int em_txcsum_pullup(struct adapter *, struct mbuf **);
9f60d74b 302static int em_txcsum(struct adapter *, struct mbuf *,
9c80d176
SZ
303 uint32_t *, uint32_t *);
304
305static int em_get_hw_info(struct adapter *);
306static int em_is_valid_eaddr(const uint8_t *);
307static int em_alloc_pci_res(struct adapter *);
308static void em_free_pci_res(struct adapter *);
2d0e5700 309static int em_reset(struct adapter *);
9c80d176
SZ
310static void em_setup_ifp(struct adapter *);
311static void em_init_tx_unit(struct adapter *);
312static void em_init_rx_unit(struct adapter *);
313static void em_update_stats(struct adapter *);
f647ad3d
JS
314static void em_set_promisc(struct adapter *);
315static void em_disable_promisc(struct adapter *);
316static void em_set_multi(struct adapter *);
87307ba1 317static void em_update_link_status(struct adapter *);
f647ad3d 318static void em_smartspeed(struct adapter *);
2d0e5700 319static void em_set_itr(struct adapter *, uint32_t);
6d5e2922 320static void em_disable_aspm(struct adapter *);
9c80d176
SZ
321
322/* Hardware workarounds */
f647ad3d
JS
323static int em_82547_fifo_workaround(struct adapter *, int);
324static void em_82547_update_fifo_head(struct adapter *, int);
325static int em_82547_tx_fifo_reset(struct adapter *);
1eca7b82
SZ
326static void em_82547_move_tail(void *);
327static void em_82547_move_tail_serialized(struct adapter *);
9c80d176
SZ
328static uint32_t em_82544_fill_desc(bus_addr_t, uint32_t, PDESC_ARRAY);
329
f647ad3d 330static void em_print_debug_info(struct adapter *);
9c80d176
SZ
331static void em_print_nvm_info(struct adapter *);
332static void em_print_hw_stats(struct adapter *);
333
f647ad3d
JS
334static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
335static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
d0870c72 336static int em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
9f60d74b 337static int em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
9c80d176 338static void em_add_sysctl(struct adapter *adapter);
984263bc 339
9c80d176
SZ
340/* Management and WOL Support */
341static void em_get_mgmt(struct adapter *);
342static void em_rel_mgmt(struct adapter *);
343static void em_get_hw_control(struct adapter *);
344static void em_rel_hw_control(struct adapter *);
345static void em_enable_wol(device_t);
984263bc
MD
346
347static device_method_t em_methods[] = {
348 /* Device interface */
9c80d176
SZ
349 DEVMETHOD(device_probe, em_probe),
350 DEVMETHOD(device_attach, em_attach),
351 DEVMETHOD(device_detach, em_detach),
352 DEVMETHOD(device_shutdown, em_shutdown),
353 DEVMETHOD(device_suspend, em_suspend),
354 DEVMETHOD(device_resume, em_resume),
355 { 0, 0 }
984263bc
MD
356};
357
358static driver_t em_driver = {
9c80d176
SZ
359 "em",
360 em_methods,
361 sizeof(struct adapter),
984263bc
MD
362};
363
364static devclass_t em_devclass;
32832096
MD
365
366DECLARE_DUMMY_MODULE(if_em);
9c80d176 367MODULE_DEPEND(em, ig_hal, 1, 1, 1);
aa2b9d05 368DRIVER_MODULE(if_em, pci, em_driver, em_devclass, NULL, NULL);
984263bc 369
91e8debf
SZ
370/*
371 * Tunables
372 */
9c80d176
SZ
373static int em_int_throttle_ceil = EM_DEFAULT_ITR;
374static int em_rxd = EM_DEFAULT_RXD;
375static int em_txd = EM_DEFAULT_TXD;
053f3ae6 376static int em_smart_pwr_down = 0;
0d366ee7 377
9c80d176
SZ
378/* Controls whether promiscuous also shows bad packets */
379static int em_debug_sbp = FALSE;
0d366ee7 380
053f3ae6
SZ
381static int em_82573_workaround = 1;
382static int em_msi_enable = 1;
05580856 383
d0870c72 384TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
1eca7b82
SZ
385TUNABLE_INT("hw.em.rxd", &em_rxd);
386TUNABLE_INT("hw.em.txd", &em_txd);
387TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
9c80d176 388TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
05580856 389TUNABLE_INT("hw.em.82573_workaround", &em_82573_workaround);
053f3ae6 390TUNABLE_INT("hw.em.msi.enable", &em_msi_enable);
9c80d176
SZ
391
392/* Global used in WOL setup with multiport cards */
393static int em_global_quad_port_a = 0;
394
395/* Set this to one to display debug statistics */
396static int em_display_debug_stats = 0;
0d366ee7 397
07855a48
MD
398#if !defined(KTR_IF_EM)
399#define KTR_IF_EM KTR_ALL
400#endif
401KTR_INFO_MASTER(if_em);
5bf48697
AE
402KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin");
403KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end");
404KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet");
405KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet");
406KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean");
07855a48
MD
407#define logif(name) KTR_LOG(if_em_ ## name)
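/* For example, logif(intr_beg) expands to KTR_LOG(if_em_intr_beg). */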
408
984263bc
MD
409static int
410em_probe(device_t dev)
411{
9c80d176
SZ
412 const struct em_vendor_info *ent;
413 uint16_t vid, did;
984263bc 414
9c80d176
SZ
415 vid = pci_get_vendor(dev);
416 did = pci_get_device(dev);
984263bc 417
9c80d176
SZ
418 for (ent = em_vendor_info_array; ent->desc != NULL; ++ent) {
419 if (vid == ent->vendor_id && did == ent->device_id) {
420 device_set_desc(dev, ent->desc);
dbcd0c9b 421 device_set_async_attach(dev, TRUE);
96ced48a 422 return (ent->ret);
984263bc 423 }
984263bc 424 }
87307ba1 425 return (ENXIO);
984263bc
MD
426}
427
984263bc
MD
428static int
429em_attach(device_t dev)
430{
9c80d176
SZ
431 struct adapter *adapter = device_get_softc(dev);
432 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d
JS
433 int tsize, rsize;
434 int error = 0;
2d0e5700 435 uint16_t eeprom_data, device_id, apme_mask;
87ab432b 436 driver_intr_t *intr_func;
984263bc 437
9c80d176 438 adapter->dev = adapter->osdep.dev = dev;
f647ad3d 439
bf0ecf68
MD
440 callout_init_mp(&adapter->timer);
441 callout_init_mp(&adapter->tx_fifo_timer);
af82d4bb 442
9c80d176
SZ
443 /* Determine hardware and mac info */
444 error = em_get_hw_info(adapter);
445 if (error) {
446 device_printf(dev, "Identify hardware failed\n");
447 goto fail;
f647ad3d
JS
448 }
449
9c80d176
SZ
450 /* Setup PCI resources */
451 error = em_alloc_pci_res(adapter);
452 if (error) {
453 device_printf(dev, "Allocation of PCI resources failed\n");
454 goto fail;
455 }
984263bc 456
9c80d176
SZ
457 /*
458 * For ICH8 and family we need to map the flash memory,
459 * and this must happen after the MAC is identified.
460 */
461 if (adapter->hw.mac.type == e1000_ich8lan ||
2d0e5700 462 adapter->hw.mac.type == e1000_ich9lan ||
9c80d176 463 adapter->hw.mac.type == e1000_ich10lan ||
2d0e5700
SZ
464 adapter->hw.mac.type == e1000_pchlan ||
465 adapter->hw.mac.type == e1000_pch2lan) {
9c80d176
SZ
466 adapter->flash_rid = EM_BAR_FLASH;
467
468 adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
469 &adapter->flash_rid, RF_ACTIVE);
470 if (adapter->flash == NULL) {
471 device_printf(dev, "Mapping of Flash failed\n");
472 error = ENXIO;
473 goto fail;
474 }
475 adapter->osdep.flash_bus_space_tag =
476 rman_get_bustag(adapter->flash);
477 adapter->osdep.flash_bus_space_handle =
478 rman_get_bushandle(adapter->flash);
984263bc 479
9c80d176
SZ
480 /*
481 * This is used in the shared code
482 * XXX this goof is actually not used.
483 */
484 adapter->hw.flash_address = (uint8_t *)adapter->flash;
485 }
0d366ee7 486
9c80d176
SZ
487 /* Do Shared Code initialization */
488 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
489 device_printf(dev, "Setup of Shared code failed\n");
490 error = ENXIO;
491 goto fail;
f647ad3d 492 }
7ea52455 493
9c80d176
SZ
494 e1000_get_bus_info(&adapter->hw);
495
1eca7b82 496 /*
9c80d176 497 * Validate the number of transmit and receive descriptors. It
1eca7b82 498 * must not exceed the hardware maximum, and must be a multiple
9c80d176 499 * of E1000_DBA_ALIGN.
1eca7b82 500 */
9c80d176
SZ
501 if ((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN != 0 ||
502 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
503 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
504 em_txd < EM_MIN_TXD) {
1eca7b82 505 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
9c80d176 506 EM_DEFAULT_TXD, em_txd);
1eca7b82
SZ
507 adapter->num_tx_desc = EM_DEFAULT_TXD;
508 } else {
509 adapter->num_tx_desc = em_txd;
510 }
9c80d176
SZ
511 if ((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN != 0 ||
512 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
513 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
514 em_rxd < EM_MIN_RXD) {
1eca7b82 515 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
9c80d176 516 EM_DEFAULT_RXD, em_rxd);
1eca7b82
SZ
517 adapter->num_rx_desc = EM_DEFAULT_RXD;
518 } else {
519 adapter->num_rx_desc = em_rxd;
520 }
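/*
 * Worked example for the checks above, assuming EM_DBA_ALIGN is 128 and
 * the legacy descriptors are 16 bytes each: em_txd/em_rxd then have to
 * be a multiple of 8, e.g. 256 descriptors (256 * 16 = 4096 bytes) pass
 * the alignment test, while 250 (250 * 16 = 4000 bytes) would be
 * rejected and the defaults used instead.
 */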
521
9c80d176
SZ
522 adapter->hw.mac.autoneg = DO_AUTO_NEG;
523 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
524 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
525 adapter->rx_buffer_len = MCLBYTES;
e94c2bf4 526
9c80d176
SZ
527 /*
528 * Interrupt throttle rate
529 */
530 if (em_int_throttle_ceil == 0) {
531 adapter->int_throttle_ceil = 0;
532 } else {
533 int throttle = em_int_throttle_ceil;
f647ad3d 534
9c80d176
SZ
535 if (throttle < 0)
536 throttle = EM_DEFAULT_ITR;
0d366ee7 537
9c80d176
SZ
538 /* Recalculate the tunable value to get the exact frequency. */
539 throttle = 1000000000 / 256 / throttle;
664c7645
SZ
540
541 /* Upper 16 bits of ITR are reserved and should be zero */
542 if (throttle & 0xffff0000)
543 throttle = 1000000000 / 256 / EM_DEFAULT_ITR;
544
9c80d176
SZ
545 adapter->int_throttle_ceil = 1000000000 / 256 / throttle;
546 }
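/*
 * Worked example: a requested ceiling of 10000 interrupts/s yields a
 * register value of 1000000000 / 256 / 10000 = 390 (the ITR counts in
 * 256 ns units), and int_throttle_ceil is then recomputed as
 * 1000000000 / 256 / 390 = 10016 interrupts/s -- the closest rate the
 * hardware can actually be programmed for.
 */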
984263bc 547
9c80d176
SZ
548 e1000_init_script_state_82541(&adapter->hw, TRUE);
549 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
550
551 /* Copper options */
552 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
553 adapter->hw.phy.mdix = AUTO_ALL_MODES;
554 adapter->hw.phy.disable_polarity_correction = FALSE;
555 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
556 }
557
558 /* Set the frame limits assuming standard ethernet sized frames. */
559 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
560 adapter->min_frame_size = ETH_ZLEN + ETHER_CRC_LEN;
984263bc 561
9c80d176
SZ
562 /* This controls when hardware reports transmit completion status. */
563 adapter->hw.mac.report_tx_early = 1;
984263bc 564
87307ba1 565 /*
9c80d176 566 * Create top level busdma tag
984263bc 567 */
9c80d176
SZ
568 error = bus_dma_tag_create(NULL, 1, 0,
569 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
570 NULL, NULL,
571 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
572 0, &adapter->parent_dtag);
573 if (error) {
574 device_printf(dev, "could not create top level DMA tag\n");
af82d4bb 575 goto fail;
9c80d176 576 }
af82d4bb 577
9c80d176
SZ
578 /*
579 * Allocate Transmit Descriptor ring
580 */
581 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
1eca7b82 582 EM_DBA_ALIGN);
87307ba1
SZ
583 error = em_dma_malloc(adapter, tsize, &adapter->txdma);
584 if (error) {
9c80d176 585 device_printf(dev, "Unable to allocate tx_desc memory\n");
af82d4bb 586 goto fail;
984263bc 587 }
9c80d176 588 adapter->tx_desc_base = adapter->txdma.dma_vaddr;
984263bc 589
9c80d176
SZ
590 /*
591 * Allocate Receive Descriptor ring
592 */
593 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
1eca7b82 594 EM_DBA_ALIGN);
87307ba1
SZ
595 error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
596 if (error) {
9ccd8c1f 597 device_printf(dev, "Unable to allocate rx_desc memory\n");
af82d4bb 598 goto fail;
984263bc 599 }
9c80d176
SZ
600 adapter->rx_desc_base = adapter->rxdma.dma_vaddr;
601
2d0e5700
SZ
602 /* Allocate multicast array memory. */
603 adapter->mta = kmalloc(ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
604 M_DEVBUF, M_WAITOK);
605
606 /* Indicate SOL/IDER usage */
607 if (e1000_check_reset_block(&adapter->hw)) {
608 device_printf(dev,
609 "PHY reset is blocked due to SOL/IDER session.\n");
610 }
611
612 /*
613 * Start from a known state; this is important for reading the
614 * NVM and MAC address afterwards.
615 */
616 e1000_reset_hw(&adapter->hw);
617
9c80d176
SZ
618 /* Make sure we have a good EEPROM before we read from it */
619 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
620 /*
621 * Some PCI-E parts fail the first check due to
622 * the link being in sleep state, call it again,
623 * if it fails a second time, it's a real issue.
624 */
625 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
626 device_printf(dev,
627 "The EEPROM Checksum Is Not Valid\n");
628 error = EIO;
629 goto fail;
630 }
631 }
984263bc 632
984263bc 633 /* Copy the permanent MAC address out of the EEPROM */
9c80d176
SZ
634 if (e1000_read_mac_addr(&adapter->hw) < 0) {
635 device_printf(dev, "EEPROM read error while reading MAC"
636 " address\n");
984263bc 637 error = EIO;
af82d4bb 638 goto fail;
984263bc 639 }
9c80d176 640 if (!em_is_valid_eaddr(adapter->hw.mac.addr)) {
87307ba1 641 device_printf(dev, "Invalid MAC address\n");
984263bc 642 error = EIO;
af82d4bb 643 goto fail;
984263bc
MD
644 }
645
9c80d176
SZ
646 /* Allocate transmit descriptors and buffers */
647 error = em_create_tx_ring(adapter);
648 if (error) {
649 device_printf(dev, "Could not setup transmit structures\n");
650 goto fail;
651 }
652
653 /* Allocate receive descriptors and buffers */
654 error = em_create_rx_ring(adapter);
655 if (error) {
656 device_printf(dev, "Could not setup receive structures\n");
657 goto fail;
658 }
659
660 /* Manually turn off all interrupts */
661 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
662
9c80d176 663 /* Determine if we have to control management hardware */
79878f87
SZ
664 if (e1000_enable_mng_pass_thru(&adapter->hw))
665 adapter->flags |= EM_FLAG_HAS_MGMT;
9c80d176
SZ
666
667 /*
668 * Setup Wake-on-Lan
669 */
2d0e5700
SZ
670 apme_mask = EM_EEPROM_APME;
671 eeprom_data = 0;
9c80d176
SZ
672 switch (adapter->hw.mac.type) {
673 case e1000_82542:
674 case e1000_82543:
675 break;
676
2d0e5700
SZ
677 case e1000_82573:
678 case e1000_82583:
79878f87 679 adapter->flags |= EM_FLAG_HAS_AMT;
2d0e5700
SZ
680 /* FALL THROUGH */
681
9c80d176
SZ
682 case e1000_82546:
683 case e1000_82546_rev_3:
684 case e1000_82571:
2d0e5700 685 case e1000_82572:
9c80d176
SZ
686 case e1000_80003es2lan:
687 if (adapter->hw.bus.func == 1) {
688 e1000_read_nvm(&adapter->hw,
689 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
690 } else {
691 e1000_read_nvm(&adapter->hw,
692 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
693 }
2d0e5700
SZ
694 break;
695
696 case e1000_ich8lan:
697 case e1000_ich9lan:
698 case e1000_ich10lan:
699 case e1000_pchlan:
700 case e1000_pch2lan:
701 apme_mask = E1000_WUC_APME;
79878f87 702 adapter->flags |= EM_FLAG_HAS_AMT;
2d0e5700 703 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
9c80d176
SZ
704 break;
705
706 default:
2d0e5700
SZ
707 e1000_read_nvm(&adapter->hw,
708 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
9c80d176
SZ
709 break;
710 }
2d0e5700
SZ
711 if (eeprom_data & apme_mask)
712 adapter->wol = E1000_WUFC_MAG | E1000_WUFC_MC;
713
9c80d176
SZ
714 /*
715 * We have the eeprom settings, now apply the special cases
716 * where the eeprom may be wrong or the board won't support
717 * wake on lan on a particular port
718 */
719 device_id = pci_get_device(dev);
720 switch (device_id) {
721 case E1000_DEV_ID_82546GB_PCIE:
722 adapter->wol = 0;
723 break;
724
725 case E1000_DEV_ID_82546EB_FIBER:
726 case E1000_DEV_ID_82546GB_FIBER:
727 case E1000_DEV_ID_82571EB_FIBER:
728 /*
729 * Wake events only supported on port A for dual fiber
730 * regardless of eeprom setting
731 */
732 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
733 E1000_STATUS_FUNC_1)
734 adapter->wol = 0;
735 break;
736
737 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
738 case E1000_DEV_ID_82571EB_QUAD_COPPER:
739 case E1000_DEV_ID_82571EB_QUAD_FIBER:
740 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
741 /* if quad port adapter, disable WoL on all but port A */
742 if (em_global_quad_port_a != 0)
743 adapter->wol = 0;
744 /* Reset for multiple quad port adapters */
745 if (++em_global_quad_port_a == 4)
746 em_global_quad_port_a = 0;
747 break;
748 }
749
750 /* XXX disable wol */
751 adapter->wol = 0;
752
2d0e5700
SZ
753 /* Setup OS specific network interface */
754 em_setup_ifp(adapter);
755
756 /* Add sysctl tree; this must be done after em_setup_ifp() */
757 em_add_sysctl(adapter);
758
759 /* Reset the hardware */
760 error = em_reset(adapter);
761 if (error) {
762 device_printf(dev, "Unable to reset the hardware\n");
763 goto fail;
764 }
765
766 /* Initialize statistics */
767 em_update_stats(adapter);
768
769 adapter->hw.mac.get_link_status = 1;
770 em_update_link_status(adapter);
771
9c80d176
SZ
772 /* Do we need workaround for 82544 PCI-X adapter? */
773 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
774 adapter->hw.mac.type == e1000_82544)
f647ad3d 775 adapter->pcix_82544 = TRUE;
87307ba1 776 else
f647ad3d 777 adapter->pcix_82544 = FALSE;
af82d4bb 778
9c80d176
SZ
779 if (adapter->pcix_82544) {
780 /*
781 * 82544 on PCI-X may split one TX segment
782 * into two TX descs, so we double its number
783 * of spare TX desc here.
784 */
785 adapter->spare_tx_desc = 2 * EM_TX_SPARE;
786 } else {
787 adapter->spare_tx_desc = EM_TX_SPARE;
788 }
789
9f60d74b
SZ
790 /*
791 * Keep the following relationship between spare_tx_desc, oact_tx_desc
792 * and tx_int_nsegs:
793 * (spare_tx_desc + EM_TX_RESERVED) <=
794 * oact_tx_desc <= EM_TX_OACTIVE_MAX <= tx_int_nsegs
795 */
796 adapter->oact_tx_desc = adapter->num_tx_desc / 8;
797 if (adapter->oact_tx_desc > EM_TX_OACTIVE_MAX)
798 adapter->oact_tx_desc = EM_TX_OACTIVE_MAX;
799 if (adapter->oact_tx_desc < adapter->spare_tx_desc + EM_TX_RESERVED)
800 adapter->oact_tx_desc = adapter->spare_tx_desc + EM_TX_RESERVED;
801
802 adapter->tx_int_nsegs = adapter->num_tx_desc / 16;
803 if (adapter->tx_int_nsegs < adapter->oact_tx_desc)
804 adapter->tx_int_nsegs = adapter->oact_tx_desc;
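/*
 * Illustrative numbers, assuming 512 TX descriptors and that neither
 * clamp above kicks in: oact_tx_desc = 512 / 8 = 64 and tx_int_nsegs =
 * 512 / 16 = 32, which is then raised to 64 so that the relationship
 * documented above (oact_tx_desc <= tx_int_nsegs) still holds.
 */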
805
2d0e5700 806 /* Non-AMT based hardware can now take control from firmware */
79878f87
SZ
807 if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) ==
808 EM_FLAG_HAS_MGMT && adapter->hw.mac.type >= e1000_82571)
2d0e5700
SZ
809 em_get_hw_control(adapter);
810
87ab432b
SZ
811 /*
812 * Missing Interrupt Following ICR read:
813 *
a835687d
SZ
814 * 82571/82572 specification update errata #76
815 * 82573 specification update errata #31
816 * 82574 specification update errata #12
817 * 82583 specification update errata #4
87ab432b
SZ
818 */
819 intr_func = em_intr;
820 if ((adapter->flags & EM_FLAG_SHARED_INTR) &&
821 (adapter->hw.mac.type == e1000_82571 ||
822 adapter->hw.mac.type == e1000_82572 ||
823 adapter->hw.mac.type == e1000_82573 ||
824 adapter->hw.mac.type == e1000_82574 ||
825 adapter->hw.mac.type == e1000_82583))
826 intr_func = em_intr_mask;
827
9c80d176 828 error = bus_setup_intr(dev, adapter->intr_res, INTR_MPSAFE,
87ab432b 829 intr_func, adapter, &adapter->intr_tag,
9c80d176 830 ifp->if_serializer);
af82d4bb 831 if (error) {
9c80d176
SZ
832 device_printf(dev, "Failed to register interrupt handler");
833 ether_ifdetach(&adapter->arpcom.ac_if);
af82d4bb
JS
834 goto fail;
835 }
836
a749d1d2 837 ifp->if_cpuid = rman_get_cpuid(adapter->intr_res);
9db4b353 838 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
9c80d176 839 return (0);
af82d4bb
JS
840fail:
841 em_detach(dev);
9c80d176 842 return (error);
984263bc
MD
843}
844
984263bc
MD
845static int
846em_detach(device_t dev)
847{
78195a76 848 struct adapter *adapter = device_get_softc(dev);
984263bc 849
af82d4bb 850 if (device_is_attached(dev)) {
9c80d176 851 struct ifnet *ifp = &adapter->arpcom.ac_if;
cdf89432
SZ
852
853 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 854
af82d4bb 855 em_stop(adapter);
9c80d176
SZ
856
857 e1000_phy_hw_reset(&adapter->hw);
858
859 em_rel_mgmt(adapter);
2d0e5700 860 em_rel_hw_control(adapter);
9c80d176
SZ
861
862 if (adapter->wol) {
863 E1000_WRITE_REG(&adapter->hw, E1000_WUC,
864 E1000_WUC_PME_EN);
865 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
866 em_enable_wol(dev);
867 }
868
869 bus_teardown_intr(dev, adapter->intr_res, adapter->intr_tag);
870
cdf89432
SZ
871 lwkt_serialize_exit(ifp->if_serializer);
872
873 ether_ifdetach(ifp);
a19a8754 874 } else if (adapter->memory != NULL) {
2d0e5700 875 em_rel_hw_control(adapter);
7ea52455 876 }
cdf89432
SZ
877 bus_generic_detach(dev);
878
9c80d176
SZ
879 em_free_pci_res(adapter);
880
881 em_destroy_tx_ring(adapter, adapter->num_tx_desc);
882 em_destroy_rx_ring(adapter, adapter->num_rx_desc);
af82d4bb 883
984263bc 884 /* Free Transmit Descriptor ring */
9c80d176 885 if (adapter->tx_desc_base)
9ccd8c1f 886 em_dma_free(adapter, &adapter->txdma);
984263bc 887
984263bc 888 /* Free Receive Descriptor ring */
9c80d176 889 if (adapter->rx_desc_base)
9ccd8c1f 890 em_dma_free(adapter, &adapter->rxdma);
9c80d176
SZ
891
892 /* Free top level busdma tag */
893 if (adapter->parent_dtag != NULL)
894 bus_dma_tag_destroy(adapter->parent_dtag);
984263bc 895
1eca7b82 896 /* Free sysctl tree */
9c80d176 897 if (adapter->sysctl_tree != NULL)
1eca7b82 898 sysctl_ctx_free(&adapter->sysctl_ctx);
984263bc 899
a19a8754
SZ
900 if (adapter->mta != NULL)
901 kfree(adapter->mta, M_DEVBUF);
902
87307ba1 903 return (0);
984263bc
MD
904}
905
984263bc
MD
906static int
907em_shutdown(device_t dev)
908{
9c80d176 909 return em_suspend(dev);
87307ba1
SZ
910}
911
87307ba1
SZ
912static int
913em_suspend(device_t dev)
914{
915 struct adapter *adapter = device_get_softc(dev);
9c80d176 916 struct ifnet *ifp = &adapter->arpcom.ac_if;
87307ba1
SZ
917
918 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 919
87307ba1 920 em_stop(adapter);
9c80d176
SZ
921
922 em_rel_mgmt(adapter);
2d0e5700 923 em_rel_hw_control(adapter);
9c80d176 924
2d0e5700 925 if (adapter->wol) {
9c80d176
SZ
926 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
927 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
928 em_enable_wol(dev);
2d0e5700 929 }
9c80d176 930
87307ba1 931 lwkt_serialize_exit(ifp->if_serializer);
9c80d176
SZ
932
933 return bus_generic_suspend(dev);
87307ba1
SZ
934}
935
936static int
937em_resume(device_t dev)
938{
939 struct adapter *adapter = device_get_softc(dev);
9c80d176 940 struct ifnet *ifp = &adapter->arpcom.ac_if;
87307ba1
SZ
941
942 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 943
87307ba1 944 em_init(adapter);
9c80d176 945 em_get_mgmt(adapter);
9db4b353 946 if_devstart(ifp);
9c80d176 947
87307ba1
SZ
948 lwkt_serialize_exit(ifp->if_serializer);
949
950 return bus_generic_resume(dev);
984263bc
MD
951}
952
984263bc
MD
953static void
954em_start(struct ifnet *ifp)
955{
f647ad3d 956 struct adapter *adapter = ifp->if_softc;
9c80d176 957 struct mbuf *m_head;
984263bc 958
1eca7b82 959 ASSERT_SERIALIZED(ifp->if_serializer);
78195a76 960
87307ba1
SZ
961 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
962 return;
9c80d176 963
9db4b353
SZ
964 if (!adapter->link_active) {
965 ifq_purge(&ifp->if_snd);
f647ad3d 966 return;
9db4b353 967 }
9c80d176 968
e26dc3e9 969 while (!ifq_is_empty(&ifp->if_snd)) {
9f60d74b
SZ
970 /* Now do we at least have a minimal? */
971 if (EM_IS_OACTIVE(adapter)) {
972 em_tx_collect(adapter);
9c80d176 973 if (EM_IS_OACTIVE(adapter)) {
9c80d176 974 ifp->if_flags |= IFF_OACTIVE;
9f60d74b 975 adapter->no_tx_desc_avail1++;
9c80d176
SZ
976 break;
977 }
978 }
979
980 logif(pkt_txqueue);
9db4b353 981 m_head = ifq_dequeue(&ifp->if_snd, NULL);
f647ad3d
JS
982 if (m_head == NULL)
983 break;
984263bc 984
9c80d176 985 if (em_encap(adapter, &m_head)) {
002b3a05 986 ifp->if_oerrors++;
9f60d74b
SZ
987 em_tx_collect(adapter);
988 continue;
f647ad3d 989 }
984263bc
MD
990
991 /* Send a copy of the frame to the BPF listener */
b637f170 992 ETHER_BPF_MTAP(ifp, m_head);
87307ba1
SZ
993
994 /* Set timeout in case hardware has problems transmitting. */
995 ifp->if_timer = EM_TX_TIMEOUT;
f647ad3d 996 }
984263bc
MD
997}
998
984263bc 999static int
bd4539cc 1000em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
984263bc 1001{
f647ad3d 1002 struct adapter *adapter = ifp->if_softc;
9c80d176 1003 struct ifreq *ifr = (struct ifreq *)data;
1eca7b82 1004 uint16_t eeprom_data = 0;
9c80d176
SZ
1005 int max_frame_size, mask, reinit;
1006 int error = 0;
0d366ee7 1007
9c80d176 1008 ASSERT_SERIALIZED(ifp->if_serializer);
0d366ee7 1009
984263bc 1010 switch (command) {
984263bc 1011 case SIOCSIFMTU:
9c80d176
SZ
1012 switch (adapter->hw.mac.type) {
1013 case e1000_82573:
1eca7b82
SZ
1014 /*
1015 * 82573 only supports jumbo frames
1016 * if ASPM is disabled.
1017 */
9c80d176
SZ
1018 e1000_read_nvm(&adapter->hw,
1019 NVM_INIT_3GIO_3, 1, &eeprom_data);
1020 if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1eca7b82
SZ
1021 max_frame_size = ETHER_MAX_LEN;
1022 break;
1023 }
9c80d176
SZ
1024 /* FALL THROUGH */
1025
1026 /* Limit Jumbo Frame size */
1027 case e1000_82571:
1028 case e1000_82572:
1029 case e1000_ich9lan:
1030 case e1000_ich10lan:
2d0e5700 1031 case e1000_pch2lan:
9c80d176 1032 case e1000_82574:
6d5e2922 1033 case e1000_82583:
9c80d176 1034 case e1000_80003es2lan:
1eca7b82 1035 max_frame_size = 9234;
7ea52455 1036 break;
9c80d176 1037
2d0e5700
SZ
1038 case e1000_pchlan:
1039 max_frame_size = 4096;
1040 break;
1041
9c80d176
SZ
1042 /* Adapters that do not support jumbo frames */
1043 case e1000_82542:
1044 case e1000_ich8lan:
7ea52455
SZ
1045 max_frame_size = ETHER_MAX_LEN;
1046 break;
9c80d176 1047
7ea52455
SZ
1048 default:
1049 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1050 break;
1051 }
9c80d176
SZ
1052 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1053 ETHER_CRC_LEN) {
984263bc 1054 error = EINVAL;
9c80d176 1055 break;
984263bc 1056 }
9c80d176
SZ
1057
1058 ifp->if_mtu = ifr->ifr_mtu;
1059 adapter->max_frame_size =
1060 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1061
1062 if (ifp->if_flags & IFF_RUNNING)
1063 em_init(adapter);
984263bc 1064 break;
9c80d176 1065
984263bc 1066 case SIOCSIFFLAGS:
984263bc 1067 if (ifp->if_flags & IFF_UP) {
9c80d176
SZ
1068 if ((ifp->if_flags & IFF_RUNNING)) {
1069 if ((ifp->if_flags ^ adapter->if_flags) &
1070 (IFF_PROMISC | IFF_ALLMULTI)) {
1071 em_disable_promisc(adapter);
1072 em_set_promisc(adapter);
1073 }
1074 } else {
78195a76 1075 em_init(adapter);
87307ba1 1076 }
9c80d176
SZ
1077 } else if (ifp->if_flags & IFF_RUNNING) {
1078 em_stop(adapter);
984263bc 1079 }
87307ba1 1080 adapter->if_flags = ifp->if_flags;
984263bc 1081 break;
9c80d176 1082
984263bc
MD
1083 case SIOCADDMULTI:
1084 case SIOCDELMULTI:
984263bc
MD
1085 if (ifp->if_flags & IFF_RUNNING) {
1086 em_disable_intr(adapter);
1087 em_set_multi(adapter);
9c80d176
SZ
1088 if (adapter->hw.mac.type == e1000_82542 &&
1089 adapter->hw.revision_id == E1000_REVISION_2)
1090 em_init_rx_unit(adapter);
1eca7b82 1091#ifdef DEVICE_POLLING
9c80d176 1092 if (!(ifp->if_flags & IFF_POLLING))
1eca7b82 1093#endif
9c80d176 1094 em_enable_intr(adapter);
984263bc
MD
1095 }
1096 break;
9c80d176 1097
984263bc 1098 case SIOCSIFMEDIA:
87307ba1 1099 /* Check SOL/IDER usage */
9c80d176
SZ
1100 if (e1000_check_reset_block(&adapter->hw)) {
1101 device_printf(adapter->dev, "Media change is"
1102 " blocked due to SOL/IDER session.\n");
87307ba1
SZ
1103 break;
1104 }
9c80d176
SZ
1105 /* FALL THROUGH */
1106
984263bc 1107 case SIOCGIFMEDIA:
984263bc
MD
1108 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1109 break;
9c80d176 1110
984263bc 1111 case SIOCSIFCAP:
9c80d176 1112 reinit = 0;
984263bc 1113 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
f54de229
SZ
1114 if (mask & IFCAP_RXCSUM) {
1115 ifp->if_capenable ^= IFCAP_RXCSUM;
1eca7b82 1116 reinit = 1;
984263bc 1117 }
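/*
 * Changing TXCSUM does not require reinitializing the hardware;
 * unlike RXCSUM it only updates if_hwassist below.
 */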
f54de229
SZ
1118 if (mask & IFCAP_TXCSUM) {
1119 ifp->if_capenable ^= IFCAP_TXCSUM;
1120 if (ifp->if_capenable & IFCAP_TXCSUM)
1121 ifp->if_hwassist |= EM_CSUM_FEATURES;
1122 else
1123 ifp->if_hwassist &= ~EM_CSUM_FEATURES;
1124 }
1eca7b82
SZ
1125 if (mask & IFCAP_VLAN_HWTAGGING) {
1126 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1127 reinit = 1;
1128 }
9c80d176 1129 if (reinit && (ifp->if_flags & IFF_RUNNING))
1eca7b82 1130 em_init(adapter);
984263bc 1131 break;
9c80d176 1132
984263bc 1133 default:
1eca7b82
SZ
1134 error = ether_ioctl(ifp, command, data);
1135 break;
984263bc 1136 }
87307ba1 1137 return (error);
984263bc
MD
1138}
1139
984263bc
MD
1140static void
1141em_watchdog(struct ifnet *ifp)
1142{
1eca7b82 1143 struct adapter *adapter = ifp->if_softc;
984263bc 1144
9c80d176
SZ
1145 ASSERT_SERIALIZED(ifp->if_serializer);
1146
1147 /*
1148 * The timer is set to 5 every time start queues a packet.
1149 * Then txeof keeps resetting it as long as it cleans at
1150 * least one descriptor.
1151 * Finally, anytime all descriptors are clean the timer is
1152 * set to 0.
1153 */
1154
9f60d74b
SZ
1155 if (E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1156 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) {
1157 /*
1158 * If we reach here, all TX jobs are completed and
1159 * the TX engine should have been idled for some time.
1160 * We don't need to call if_devstart() here.
1161 */
1162 ifp->if_flags &= ~IFF_OACTIVE;
1163 ifp->if_timer = 0;
1164 return;
1165 }
1166
1eca7b82
SZ
1167 /*
1168 * If we are in this routine because of pause frames, then
984263bc
MD
1169 * don't reset the hardware.
1170 */
9c80d176
SZ
1171 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
1172 E1000_STATUS_TXOFF) {
984263bc
MD
1173 ifp->if_timer = EM_TX_TIMEOUT;
1174 return;
1175 }
1176
9c80d176 1177 if (e1000_check_for_link(&adapter->hw) == 0)
f647ad3d 1178 if_printf(ifp, "watchdog timeout -- resetting\n");
984263bc 1179
9c80d176
SZ
1180 ifp->if_oerrors++;
1181 adapter->watchdog_events++;
1182
984263bc
MD
1183 em_init(adapter);
1184
9c80d176
SZ
1185 if (!ifq_is_empty(&ifp->if_snd))
1186 if_devstart(ifp);
984263bc
MD
1187}
1188
984263bc 1189static void
9c80d176 1190em_init(void *xsc)
984263bc 1191{
9c80d176
SZ
1192 struct adapter *adapter = xsc;
1193 struct ifnet *ifp = &adapter->arpcom.ac_if;
1194 device_t dev = adapter->dev;
eac00e59 1195 uint32_t pba;
984263bc 1196
87307ba1
SZ
1197 ASSERT_SERIALIZED(ifp->if_serializer);
1198
984263bc
MD
1199 em_stop(adapter);
1200
eac00e59
SZ
1201 /*
1202 * Packet Buffer Allocation (PBA)
1203 * Writing PBA sets the receive portion of the buffer;
1204 * the remainder is used for the transmit buffer.
1eca7b82
SZ
1205 *
1206 * Devices before the 82547 had a Packet Buffer of 64K.
1207 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1208 * After the 82547 the buffer was reduced to 40K.
1209 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1210 * Note: default does not leave enough room for Jumbo Frame >10k.
eac00e59 1211 */
9c80d176
SZ
1212 switch (adapter->hw.mac.type) {
1213 case e1000_82547:
1214 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1215 if (adapter->max_frame_size > 8192)
eac00e59 1216 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
7ea52455
SZ
1217 else
1218 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
eac00e59
SZ
1219 adapter->tx_fifo_head = 0;
1220 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1221 adapter->tx_fifo_size =
9c80d176 1222 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
7ea52455 1223 break;
9c80d176 1224
87307ba1 1225 /* Total Packet Buffer on these is 48K */
9c80d176
SZ
1226 case e1000_82571:
1227 case e1000_82572:
1228 case e1000_80003es2lan:
7ea52455
SZ
1229 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1230 break;
9c80d176
SZ
1231
1232 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
7ea52455
SZ
1233 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1234 break;
9c80d176
SZ
1235
1236 case e1000_82574:
2d0e5700 1237 case e1000_82583:
9c80d176 1238 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1eca7b82 1239 break;
9c80d176 1240
2d0e5700
SZ
1241 case e1000_ich8lan:
1242 pba = E1000_PBA_8K;
1243 break;
1244
9c80d176
SZ
1245 case e1000_ich9lan:
1246 case e1000_ich10lan:
1247#define E1000_PBA_10K 0x000A
b0ff1d56
MS
1248 pba = E1000_PBA_10K;
1249 break;
9c80d176 1250
2d0e5700
SZ
1251 case e1000_pchlan:
1252 case e1000_pch2lan:
1253 pba = E1000_PBA_26K;
9c80d176
SZ
1254 break;
1255
7ea52455
SZ
1256 default:
1257 /* Devices before 82547 had a Packet Buffer of 64K. */
9c80d176 1258 if (adapter->max_frame_size > 8192)
7ea52455
SZ
1259 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1260 else
1261 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
eac00e59 1262 }
9c80d176 1263 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
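/*
 * Example for the 82547 case above with a standard MTU: pba =
 * E1000_PBA_30K assigns 30KB of the 40KB packet buffer to receive, and
 * tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT leaves the
 * remaining ~10KB for transmit -- assuming the PBA constants are in KB
 * units and EM_PBA_BYTES_SHIFT converts KB to bytes.
 */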
a4a205fa 1264
0d366ee7 1265 /* Get the latest mac address, User can use a LAA */
9c80d176
SZ
1266 bcopy(IF_LLADDR(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN);
1267
1268 /* Put the address into the Receive Address Array */
1269 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1270
1271 /*
1272 * With the 82571 adapter, RAR[0] may be overwritten
1273 * when the other port is reset, we make a duplicate
1274 * in RAR[14] for that eventuality, this assures
1275 * the interface continues to function.
1276 */
1277 if (adapter->hw.mac.type == e1000_82571) {
1278 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1279 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1280 E1000_RAR_ENTRIES - 1);
1281 }
0d366ee7 1282
2d0e5700
SZ
1283 /* Reset the hardware */
1284 if (em_reset(adapter)) {
1285 device_printf(dev, "Unable to reset the hardware\n");
9c80d176 1286 /* XXX em_stop()? */
984263bc
MD
1287 return;
1288 }
87307ba1 1289 em_update_link_status(adapter);
984263bc 1290
9c80d176
SZ
1291 /* Setup VLAN support, basic and offload if available */
1292 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
984263bc 1293
9c80d176
SZ
1294 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1295 uint32_t ctrl;
1296
1297 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1298 ctrl |= E1000_CTRL_VME;
1299 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
87307ba1
SZ
1300 }
1301
9c80d176
SZ
1302 /* Configure for OS presence */
1303 em_get_mgmt(adapter);
1304
984263bc 1305 /* Prepare transmit descriptors and buffers */
9c80d176
SZ
1306 em_init_tx_ring(adapter);
1307 em_init_tx_unit(adapter);
984263bc
MD
1308
1309 /* Setup Multicast table */
1310 em_set_multi(adapter);
1311
1312 /* Prepare receive descriptors and buffers */
9c80d176
SZ
1313 if (em_init_rx_ring(adapter)) {
1314 device_printf(dev, "Could not setup receive structures\n");
984263bc 1315 em_stop(adapter);
984263bc
MD
1316 return;
1317 }
9c80d176 1318 em_init_rx_unit(adapter);
7ea52455 1319
87307ba1 1320 /* Don't lose promiscuous settings */
0d366ee7 1321 em_set_promisc(adapter);
984263bc 1322
984263bc
MD
1323 ifp->if_flags |= IFF_RUNNING;
1324 ifp->if_flags &= ~IFF_OACTIVE;
1325
9c80d176
SZ
1326 callout_reset(&adapter->timer, hz, em_timer, adapter);
1327 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1328
1329 /* MSI/X configuration for 82574 */
1330 if (adapter->hw.mac.type == e1000_82574) {
1331 int tmp;
1332
1333 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1334 tmp |= E1000_CTRL_EXT_PBA_CLR;
1335 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1336 /*
2d0e5700 1337 * XXX MSIX
9c80d176
SZ
1338 * Set the IVAR - interrupt vector routing.
1339 * Each nibble represents a vector, high bit
1340 * is enable, other 3 bits are the MSIX table
1341 * entry, we map RXQ0 to 0, TXQ0 to 1, and
1342 * Link (other) to 2, hence the magic number.
1343 */
1344 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1345 }
1eca7b82
SZ
1346
1347#ifdef DEVICE_POLLING
9c80d176
SZ
1348 /*
1349 * Only enable interrupts if we are not polling, make sure
1350 * they are off otherwise.
1351 */
1eca7b82
SZ
1352 if (ifp->if_flags & IFF_POLLING)
1353 em_disable_intr(adapter);
1354 else
9c80d176
SZ
1355#endif /* DEVICE_POLLING */
1356 em_enable_intr(adapter);
0d366ee7 1357
2d0e5700 1358 /* AMT based hardware can now take control from firmware */
79878f87
SZ
1359 if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) ==
1360 (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT) &&
2d0e5700
SZ
1361 adapter->hw.mac.type >= e1000_82571)
1362 em_get_hw_control(adapter);
1363
0d366ee7 1364 /* Don't reset the phy next time init gets called */
9c80d176 1365 adapter->hw.phy.reset_disable = TRUE;
984263bc
MD
1366}
1367
984263bc 1368#ifdef DEVICE_POLLING
f647ad3d
JS
1369
1370static void
984263bc
MD
1371em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1372{
f647ad3d
JS
1373 struct adapter *adapter = ifp->if_softc;
1374 uint32_t reg_icr;
984263bc 1375
78195a76
MD
1376 ASSERT_SERIALIZED(ifp->if_serializer);
1377
9c80d176 1378 switch (cmd) {
9c095379
MD
1379 case POLL_REGISTER:
1380 em_disable_intr(adapter);
1381 break;
9c80d176 1382
9c095379 1383 case POLL_DEREGISTER:
f647ad3d 1384 em_enable_intr(adapter);
9c095379 1385 break;
9c80d176 1386
9c095379 1387 case POLL_AND_CHECK_STATUS:
9c80d176 1388 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
f647ad3d 1389 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
9ccd8c1f 1390 callout_stop(&adapter->timer);
9c80d176 1391 adapter->hw.mac.get_link_status = 1;
87307ba1 1392 em_update_link_status(adapter);
9c80d176 1393 callout_reset(&adapter->timer, hz, em_timer, adapter);
f647ad3d 1394 }
9c80d176 1395 /* FALL THROUGH */
9c095379
MD
1396 case POLL_ONLY:
1397 if (ifp->if_flags & IFF_RUNNING) {
87307ba1
SZ
1398 em_rxeof(adapter, count);
1399 em_txeof(adapter);
1eca7b82 1400
9c095379 1401 if (!ifq_is_empty(&ifp->if_snd))
9db4b353 1402 if_devstart(ifp);
9c095379
MD
1403 }
1404 break;
f647ad3d 1405 }
984263bc 1406}
9c095379 1407
984263bc
MD
1408#endif /* DEVICE_POLLING */
1409
984263bc 1410static void
9c80d176 1411em_intr(void *xsc)
984263bc 1412{
87ab432b
SZ
1413 em_intr_body(xsc, TRUE);
1414}
1415
1416static void
1417em_intr_body(struct adapter *adapter, boolean_t chk_asserted)
1418{
9c80d176 1419 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 1420 uint32_t reg_icr;
984263bc 1421
07855a48 1422 logif(intr_beg);
78195a76
MD
1423 ASSERT_SERIALIZED(ifp->if_serializer);
1424
9c80d176
SZ
1425 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1426
87ab432b
SZ
1427 if (chk_asserted &&
1428 ((adapter->hw.mac.type >= e1000_82571 &&
1429 (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
1430 reg_icr == 0)) {
07855a48 1431 logif(intr_end);
984263bc 1432 return;
07855a48 1433 }
984263bc 1434
87307ba1 1435 /*
9c80d176
SZ
1436 * XXX: some laptops trigger several spurious interrupts
1437 * on em(4) when in the resume cycle. The ICR register
1438 * reports all-ones value in this case. Processing such
1439 * interrupts would lead to a freeze. I don't know why.
87307ba1
SZ
1440 */
1441 if (reg_icr == 0xffffffff) {
1442 logif(intr_end);
1443 return;
984263bc
MD
1444 }
1445
79938e61 1446 if (ifp->if_flags & IFF_RUNNING) {
9f60d74b 1447 if (reg_icr &
6643d744 1448 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO))
9f60d74b 1449 em_rxeof(adapter, -1);
6643d744 1450 if (reg_icr & E1000_ICR_TXDW) {
9f60d74b
SZ
1451 em_txeof(adapter);
1452 if (!ifq_is_empty(&ifp->if_snd))
1453 if_devstart(ifp);
1454 }
f647ad3d 1455 }
984263bc 1456
87307ba1
SZ
1457 /* Link status change */
1458 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1459 callout_stop(&adapter->timer);
9c80d176 1460 adapter->hw.mac.get_link_status = 1;
87307ba1 1461 em_update_link_status(adapter);
9c80d176
SZ
1462
1463 /* Deal with TX cruft when link lost */
1464 em_tx_purge(adapter);
1465
1466 callout_reset(&adapter->timer, hz, em_timer, adapter);
87307ba1
SZ
1467 }
1468
1469 if (reg_icr & E1000_ICR_RXO)
1470 adapter->rx_overruns++;
1471
07855a48 1472 logif(intr_end);
984263bc
MD
1473}
1474
984263bc 1475static void
87ab432b
SZ
1476em_intr_mask(void *xsc)
1477{
1478 struct adapter *adapter = xsc;
1479
1480 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
1481 /*
1482 * NOTE:
1483 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
1484 * so don't check it.
1485 */
1486 em_intr_body(adapter, FALSE);
1487 E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
1488}
1489
1490static void
984263bc
MD
1491em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1492{
87307ba1 1493 struct adapter *adapter = ifp->if_softc;
1eca7b82 1494 u_char fiber_type = IFM_1000_SX;
984263bc 1495
78195a76
MD
1496 ASSERT_SERIALIZED(ifp->if_serializer);
1497
87307ba1 1498 em_update_link_status(adapter);
984263bc
MD
1499
1500 ifmr->ifm_status = IFM_AVALID;
1501 ifmr->ifm_active = IFM_ETHER;
1502
1503 if (!adapter->link_active)
1504 return;
1505
1506 ifmr->ifm_status |= IFM_ACTIVE;
1507
9c80d176
SZ
1508 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
1509 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
1510 if (adapter->hw.mac.type == e1000_82545)
1eca7b82
SZ
1511 fiber_type = IFM_1000_LX;
1512 ifmr->ifm_active |= fiber_type | IFM_FDX;
984263bc
MD
1513 } else {
1514 switch (adapter->link_speed) {
1515 case 10:
1516 ifmr->ifm_active |= IFM_10_T;
1517 break;
1518 case 100:
1519 ifmr->ifm_active |= IFM_100_TX;
1520 break;
9c80d176 1521
984263bc 1522 case 1000:
7f259627 1523 ifmr->ifm_active |= IFM_1000_T;
984263bc
MD
1524 break;
1525 }
1526 if (adapter->link_duplex == FULL_DUPLEX)
1527 ifmr->ifm_active |= IFM_FDX;
1528 else
1529 ifmr->ifm_active |= IFM_HDX;
1530 }
984263bc
MD
1531}
1532
984263bc
MD
1533static int
1534em_media_change(struct ifnet *ifp)
1535{
87307ba1
SZ
1536 struct adapter *adapter = ifp->if_softc;
1537 struct ifmedia *ifm = &adapter->media;
984263bc 1538
78195a76 1539 ASSERT_SERIALIZED(ifp->if_serializer);
9c095379 1540
87307ba1
SZ
1541 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1542 return (EINVAL);
1543
984263bc
MD
1544 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1545 case IFM_AUTO:
9c80d176
SZ
1546 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1547 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
984263bc 1548 break;
9c80d176 1549
1eca7b82 1550 case IFM_1000_LX:
984263bc 1551 case IFM_1000_SX:
7f259627 1552 case IFM_1000_T:
9c80d176
SZ
1553 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1554 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
984263bc 1555 break;
9c80d176 1556
984263bc 1557 case IFM_100_TX:
9c80d176
SZ
1558 adapter->hw.mac.autoneg = FALSE;
1559 adapter->hw.phy.autoneg_advertised = 0;
984263bc 1560 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
9c80d176 1561 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
984263bc 1562 else
9c80d176 1563 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
984263bc 1564 break;
9c80d176 1565
984263bc 1566 case IFM_10_T:
9c80d176
SZ
1567 adapter->hw.mac.autoneg = FALSE;
1568 adapter->hw.phy.autoneg_advertised = 0;
984263bc 1569 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
9c80d176 1570 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
984263bc 1571 else
9c80d176 1572 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
984263bc 1573 break;
9c80d176 1574
984263bc 1575 default:
f647ad3d 1576 if_printf(ifp, "Unsupported media type\n");
9c80d176 1577 break;
984263bc 1578 }
9c80d176 1579
f647ad3d 1580 /*
9c80d176 1581 * As the speed/duplex settings may have changed we need to
f647ad3d
JS
1582 * reset the PHY.
1583 */
9c80d176 1584 adapter->hw.phy.reset_disable = FALSE;
984263bc 1585
78195a76 1586 em_init(adapter);
984263bc 1587
9c80d176 1588 return (0);
9ccd8c1f
JS
1589}
1590
984263bc 1591static int
9c80d176 1592em_encap(struct adapter *adapter, struct mbuf **m_headp)
9ccd8c1f 1593{
9c80d176 1594 bus_dma_segment_t segs[EM_MAX_SCATTER];
1eca7b82 1595 bus_dmamap_t map;
9c80d176
SZ
1596 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1597 struct e1000_tx_desc *ctxd = NULL;
002b3a05 1598 struct mbuf *m_head = *m_headp;
9f60d74b 1599 uint32_t txd_upper, txd_lower, txd_used, cmd = 0;
9c80d176 1600 int maxsegs, nsegs, i, j, first, last = 0, error;
984263bc 1601
3752657e 1602 if (m_head->m_len < EM_TXCSUM_MINHL &&
002b3a05
SZ
1603 (m_head->m_flags & EM_CSUM_FEATURES)) {
1604 /*
1605 * Make sure that ethernet header and ip.ip_hl are in
1606 * contiguous memory, since if TXCSUM is enabled, later
1607 * TX context descriptor setup later needs to access ip.ip_hl.
1608 */
1609 error = em_txcsum_pullup(adapter, m_headp);
1610 if (error) {
1611 KKASSERT(*m_headp == NULL);
1612 return error;
1613 }
1614 m_head = *m_headp;
1615 }
1616
9c80d176
SZ
1617 txd_upper = txd_lower = 0;
1618 txd_used = 0;
87307ba1
SZ
1619
1620 /*
9c80d176
SZ
1621 * Capture the first descriptor index, this descriptor
1622 * will have the index of the EOP which is the only one
1623 * that now gets a DONE bit writeback.
87307ba1 1624 */
9c80d176
SZ
1625 first = adapter->next_avail_tx_desc;
1626 tx_buffer = &adapter->tx_buffer_area[first];
1627 tx_buffer_mapped = tx_buffer;
1628 map = tx_buffer->map;
87307ba1 1629
9c80d176
SZ
1630 maxsegs = adapter->num_tx_desc_avail - EM_TX_RESERVED;
1631 KASSERT(maxsegs >= adapter->spare_tx_desc,
ed20d0e3 1632 ("not enough spare TX desc"));
9c80d176
SZ
1633 if (adapter->pcix_82544) {
1634 /* Half it; see the comment in em_attach() */
1635 maxsegs >>= 1;
9ccd8c1f 1636 }
9c80d176
SZ
1637 if (maxsegs > EM_MAX_SCATTER)
1638 maxsegs = EM_MAX_SCATTER;
984263bc 1639
9c80d176
SZ
1640 error = bus_dmamap_load_mbuf_defrag(adapter->txtag, map, m_headp,
1641 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1642 if (error) {
1643 if (error == ENOBUFS)
1644 adapter->mbuf_alloc_failed++;
1645 else
1646 adapter->no_tx_dma_setup++;
984263bc 1647
9c80d176
SZ
1648 m_freem(*m_headp);
1649 *m_headp = NULL;
1650 return error;
7ea52455 1651 }
9c80d176 1652 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
984263bc 1653
9c80d176 1654 m_head = *m_headp;
9f60d74b 1655 adapter->tx_nsegs += nsegs;
9c80d176 1656
002b3a05 1657 if (m_head->m_pkthdr.csum_flags & EM_CSUM_FEATURES) {
9c80d176 1658 /* TX csum offloading will consume one TX desc */
9f60d74b
SZ
1659 adapter->tx_nsegs += em_txcsum(adapter, m_head,
1660 &txd_upper, &txd_lower);
9c80d176 1661 }
984263bc 1662 i = adapter->next_avail_tx_desc;
87307ba1
SZ
1663
1664 /* Set up our transmit descriptors */
9c80d176 1665 for (j = 0; j < nsegs; j++) {
9ccd8c1f
JS
1666 /* If adapter is 82544 and on PCIX bus */
 1667 if (adapter->pcix_82544) {
87307ba1
SZ
1668 DESC_ARRAY desc_array;
1669 uint32_t array_elements, counter;
1670
9c80d176 1671 /*
f647ad3d
JS
1672 * Check the Address and Length combination and
1673 * split the data accordingly
9ccd8c1f 1674 */
9c80d176
SZ
1675 array_elements = em_82544_fill_desc(segs[j].ds_addr,
1676 segs[j].ds_len, &desc_array);
9ccd8c1f 1677 for (counter = 0; counter < array_elements; counter++) {
9c80d176
SZ
1678 KKASSERT(txd_used < adapter->num_tx_desc_avail);
1679
9ccd8c1f 1680 tx_buffer = &adapter->tx_buffer_area[i];
9c80d176
SZ
1681 ctxd = &adapter->tx_desc_base[i];
1682
1683 ctxd->buffer_addr = htole64(
1684 desc_array.descriptor[counter].address);
1685 ctxd->lower.data = htole32(
2af74b85 1686 E1000_TXD_CMD_IFCS | txd_lower |
9c80d176
SZ
1687 desc_array.descriptor[counter].length);
1688 ctxd->upper.data = htole32(txd_upper);
87307ba1
SZ
1689
1690 last = i;
9ccd8c1f
JS
1691 if (++i == adapter->num_tx_desc)
1692 i = 0;
1693
9ccd8c1f 1694 txd_used++;
9c80d176 1695 }
9ccd8c1f 1696 } else {
0d366ee7 1697 tx_buffer = &adapter->tx_buffer_area[i];
9c80d176 1698 ctxd = &adapter->tx_desc_base[i];
9ccd8c1f 1699
9c80d176 1700 ctxd->buffer_addr = htole64(segs[j].ds_addr);
2af74b85 1701 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
9c80d176
SZ
1702 txd_lower | segs[j].ds_len);
1703 ctxd->upper.data = htole32(txd_upper);
984263bc 1704
87307ba1 1705 last = i;
0d366ee7
MD
1706 if (++i == adapter->num_tx_desc)
1707 i = 0;
0d366ee7 1708 }
984263bc 1709 }
9ccd8c1f 1710
984263bc 1711 adapter->next_avail_tx_desc = i;
9c80d176
SZ
1712 if (adapter->pcix_82544) {
1713 KKASSERT(adapter->num_tx_desc_avail > txd_used);
9ccd8c1f 1714 adapter->num_tx_desc_avail -= txd_used;
9c80d176
SZ
1715 } else {
1716 KKASSERT(adapter->num_tx_desc_avail > nsegs);
1717 adapter->num_tx_desc_avail -= nsegs;
1718 }
984263bc 1719
9c80d176 1720 /* Handle VLAN tag */
83790f85 1721 if (m_head->m_flags & M_VLANTAG) {
9c80d176
SZ
1722 /* Set the vlan id. */
1723 ctxd->upper.fields.special =
1724 htole16(m_head->m_pkthdr.ether_vlantag);
9ccd8c1f 1725
f647ad3d 1726 /* Tell hardware to add tag */
9c80d176 1727 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
f647ad3d 1728 }
984263bc
MD
1729
1730 tx_buffer->m_head = m_head;
9c80d176 1731 tx_buffer_mapped->map = tx_buffer->map;
1eca7b82 1732 tx_buffer->map = map;
9ccd8c1f 1733
9f60d74b
SZ
1734 if (adapter->tx_nsegs >= adapter->tx_int_nsegs) {
1735 adapter->tx_nsegs = 0;
4e4e8481
SZ
1736
1737 /*
1738 * Report Status (RS) is turned on
1739 * every tx_int_nsegs descriptors.
1740 */
9f60d74b
SZ
1741 cmd = E1000_TXD_CMD_RS;
1742
b4b0a2b4
SZ
1743 /*
1744 * Keep track of the descriptor, which will
1745 * be written back by hardware.
1746 */
9f60d74b
SZ
1747 adapter->tx_dd[adapter->tx_dd_tail] = last;
1748 EM_INC_TXDD_IDX(adapter->tx_dd_tail);
1749 KKASSERT(adapter->tx_dd_tail != adapter->tx_dd_head);
1750 }
1751
9ccd8c1f 1752 /*
984263bc 1753 * Last Descriptor of Packet needs End Of Packet (EOP)
87307ba1 1754 */
9f60d74b 1755 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);
87307ba1
SZ
1756
1757 /*
9c80d176 1758 * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
984263bc
MD
1759 * that this frame is available to transmit.
1760 */
9c80d176 1761 if (adapter->hw.mac.type == e1000_82547 &&
984263bc 1762 adapter->link_duplex == HALF_DUPLEX) {
cfefda96 1763 em_82547_move_tail_serialized(adapter);
9ccd8c1f 1764 } else {
9c80d176
SZ
1765 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1766 if (adapter->hw.mac.type == e1000_82547) {
cfefda96 1767 em_82547_update_fifo_head(adapter,
9c80d176 1768 m_head->m_pkthdr.len);
984263bc
MD
1769 }
1770 }
87307ba1 1771 return (0);
984263bc
MD
1772}
1773
9c80d176 1774/*
984263bc 1775 * 82547 workaround to avoid controller hang in half-duplex environment.
87307ba1 1776 * The workaround is to avoid queuing a large packet that would span
9c80d176
SZ
1777 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 1778 * in this case. We do that only when the FIFO is quiescent.
1779 */
9c095379 1780static void
1eca7b82 1781em_82547_move_tail_serialized(struct adapter *adapter)
9c095379 1782{
9c80d176
SZ
1783 struct e1000_tx_desc *tx_desc;
1784 uint16_t hw_tdt, sw_tdt, length = 0;
1785 bool eop = 0;
984263bc 1786
9c80d176
SZ
1787 ASSERT_SERIALIZED(adapter->arpcom.ac_if.if_serializer);
1788
1789 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
984263bc 1790 sw_tdt = adapter->next_avail_tx_desc;
f647ad3d 1791
984263bc
MD
1792 while (hw_tdt != sw_tdt) {
1793 tx_desc = &adapter->tx_desc_base[hw_tdt];
1794 length += tx_desc->lower.flags.length;
1795 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
87307ba1 1796 if (++hw_tdt == adapter->num_tx_desc)
984263bc
MD
1797 hw_tdt = 0;
1798
87307ba1 1799 if (eop) {
984263bc 1800 if (em_82547_fifo_workaround(adapter, length)) {
eac00e59 1801 adapter->tx_fifo_wrk_cnt++;
9ccd8c1f
JS
1802 callout_reset(&adapter->tx_fifo_timer, 1,
1803 em_82547_move_tail, adapter);
1804 break;
984263bc 1805 }
9c80d176 1806 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
9ccd8c1f
JS
1807 em_82547_update_fifo_head(adapter, length);
1808 length = 0;
984263bc 1809 }
9c80d176
SZ
1810 }
1811}
1812
1813static void
1814em_82547_move_tail(void *xsc)
1815{
1816 struct adapter *adapter = xsc;
1817 struct ifnet *ifp = &adapter->arpcom.ac_if;
1818
1819 lwkt_serialize_enter(ifp->if_serializer);
1820 em_82547_move_tail_serialized(adapter);
1821 lwkt_serialize_exit(ifp->if_serializer);
984263bc
MD
1822}
1823
1824static int
1825em_82547_fifo_workaround(struct adapter *adapter, int len)
1826{
1827 int fifo_space, fifo_pkt_len;
1828
1eca7b82 1829 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
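	/*
	 * Note: the packet length plus the per-packet FIFO header is
	 * rounded up to EM_FIFO_HDR granularity here, matching how
	 * em_82547_update_fifo_head() later advances tx_fifo_head.
	 */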
984263bc
MD
1830
1831 if (adapter->link_duplex == HALF_DUPLEX) {
eac00e59 1832 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
984263bc
MD
1833
1834 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
f647ad3d 1835 if (em_82547_tx_fifo_reset(adapter))
87307ba1 1836 return (0);
f647ad3d 1837 else
87307ba1 1838 return (1);
984263bc
MD
1839 }
1840 }
87307ba1 1841 return (0);
984263bc
MD
1842}
1843
1844static void
1845em_82547_update_fifo_head(struct adapter *adapter, int len)
1846{
1eca7b82 1847 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
f647ad3d 1848
984263bc
MD
1849 /* tx_fifo_head is always 16 byte aligned */
1850 adapter->tx_fifo_head += fifo_pkt_len;
eac00e59
SZ
1851 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1852 adapter->tx_fifo_head -= adapter->tx_fifo_size;
984263bc
MD
1853}
1854
984263bc
MD
1855static int
1856em_82547_tx_fifo_reset(struct adapter *adapter)
7ea52455 1857{
984263bc
MD
1858 uint32_t tctl;
1859
9c80d176
SZ
1860 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1861 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1862 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1863 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1864 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1865 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1866 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
984263bc 1867 /* Disable TX unit */
9c80d176
SZ
1868 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1869 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1870 tctl & ~E1000_TCTL_EN);
984263bc
MD
1871
1872 /* Reset FIFO pointers */
9c80d176
SZ
1873 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1874 adapter->tx_head_addr);
1875 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1876 adapter->tx_head_addr);
1877 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1878 adapter->tx_head_addr);
1879 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1880 adapter->tx_head_addr);
984263bc
MD
1881
1882 /* Re-enable TX unit */
9c80d176 1883 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
984263bc
MD
1884 E1000_WRITE_FLUSH(&adapter->hw);
1885
1886 adapter->tx_fifo_head = 0;
eac00e59 1887 adapter->tx_fifo_reset_cnt++;
984263bc 1888
87307ba1 1889 return (TRUE);
eac00e59 1890 } else {
87307ba1 1891 return (FALSE);
984263bc
MD
1892 }
1893}
1894
1895static void
f647ad3d 1896em_set_promisc(struct adapter *adapter)
984263bc 1897{
9c80d176 1898 struct ifnet *ifp = &adapter->arpcom.ac_if;
1eca7b82 1899 uint32_t reg_rctl;
984263bc 1900
9c80d176 1901 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
984263bc
MD
1902
1903 if (ifp->if_flags & IFF_PROMISC) {
1904 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
9c80d176
SZ
1905 /* Turn this on if you want to see bad packets */
1906 if (em_debug_sbp)
1907 reg_rctl |= E1000_RCTL_SBP;
1908 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
984263bc
MD
1909 } else if (ifp->if_flags & IFF_ALLMULTI) {
1910 reg_rctl |= E1000_RCTL_MPE;
1911 reg_rctl &= ~E1000_RCTL_UPE;
9c80d176 1912 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
984263bc 1913 }
984263bc
MD
1914}
1915
1916static void
f647ad3d 1917em_disable_promisc(struct adapter *adapter)
984263bc 1918{
f647ad3d 1919 uint32_t reg_rctl;
984263bc 1920
9c80d176 1921 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
984263bc 1922
9c80d176
SZ
1923 reg_rctl &= ~E1000_RCTL_UPE;
1924 reg_rctl &= ~E1000_RCTL_MPE;
1925 reg_rctl &= ~E1000_RCTL_SBP;
1926 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
984263bc
MD
1927}
1928
984263bc 1929static void
f647ad3d 1930em_set_multi(struct adapter *adapter)
984263bc 1931{
9c80d176 1932 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 1933 struct ifmultiaddr *ifma;
9c80d176 1934 uint32_t reg_rctl = 0;
2d0e5700 1935 uint8_t *mta;
f647ad3d 1936 int mcnt = 0;
f647ad3d 1937
2d0e5700
SZ
1938 mta = adapter->mta;
1939 bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
1940
9c80d176
SZ
1941 if (adapter->hw.mac.type == e1000_82542 &&
1942 adapter->hw.revision_id == E1000_REVISION_2) {
1943 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1944 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1945 e1000_pci_clear_mwi(&adapter->hw);
f647ad3d 1946 reg_rctl |= E1000_RCTL_RST;
9c80d176 1947 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
f647ad3d
JS
1948 msec_delay(5);
1949 }
984263bc 1950
441d34b2 1951 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
f647ad3d
JS
1952 if (ifma->ifma_addr->sa_family != AF_LINK)
1953 continue;
1954
1955 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1956 break;
984263bc 1957
f647ad3d 1958 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
9c80d176 1959 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
f647ad3d
JS
1960 mcnt++;
1961 }
1962
1963 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
9c80d176 1964 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
f647ad3d 1965 reg_rctl |= E1000_RCTL_MPE;
9c80d176 1966 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
7ea52455 1967 } else {
6a5a645e 1968 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
7ea52455 1969 }
f647ad3d 1970
9c80d176
SZ
1971 if (adapter->hw.mac.type == e1000_82542 &&
1972 adapter->hw.revision_id == E1000_REVISION_2) {
1973 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
f647ad3d 1974 reg_rctl &= ~E1000_RCTL_RST;
9c80d176 1975 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
f647ad3d 1976 msec_delay(5);
9c80d176
SZ
1977 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1978 e1000_pci_set_mwi(&adapter->hw);
f647ad3d
JS
1979 }
1980}
984263bc 1981
9c80d176
SZ
1982/*
1983 * This routine checks for link status and updates statistics.
1984 */
984263bc 1985static void
9c80d176 1986em_timer(void *xsc)
984263bc 1987{
9c80d176
SZ
1988 struct adapter *adapter = xsc;
1989 struct ifnet *ifp = &adapter->arpcom.ac_if;
984263bc 1990
78195a76 1991 lwkt_serialize_enter(ifp->if_serializer);
984263bc 1992
87307ba1 1993 em_update_link_status(adapter);
9c80d176
SZ
1994 em_update_stats(adapter);
1995
1996 /* Reset LAA into RAR[0] on 82571 */
1997 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
1998 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1999
2000 if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
984263bc 2001 em_print_hw_stats(adapter);
9c80d176 2002
984263bc
MD
2003 em_smartspeed(adapter);
2004
9c80d176 2005 callout_reset(&adapter->timer, hz, em_timer, adapter);
984263bc 2006
78195a76 2007 lwkt_serialize_exit(ifp->if_serializer);
984263bc
MD
2008}
2009
2010static void
87307ba1 2011em_update_link_status(struct adapter *adapter)
984263bc 2012{
9c80d176
SZ
2013 struct e1000_hw *hw = &adapter->hw;
2014 struct ifnet *ifp = &adapter->arpcom.ac_if;
2015 device_t dev = adapter->dev;
2016 uint32_t link_check = 0;
2017
2018 /* Get the cached link value or read phy for real */
2019 switch (hw->phy.media_type) {
2020 case e1000_media_type_copper:
2021 if (hw->mac.get_link_status) {
2022 /* Do the work to read phy */
2023 e1000_check_for_link(hw);
2024 link_check = !hw->mac.get_link_status;
2025 if (link_check) /* ESB2 fix */
2026 e1000_cfg_on_link_up(hw);
2027 } else {
2028 link_check = TRUE;
984263bc 2029 }
9c80d176
SZ
2030 break;
2031
2032 case e1000_media_type_fiber:
2033 e1000_check_for_link(hw);
2034 link_check =
2035 E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
2036 break;
2037
2038 case e1000_media_type_internal_serdes:
2039 e1000_check_for_link(hw);
2040 link_check = adapter->hw.mac.serdes_has_link;
2041 break;
2042
2043 case e1000_media_type_unknown:
2044 default:
2045 break;
2046 }
2047
2048 /* Now check for a transition */
2049 if (link_check && adapter->link_active == 0) {
2050 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2051 &adapter->link_duplex);
cb5a6be6
SZ
2052
2053 /*
2054 * Check if we should enable/disable SPEED_MODE bit on
2055 * 82571/82572
2056 */
2d0e5700
SZ
2057 if (adapter->link_speed != SPEED_1000 &&
2058 (hw->mac.type == e1000_82571 ||
2059 hw->mac.type == e1000_82572)) {
9c80d176
SZ
2060 int tarc0;
2061
2062 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2d0e5700 2063 tarc0 &= ~SPEED_MODE_BIT;
9c80d176 2064 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
984263bc 2065 }
9c80d176
SZ
2066 if (bootverbose) {
2067 device_printf(dev, "Link is up %d Mbps %s\n",
2068 adapter->link_speed,
2069 ((adapter->link_duplex == FULL_DUPLEX) ?
2070 "Full Duplex" : "Half Duplex"));
2071 }
2072 adapter->link_active = 1;
2073 adapter->smartspeed = 0;
2074 ifp->if_baudrate = adapter->link_speed * 1000000;
2075 ifp->if_link_state = LINK_STATE_UP;
2076 if_link_state_change(ifp);
2077 } else if (!link_check && adapter->link_active == 1) {
2078 ifp->if_baudrate = adapter->link_speed = 0;
2079 adapter->link_duplex = 0;
2080 if (bootverbose)
2081 device_printf(dev, "Link is Down\n");
2082 adapter->link_active = 0;
2083#if 0
2084 /* Link down, disable watchdog */
 2085 ifp->if_timer = 0;
2086#endif
2087 ifp->if_link_state = LINK_STATE_DOWN;
2088 if_link_state_change(ifp);
984263bc 2089 }
984263bc
MD
2090}
2091
984263bc 2092static void
9c80d176 2093em_stop(struct adapter *adapter)
984263bc 2094{
9c80d176
SZ
2095 struct ifnet *ifp = &adapter->arpcom.ac_if;
2096 int i;
984263bc 2097
1eca7b82
SZ
2098 ASSERT_SERIALIZED(ifp->if_serializer);
2099
984263bc 2100 em_disable_intr(adapter);
9c80d176 2101
9ccd8c1f
JS
2102 callout_stop(&adapter->timer);
2103 callout_stop(&adapter->tx_fifo_timer);
984263bc 2104
984263bc 2105 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
af82d4bb 2106 ifp->if_timer = 0;
9c80d176
SZ
2107
2108 e1000_reset_hw(&adapter->hw);
2109 if (adapter->hw.mac.type >= e1000_82544)
2110 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2111
2112 for (i = 0; i < adapter->num_tx_desc; i++) {
2113 struct em_buffer *tx_buffer = &adapter->tx_buffer_area[i];
2114
2115 if (tx_buffer->m_head != NULL) {
2116 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2117 m_freem(tx_buffer->m_head);
2118 tx_buffer->m_head = NULL;
2119 }
9c80d176
SZ
2120 }
2121
2122 for (i = 0; i < adapter->num_rx_desc; i++) {
2123 struct em_buffer *rx_buffer = &adapter->rx_buffer_area[i];
2124
2125 if (rx_buffer->m_head != NULL) {
2126 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2127 m_freem(rx_buffer->m_head);
2128 rx_buffer->m_head = NULL;
2129 }
2130 }
c9ff32cc
SZ
2131
2132 if (adapter->fmp != NULL)
2133 m_freem(adapter->fmp);
2134 adapter->fmp = NULL;
2135 adapter->lmp = NULL;
51e6819f
SZ
2136
2137 adapter->csum_flags = 0;
2138 adapter->csum_ehlen = 0;
2139 adapter->csum_iphlen = 0;
9f60d74b
SZ
2140
2141 adapter->tx_dd_head = 0;
2142 adapter->tx_dd_tail = 0;
2143 adapter->tx_nsegs = 0;
984263bc
MD
2144}
2145
9c80d176
SZ
2146static int
2147em_get_hw_info(struct adapter *adapter)
984263bc
MD
2148{
2149 device_t dev = adapter->dev;
2150
984263bc
MD
2151 /* Save off the information about this board */
2152 adapter->hw.vendor_id = pci_get_vendor(dev);
2153 adapter->hw.device_id = pci_get_device(dev);
f647ad3d
JS
2154 adapter->hw.revision_id = pci_get_revid(dev);
2155 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
9c80d176 2156 adapter->hw.subsystem_device_id = pci_get_subdevice(dev);
984263bc 2157
9c80d176
SZ
2158 /* Do Shared Code Init and Setup */
2159 if (e1000_set_mac_type(&adapter->hw))
2160 return ENXIO;
2161 return 0;
984263bc
MD
2162}
2163
1eca7b82 2164static int
9c80d176 2165em_alloc_pci_res(struct adapter *adapter)
1eca7b82 2166{
9c80d176 2167 device_t dev = adapter->dev;
053f3ae6 2168 u_int intr_flags;
84e26aaa 2169 int val, rid, msi_enable;
9c80d176
SZ
2170
2171 /* Enable bus mastering */
2172 pci_enable_busmaster(dev);
1eca7b82 2173
9c80d176
SZ
2174 adapter->memory_rid = EM_BAR_MEM;
2175 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2176 &adapter->memory_rid, RF_ACTIVE);
2177 if (adapter->memory == NULL) {
1eca7b82 2178 device_printf(dev, "Unable to allocate bus resource: memory\n");
9c80d176 2179 return (ENXIO);
1eca7b82
SZ
2180 }
2181 adapter->osdep.mem_bus_space_tag =
9c80d176 2182 rman_get_bustag(adapter->memory);
1eca7b82 2183 adapter->osdep.mem_bus_space_handle =
9c80d176
SZ
2184 rman_get_bushandle(adapter->memory);
2185
2186 /* XXX This is quite goofy, it is not actually used */
1eca7b82
SZ
2187 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
2188
9c80d176
SZ
2189 /* Only older adapters use IO mapping */
2190 if (adapter->hw.mac.type > e1000_82543 &&
2191 adapter->hw.mac.type < e1000_82571) {
1eca7b82 2192 /* Figure out where our IO BAR is */
9c80d176 2193 for (rid = PCIR_BAR(0); rid < PCIR_CARDBUSCIS;) {
1eca7b82 2194 val = pci_read_config(dev, rid, 4);
87307ba1 2195 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
1eca7b82
SZ
2196 adapter->io_rid = rid;
2197 break;
2198 }
2199 rid += 4;
87307ba1
SZ
2200 /* check for 64bit BAR */
2201 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2202 rid += 4;
1eca7b82 2203 }
9c80d176 2204 if (rid >= PCIR_CARDBUSCIS) {
87307ba1
SZ
2205 device_printf(dev, "Unable to locate IO BAR\n");
2206 return (ENXIO);
9c80d176
SZ
2207 }
2208 adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
2209 &adapter->io_rid, RF_ACTIVE);
2210 if (adapter->ioport == NULL) {
1eca7b82 2211 device_printf(dev, "Unable to allocate bus resource: "
9c80d176
SZ
2212 "ioport\n");
2213 return (ENXIO);
1eca7b82 2214 }
87307ba1
SZ
2215 adapter->hw.io_base = 0;
2216 adapter->osdep.io_bus_space_tag =
9c80d176 2217 rman_get_bustag(adapter->ioport);
87307ba1 2218 adapter->osdep.io_bus_space_handle =
9c80d176 2219 rman_get_bushandle(adapter->ioport);
1eca7b82
SZ
2220 }
2221
84e26aaa 2222 /*
a835687d
SZ
2223 * Don't enable MSI-X on 82574, see:
2224 * 82574 specification update errata #15
2225 *
84e26aaa 2226 * Don't enable MSI on PCI/PCI-X chips, see:
a835687d
SZ
2227 * 82540 specification update errata #6
2228 * 82545 specification update errata #4
84e26aaa
SZ
2229 *
2230 * Don't enable MSI on 82571/82572, see:
a835687d 2231 * 82571/82572 specification update errata #63
84e26aaa
SZ
2232 */
2233 msi_enable = em_msi_enable;
2234 if (msi_enable &&
2235 (!pci_is_pcie(dev) ||
2236 adapter->hw.mac.type == e1000_82571 ||
2237 adapter->hw.mac.type == e1000_82572))
2238 msi_enable = 0;
2239
2240 adapter->intr_type = pci_alloc_1intr(dev, msi_enable,
053f3ae6
SZ
2241 &adapter->intr_rid, &intr_flags);
2242
87ab432b
SZ
2243 if (adapter->intr_type == PCI_INTR_TYPE_LEGACY) {
2244 int unshared;
2245
2246 unshared = device_getenv_int(dev, "irq.unshared", 0);
2247 if (!unshared) {
2248 adapter->flags |= EM_FLAG_SHARED_INTR;
2249 if (bootverbose)
2250 device_printf(dev, "IRQ shared\n");
2251 } else {
2252 intr_flags &= ~RF_SHAREABLE;
2253 if (bootverbose)
2254 device_printf(dev, "IRQ unshared\n");
2255 }
2256 }
2257
9c80d176 2258 adapter->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
053f3ae6 2259 &adapter->intr_rid, intr_flags);
9c80d176 2260 if (adapter->intr_res == NULL) {
1eca7b82 2261 device_printf(dev, "Unable to allocate bus resource: "
9c80d176
SZ
2262 "interrupt\n");
2263 return (ENXIO);
1eca7b82
SZ
2264 }
2265
9c80d176 2266 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1eca7b82 2267 adapter->hw.back = &adapter->osdep;
a483bd34 2268 return (0);
1eca7b82
SZ
2269}
2270
2271static void
9c80d176 2272em_free_pci_res(struct adapter *adapter)
1eca7b82 2273{
9c80d176 2274 device_t dev = adapter->dev;
1eca7b82 2275
9c80d176
SZ
2276 if (adapter->intr_res != NULL) {
2277 bus_release_resource(dev, SYS_RES_IRQ,
2278 adapter->intr_rid, adapter->intr_res);
1eca7b82 2279 }
9c80d176 2280
053f3ae6
SZ
2281 if (adapter->intr_type == PCI_INTR_TYPE_MSI)
2282 pci_release_msi(dev);
2283
9c80d176
SZ
2284 if (adapter->memory != NULL) {
2285 bus_release_resource(dev, SYS_RES_MEMORY,
2286 adapter->memory_rid, adapter->memory);
1eca7b82
SZ
2287 }
2288
9c80d176
SZ
2289 if (adapter->flash != NULL) {
2290 bus_release_resource(dev, SYS_RES_MEMORY,
2291 adapter->flash_rid, adapter->flash);
1eca7b82
SZ
2292 }
2293
9c80d176
SZ
2294 if (adapter->ioport != NULL) {
2295 bus_release_resource(dev, SYS_RES_IOPORT,
2296 adapter->io_rid, adapter->ioport);
1eca7b82
SZ
2297 }
2298}
2299
984263bc 2300static int
2d0e5700 2301em_reset(struct adapter *adapter)
984263bc 2302{
9c80d176
SZ
2303 device_t dev = adapter->dev;
2304 uint16_t rx_buffer_size;
7ea52455 2305
984263bc
MD
2306 /* When hardware is reset, fifo_head is also reset */
2307 adapter->tx_fifo_head = 0;
2308
87307ba1 2309 /* Set up smart power down as default off on newer adapters. */
1eca7b82 2310 if (!em_smart_pwr_down &&
9c80d176
SZ
2311 (adapter->hw.mac.type == e1000_82571 ||
2312 adapter->hw.mac.type == e1000_82572)) {
1eca7b82
SZ
2313 uint16_t phy_tmp = 0;
2314
87307ba1 2315 /* Speed up time to link by disabling smart power down. */
9c80d176
SZ
2316 e1000_read_phy_reg(&adapter->hw,
2317 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1eca7b82 2318 phy_tmp &= ~IGP02E1000_PM_SPD;
9c80d176
SZ
2319 e1000_write_phy_reg(&adapter->hw,
2320 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
1eca7b82
SZ
2321 }
2322
7ea52455 2323 /*
87307ba1
SZ
2324 * These parameters control the automatic generation (Tx) and
2325 * response (Rx) to Ethernet PAUSE frames.
7ea52455
SZ
2326 * - High water mark should allow for at least two frames to be
2327 * received after sending an XOFF.
2328 * - Low water mark works best when it is very near the high water mark.
2329 * This allows the receiver to restart by sending XON when it has
9c80d176
SZ
 2330 * drained a bit. Here we use an arbitrary value of 1500 which will
2331 * restart after one full frame is pulled from the buffer. There
7ea52455
SZ
2332 * could be several smaller frames in the buffer and if so they will
 2333 * not trigger the XON until their total size reduces the buffer
2334 * by 1500.
2335 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2336 */
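	/*
	 * Worked example with hypothetical numbers: if the PBA register
	 * reports a 48KB RX packet buffer, rx_buffer_size below is
	 * 48 << 10 = 49152 bytes.  With a standard 1518-byte max frame,
	 * roundup2(1518, 1024) = 2048, so high_water = 49152 - 2048 = 47104
	 * and low_water = 47104 - 1500 = 45604 bytes.
	 */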
9c80d176
SZ
2337 rx_buffer_size =
2338 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) << 10;
7ea52455 2339
9c80d176
SZ
2340 adapter->hw.fc.high_water = rx_buffer_size -
2341 roundup2(adapter->max_frame_size, 1024);
2342 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2343
2344 if (adapter->hw.mac.type == e1000_80003es2lan)
2345 adapter->hw.fc.pause_time = 0xFFFF;
1eca7b82 2346 else
9c80d176 2347 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2d0e5700 2348
9c80d176 2349 adapter->hw.fc.send_xon = TRUE;
2d0e5700 2350
9c80d176 2351 adapter->hw.fc.requested_mode = e1000_fc_full;
7ea52455 2352
2d0e5700
SZ
2353 /* Workaround: no TX flow ctrl for PCH */
2354 if (adapter->hw.mac.type == e1000_pchlan)
2355 adapter->hw.fc.requested_mode = e1000_fc_rx_pause;
2356
2357 /* Override - settings for PCH2LAN, ya its magic :) */
2358 if (adapter->hw.mac.type == e1000_pch2lan) {
2359 adapter->hw.fc.high_water = 0x5C20;
2360 adapter->hw.fc.low_water = 0x5048;
2361 adapter->hw.fc.pause_time = 0x0650;
2362 adapter->hw.fc.refresh_time = 0x0400;
2363
2364 /* Jumbos need adjusted PBA */
2365 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU)
2366 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 12);
2367 else
2368 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 26);
2369 }
2370
2371 /* Issue a global reset */
2372 e1000_reset_hw(&adapter->hw);
2373 if (adapter->hw.mac.type >= e1000_82544)
2374 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
6d5e2922 2375 em_disable_aspm(adapter);
2d0e5700 2376
9c80d176
SZ
2377 if (e1000_init_hw(&adapter->hw) < 0) {
2378 device_printf(dev, "Hardware Initialization Failed\n");
87307ba1 2379 return (EIO);
984263bc
MD
2380 }
2381
2d0e5700
SZ
2382 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
2383 e1000_get_phy_info(&adapter->hw);
9c80d176 2384 e1000_check_for_link(&adapter->hw);
984263bc 2385
87307ba1 2386 return (0);
984263bc
MD
2387}
2388
984263bc 2389static void
9c80d176 2390em_setup_ifp(struct adapter *adapter)
984263bc 2391{
9c80d176 2392 struct ifnet *ifp = &adapter->arpcom.ac_if;
984263bc 2393
9c80d176
SZ
2394 if_initname(ifp, device_get_name(adapter->dev),
2395 device_get_unit(adapter->dev));
984263bc
MD
2396 ifp->if_softc = adapter;
2397 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
9c80d176 2398 ifp->if_init = em_init;
984263bc
MD
2399 ifp->if_ioctl = em_ioctl;
2400 ifp->if_start = em_start;
9c095379
MD
2401#ifdef DEVICE_POLLING
2402 ifp->if_poll = em_poll;
2403#endif
984263bc 2404 ifp->if_watchdog = em_watchdog;
e26dc3e9 2405 ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
19b1d5b8 2406 ifq_set_ready(&ifp->if_snd);
984263bc 2407
9c80d176 2408 ether_ifattach(ifp, adapter->hw.mac.addr, NULL);
984263bc 2409
9c80d176
SZ
2410 if (adapter->hw.mac.type >= e1000_82543)
2411 ifp->if_capabilities = IFCAP_HWCSUM;
e095c7aa 2412
9c80d176
SZ
2413 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2414 ifp->if_capenable = ifp->if_capabilities;
984263bc 2415
9c80d176
SZ
2416 if (ifp->if_capenable & IFCAP_TXCSUM)
2417 ifp->if_hwassist = EM_CSUM_FEATURES;
21fa6062 2418
f647ad3d
JS
2419 /*
2420 * Tell the upper layer(s) we support long frames.
2421 */
2422 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
984263bc 2423
87307ba1 2424 /*
984263bc
MD
2425 * Specify the media types supported by this adapter and register
2426 * callbacks to update media and link information
2427 */
9c80d176
SZ
2428 ifmedia_init(&adapter->media, IFM_IMASK,
2429 em_media_change, em_media_status);
2430 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
2431 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
2432 u_char fiber_type = IFM_1000_SX; /* default type */
2433
2434 if (adapter->hw.mac.type == e1000_82545)
1eca7b82
SZ
2435 fiber_type = IFM_1000_LX;
2436 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
984263bc 2437 0, NULL);
87307ba1 2438 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
984263bc
MD
2439 } else {
2440 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
87307ba1 2441 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
984263bc 2442 0, NULL);
87307ba1 2443 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
984263bc 2444 0, NULL);
87307ba1 2445 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
984263bc 2446 0, NULL);
9c80d176
SZ
2447 if (adapter->hw.phy.type != e1000_phy_ife) {
2448 ifmedia_add(&adapter->media,
2449 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2450 ifmedia_add(&adapter->media,
2451 IFM_ETHER | IFM_1000_T, 0, NULL);
2452 }
984263bc
MD
2453 }
2454 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2455 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
984263bc
MD
2456}
2457
9c80d176
SZ
2458
2459/*
2460 * Workaround for SmartSpeed on 82541 and 82547 controllers
2461 */
984263bc
MD
2462static void
2463em_smartspeed(struct adapter *adapter)
2464{
f647ad3d
JS
2465 uint16_t phy_tmp;
2466
9c80d176
SZ
2467 if (adapter->link_active || adapter->hw.phy.type != e1000_phy_igp ||
2468 adapter->hw.mac.autoneg == 0 ||
2469 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
984263bc
MD
2470 return;
2471
f647ad3d
JS
2472 if (adapter->smartspeed == 0) {
2473 /*
2474 * If Master/Slave config fault is asserted twice,
9c80d176 2475 * we assume back-to-back
f647ad3d 2476 */
9c80d176 2477 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
f647ad3d
JS
2478 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2479 return;
9c80d176 2480 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
f647ad3d 2481 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
9c80d176
SZ
2482 e1000_read_phy_reg(&adapter->hw,
2483 PHY_1000T_CTRL, &phy_tmp);
f647ad3d
JS
2484 if (phy_tmp & CR_1000T_MS_ENABLE) {
2485 phy_tmp &= ~CR_1000T_MS_ENABLE;
9c80d176
SZ
2486 e1000_write_phy_reg(&adapter->hw,
2487 PHY_1000T_CTRL, phy_tmp);
f647ad3d 2488 adapter->smartspeed++;
9c80d176
SZ
2489 if (adapter->hw.mac.autoneg &&
2490 !e1000_phy_setup_autoneg(&adapter->hw) &&
2491 !e1000_read_phy_reg(&adapter->hw,
2492 PHY_CONTROL, &phy_tmp)) {
2493 phy_tmp |= MII_CR_AUTO_NEG_EN |
2494 MII_CR_RESTART_AUTO_NEG;
2495 e1000_write_phy_reg(&adapter->hw,
2496 PHY_CONTROL, phy_tmp);
f647ad3d
JS
2497 }
2498 }
2499 }
87307ba1 2500 return;
f647ad3d
JS
2501 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2502 /* If still no link, perhaps using 2/3 pair cable */
9c80d176 2503 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
f647ad3d 2504 phy_tmp |= CR_1000T_MS_ENABLE;
9c80d176
SZ
2505 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2506 if (adapter->hw.mac.autoneg &&
2507 !e1000_phy_setup_autoneg(&adapter->hw) &&
2508 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2509 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
2510 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
f647ad3d
JS
2511 }
2512 }
9c80d176 2513
f647ad3d
JS
2514 /* Restart process after EM_SMARTSPEED_MAX iterations */
2515 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2516 adapter->smartspeed = 0;
984263bc
MD
2517}
2518
9ccd8c1f
JS
2519static int
2520em_dma_malloc(struct adapter *adapter, bus_size_t size,
87307ba1 2521 struct em_dma_alloc *dma)
9ccd8c1f 2522{
9c80d176
SZ
2523 dma->dma_vaddr = bus_dmamem_coherent_any(adapter->parent_dtag,
2524 EM_DBA_ALIGN, size, BUS_DMA_WAITOK,
2525 &dma->dma_tag, &dma->dma_map,
2526 &dma->dma_paddr);
2527 if (dma->dma_vaddr == NULL)
2528 return ENOMEM;
2529 else
2530 return 0;
9ccd8c1f
JS
2531}
2532
2533static void
2534em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2535{
9c80d176
SZ
2536 if (dma->dma_tag == NULL)
2537 return;
2538 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2539 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2540 bus_dma_tag_destroy(dma->dma_tag);
984263bc
MD
2541}
2542
984263bc 2543static int
9c80d176 2544em_create_tx_ring(struct adapter *adapter)
984263bc 2545{
9c80d176 2546 device_t dev = adapter->dev;
1eca7b82 2547 struct em_buffer *tx_buffer;
1eca7b82
SZ
2548 int error, i;
2549
87307ba1
SZ
2550 adapter->tx_buffer_area =
2551 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc,
2552 M_DEVBUF, M_WAITOK | M_ZERO);
984263bc 2553
9c80d176
SZ
2554 /*
2555 * Create DMA tags for tx buffers
2556 */
2557 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */
2558 1, 0, /* alignment, bounds */
2559 BUS_SPACE_MAXADDR, /* lowaddr */
2560 BUS_SPACE_MAXADDR, /* highaddr */
2561 NULL, NULL, /* filter, filterarg */
2562 EM_TSO_SIZE, /* maxsize */
2563 EM_MAX_SCATTER, /* nsegments */
2564 EM_MAX_SEGSIZE, /* maxsegsize */
2565 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
2566 BUS_DMA_ONEBPAGE, /* flags */
2567 &adapter->txtag);
2568 if (error) {
2569 device_printf(dev, "Unable to allocate TX DMA tag\n");
2570 kfree(adapter->tx_buffer_area, M_DEVBUF);
2571 adapter->tx_buffer_area = NULL;
2572 return error;
2573 }
2574
2575 /*
2576 * Create DMA maps for tx buffers
2577 */
1eca7b82 2578 for (i = 0; i < adapter->num_tx_desc; i++) {
9c80d176
SZ
2579 tx_buffer = &adapter->tx_buffer_area[i];
2580
2581 error = bus_dmamap_create(adapter->txtag,
2582 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2583 &tx_buffer->map);
1eca7b82 2584 if (error) {
9c80d176
SZ
2585 device_printf(dev, "Unable to create TX DMA map\n");
2586 em_destroy_tx_ring(adapter, i);
2587 return error;
1eca7b82 2588 }
1eca7b82 2589 }
9c80d176
SZ
2590 return (0);
2591}
9ccd8c1f 2592
9c80d176
SZ
2593static void
2594em_init_tx_ring(struct adapter *adapter)
2595{
2596 /* Clear the old ring contents */
2597 bzero(adapter->tx_desc_base,
2598 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2599
2600 /* Reset state */
87307ba1
SZ
2601 adapter->next_avail_tx_desc = 0;
2602 adapter->next_tx_to_clean = 0;
984263bc 2603 adapter->num_tx_desc_avail = adapter->num_tx_desc;
984263bc
MD
2604}
2605
984263bc 2606static void
9c80d176 2607em_init_tx_unit(struct adapter *adapter)
984263bc 2608{
9c80d176 2609 uint32_t tctl, tarc, tipg = 0;
9ccd8c1f
JS
2610 uint64_t bus_addr;
2611
984263bc 2612 /* Setup the Base and Length of the Tx Descriptor Ring */
9ccd8c1f 2613 bus_addr = adapter->txdma.dma_paddr;
9c80d176
SZ
2614 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2615 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2616 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2617 (uint32_t)(bus_addr >> 32));
2618 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2619 (uint32_t)bus_addr);
984263bc 2620 /* Setup the HW Tx Head and Tail descriptor pointers */
9c80d176
SZ
2621 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2622 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
984263bc 2623
984263bc 2624 /* Set the default values for the Tx Inter Packet Gap timer */
9c80d176
SZ
2625 switch (adapter->hw.mac.type) {
2626 case e1000_82542:
2627 tipg = DEFAULT_82542_TIPG_IPGT;
2628 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2629 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
984263bc 2630 break;
9c80d176
SZ
2631
2632 case e1000_80003es2lan:
2633 tipg = DEFAULT_82543_TIPG_IPGR1;
2634 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
2635 E1000_TIPG_IPGR2_SHIFT;
1eca7b82 2636 break;
9c80d176 2637
984263bc 2638 default:
9c80d176
SZ
2639 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
2640 adapter->hw.phy.media_type ==
2641 e1000_media_type_internal_serdes)
2642 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
984263bc 2643 else
9c80d176
SZ
2644 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2645 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2646 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2647 break;
2648 }
2649
2650 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
91e8debf
SZ
2651
2652 /* NOTE: 0 is not allowed for TIDV */
2653 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, 1);
 2654 if (adapter->hw.mac.type >= e1000_82540)
2655 E1000_WRITE_REG(&adapter->hw, E1000_TADV, 0);
984263bc 2656
9c80d176
SZ
2657 if (adapter->hw.mac.type == e1000_82571 ||
2658 adapter->hw.mac.type == e1000_82572) {
2659 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
2660 tarc |= SPEED_MODE_BIT;
2661 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2662 } else if (adapter->hw.mac.type == e1000_80003es2lan) {
2663 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
2664 tarc |= 1;
2665 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2666 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
2667 tarc |= 1;
2668 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
1eca7b82
SZ
2669 }
2670
984263bc 2671 /* Program the Transmit Control Register */
9c80d176
SZ
2672 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2673 tctl &= ~E1000_TCTL_CT;
2674 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2675 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2676
2677 if (adapter->hw.mac.type >= e1000_82571)
2678 tctl |= E1000_TCTL_MULR;
1eca7b82 2679
87307ba1 2680 /* This write will effectively turn on the transmit unit. */
9c80d176 2681 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
984263bc
MD
2682}
2683
984263bc 2684static void
9c80d176 2685em_destroy_tx_ring(struct adapter *adapter, int ndesc)
984263bc 2686{
f647ad3d
JS
2687 struct em_buffer *tx_buffer;
2688 int i;
984263bc 2689
9c80d176
SZ
2690 if (adapter->tx_buffer_area == NULL)
2691 return;
984263bc 2692
9c80d176
SZ
2693 for (i = 0; i < ndesc; i++) {
2694 tx_buffer = &adapter->tx_buffer_area[i];
1eca7b82 2695
9c80d176
SZ
2696 KKASSERT(tx_buffer->m_head == NULL);
2697 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
9ccd8c1f 2698 }
9c80d176
SZ
2699 bus_dma_tag_destroy(adapter->txtag);
2700
2701 kfree(adapter->tx_buffer_area, M_DEVBUF);
2702 adapter->tx_buffer_area = NULL;
984263bc
MD
2703}
2704
9c80d176
SZ
2705/*
2706 * The offload context needs to be set when we transfer the first
2707 * packet of a particular protocol (TCP/UDP). This routine has been
002b3a05 2708 * enhanced to deal with inserted VLAN headers.
51e6819f
SZ
2709 *
2710 * If the new packet's ether header length, ip header length and
2711 * csum offloading type are same as the previous packet, we should
2712 * avoid allocating a new csum context descriptor; mainly to take
2713 * advantage of the pipeline effect of the TX data read request.
9f60d74b
SZ
2714 *
 2715 * This function returns the number of TX descriptors allocated for
2716 * csum context.
9c80d176 2717 */
9f60d74b 2718static int
9c80d176
SZ
2719em_txcsum(struct adapter *adapter, struct mbuf *mp,
2720 uint32_t *txd_upper, uint32_t *txd_lower)
984263bc 2721{
9c80d176 2722 struct e1000_context_desc *TXD;
984263bc 2723 struct em_buffer *tx_buffer;
9c80d176 2724 struct ether_vlan_header *eh;
51e6819f
SZ
2725 struct ip *ip;
2726 int curr_txd, ehdrlen, csum_flags;
9c80d176
SZ
2727 uint32_t cmd, hdr_len, ip_hlen;
2728 uint16_t etype;
9c80d176 2729
9c80d176
SZ
2730 /*
2731 * Determine where frame payload starts.
2732 * Jump over vlan headers if already present,
2733 * helpful for QinQ too.
2734 */
252dfd0d 2735 KASSERT(mp->m_len >= ETHER_HDR_LEN,
ed20d0e3 2736 ("em_txcsum_pullup is not called (eh)?"));
9c80d176
SZ
2737 eh = mtod(mp, struct ether_vlan_header *);
2738 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
252dfd0d 2739 KASSERT(mp->m_len >= ETHER_HDR_LEN + EVL_ENCAPLEN,
ed20d0e3 2740 ("em_txcsum_pullup is not called (evh)?"));
9c80d176
SZ
2741 etype = ntohs(eh->evl_proto);
2742 ehdrlen = ETHER_HDR_LEN + EVL_ENCAPLEN;
984263bc 2743 } else {
9c80d176
SZ
2744 etype = ntohs(eh->evl_encap_proto);
2745 ehdrlen = ETHER_HDR_LEN;
984263bc
MD
2746 }
2747
1eca7b82 2748 /*
002b3a05 2749 * We only support TCP/UDP for IPv4 for the moment.
9c80d176 2750 * TODO: Support SCTP too when it hits the tree.
984263bc 2751 */
51e6819f 2752 if (etype != ETHERTYPE_IP)
9f60d74b 2753 return 0;
002b3a05 2754
51e6819f 2755 KASSERT(mp->m_len >= ehdrlen + EM_IPVHL_SIZE,
ed20d0e3 2756 ("em_txcsum_pullup is not called (eh+ip_vhl)?"));
9c80d176 2757
51e6819f
SZ
 2758 /* NOTE: We can only safely access the ip.ip_vhl part here */
2759 ip = (struct ip *)(mp->m_data + ehdrlen);
2760 ip_hlen = ip->ip_hl << 2;
984263bc 2761
51e6819f
SZ
2762 csum_flags = mp->m_pkthdr.csum_flags & EM_CSUM_FEATURES;
2763
2764 if (adapter->csum_ehlen == ehdrlen &&
2765 adapter->csum_iphlen == ip_hlen &&
2766 adapter->csum_flags == csum_flags) {
2767 /*
2768 * Same csum offload context as the previous packets;
2769 * just return.
2770 */
2771 *txd_upper = adapter->csum_txd_upper;
2772 *txd_lower = adapter->csum_txd_lower;
9f60d74b 2773 return 0;
984263bc
MD
2774 }
2775
51e6819f
SZ
2776 /*
2777 * Setup a new csum offload context.
2778 */
2779
2780 curr_txd = adapter->next_avail_tx_desc;
2781 tx_buffer = &adapter->tx_buffer_area[curr_txd];
2782 TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd];
2783
2784 cmd = 0;
2785
2786 /* Setup of IP header checksum. */
2787 if (csum_flags & CSUM_IP) {
2788 /*
2789 * Start offset for header checksum calculation.
2790 * End offset for header checksum calculation.
2791 * Offset of place to put the checksum.
2792 */
2793 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2794 TXD->lower_setup.ip_fields.ipcse =
2795 htole16(ehdrlen + ip_hlen - 1);
2796 TXD->lower_setup.ip_fields.ipcso =
2797 ehdrlen + offsetof(struct ip, ip_sum);
2798 cmd |= E1000_TXD_CMD_IP;
2799 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2800 }
2801 hdr_len = ehdrlen + ip_hlen;
2802
2803 if (csum_flags & CSUM_TCP) {
002b3a05
SZ
2804 /*
2805 * Start offset for payload checksum calculation.
2806 * End offset for payload checksum calculation.
2807 * Offset of place to put the checksum.
2808 */
2809 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2810 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2811 TXD->upper_setup.tcp_fields.tucso =
2812 hdr_len + offsetof(struct tcphdr, th_sum);
2813 cmd |= E1000_TXD_CMD_TCP;
2814 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
51e6819f 2815 } else if (csum_flags & CSUM_UDP) {
002b3a05
SZ
2816 /*
 2817 * Start offset for payload checksum calculation.
 2818 * End offset for payload checksum calculation.
2819 * Offset of place to put the checksum.
2820 */
2821 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2822 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2823 TXD->upper_setup.tcp_fields.tucso =
2824 hdr_len + offsetof(struct udphdr, uh_sum);
2825 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
9c80d176
SZ
2826 }
2827
2828 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
2829 E1000_TXD_DTYP_D; /* Data descr */
51e6819f
SZ
2830
2831 /* Save the information for this csum offloading context */
2832 adapter->csum_ehlen = ehdrlen;
2833 adapter->csum_iphlen = ip_hlen;
2834 adapter->csum_flags = csum_flags;
2835 adapter->csum_txd_upper = *txd_upper;
2836 adapter->csum_txd_lower = *txd_lower;
2837
9c80d176
SZ
2838 TXD->tcp_seg_setup.data = htole32(0);
2839 TXD->cmd_and_length =
2af74b85 2840 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);
984263bc
MD
2841
2842 if (++curr_txd == adapter->num_tx_desc)
2843 curr_txd = 0;
2844
9c80d176 2845 KKASSERT(adapter->num_tx_desc_avail > 0);
984263bc 2846 adapter->num_tx_desc_avail--;
9c80d176 2847
984263bc 2848 adapter->next_avail_tx_desc = curr_txd;
9f60d74b 2849 return 1;
984263bc
MD
2850}
2851
002b3a05
SZ
2852static int
2853em_txcsum_pullup(struct adapter *adapter, struct mbuf **m0)
2854{
2855 struct mbuf *m = *m0;
2856 struct ether_header *eh;
2857 int len;
2858
2859 adapter->tx_csum_try_pullup++;
2860
2861 len = ETHER_HDR_LEN + EM_IPVHL_SIZE;
2862
2863 if (__predict_false(!M_WRITABLE(m))) {
2864 if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
2865 adapter->tx_csum_drop1++;
2866 m_freem(m);
2867 *m0 = NULL;
2868 return ENOBUFS;
2869 }
2870 eh = mtod(m, struct ether_header *);
2871
2872 if (eh->ether_type == htons(ETHERTYPE_VLAN))
2873 len += EVL_ENCAPLEN;
2874
3752657e 2875 if (m->m_len < len) {
002b3a05
SZ
2876 adapter->tx_csum_drop2++;
2877 m_freem(m);
2878 *m0 = NULL;
2879 return ENOBUFS;
2880 }
2881 return 0;
2882 }
2883
2884 if (__predict_false(m->m_len < ETHER_HDR_LEN)) {
2885 adapter->tx_csum_pullup1++;
2886 m = m_pullup(m, ETHER_HDR_LEN);
2887 if (m == NULL) {
2888 adapter->tx_csum_pullup1_failed++;
2889 *m0 = NULL;
2890 return ENOBUFS;
2891 }
2892 *m0 = m;
2893 }
2894 eh = mtod(m, struct ether_header *);
2895
2896 if (eh->ether_type == htons(ETHERTYPE_VLAN))
2897 len += EVL_ENCAPLEN;
2898
3752657e 2899 if (m->m_len < len) {
002b3a05
SZ
2900 adapter->tx_csum_pullup2++;
2901 m = m_pullup(m, len);
2902 if (m == NULL) {
2903 adapter->tx_csum_pullup2_failed++;
2904 *m0 = NULL;
2905 return ENOBUFS;
2906 }
2907 *m0 = m;
2908 }
2909 return 0;
2910}
2911
984263bc 2912static void
87307ba1 2913em_txeof(struct adapter *adapter)
984263bc 2914{
9c80d176 2915 struct ifnet *ifp = &adapter->arpcom.ac_if;
9f60d74b
SZ
2916 struct em_buffer *tx_buffer;
2917 int first, num_avail;
2918
2919 if (adapter->tx_dd_head == adapter->tx_dd_tail)
2920 return;
984263bc 2921
f647ad3d
JS
2922 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2923 return;
984263bc 2924
9c80d176 2925 num_avail = adapter->num_tx_desc_avail;
87307ba1 2926 first = adapter->next_tx_to_clean;
9c80d176 2927
9f60d74b 2928 while (adapter->tx_dd_head != adapter->tx_dd_tail) {
4e499730 2929 struct e1000_tx_desc *tx_desc;
9f60d74b 2930 int dd_idx = adapter->tx_dd[adapter->tx_dd_head];
984263bc 2931
9f60d74b 2932 tx_desc = &adapter->tx_desc_base[dd_idx];
9f60d74b
SZ
2933 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2934 EM_INC_TXDD_IDX(adapter->tx_dd_head);
984263bc 2935
9f60d74b
SZ
2936 if (++dd_idx == adapter->num_tx_desc)
2937 dd_idx = 0;
9c80d176 2938
9f60d74b 2939 while (first != dd_idx) {
edbfa193
SZ
2940 logif(pkt_txclean);
2941
9f60d74b
SZ
2942 num_avail++;
2943
4e499730 2944 tx_buffer = &adapter->tx_buffer_area[first];
9f60d74b
SZ
2945 if (tx_buffer->m_head) {
2946 ifp->if_opackets++;
2947 bus_dmamap_unload(adapter->txtag,
2948 tx_buffer->map);
2949 m_freem(tx_buffer->m_head);
2950 tx_buffer->m_head = NULL;
2951 }
2952
2953 if (++first == adapter->num_tx_desc)
2954 first = 0;
2955 }
87307ba1
SZ
2956 } else {
2957 break;
2958 }
f647ad3d 2959 }
9f60d74b
SZ
2960 adapter->next_tx_to_clean = first;
2961 adapter->num_tx_desc_avail = num_avail;
2962
2963 if (adapter->tx_dd_head == adapter->tx_dd_tail) {
2964 adapter->tx_dd_head = 0;
2965 adapter->tx_dd_tail = 0;
2966 }
2967
2968 if (!EM_IS_OACTIVE(adapter)) {
2969 ifp->if_flags &= ~IFF_OACTIVE;
2970
2971 /* All clean, turn off the timer */
2972 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2973 ifp->if_timer = 0;
2974 }
2975}
2976
2977static void
2978em_tx_collect(struct adapter *adapter)
2979{
2980 struct ifnet *ifp = &adapter->arpcom.ac_if;
9f60d74b
SZ
2981 struct em_buffer *tx_buffer;
2982 int tdh, first, num_avail, dd_idx = -1;
2983
2984 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2985 return;
2986
2987 tdh = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
2988 if (tdh == adapter->next_tx_to_clean)
2989 return;
2990
2991 if (adapter->tx_dd_head != adapter->tx_dd_tail)
2992 dd_idx = adapter->tx_dd[adapter->tx_dd_head];
2993
2994 num_avail = adapter->num_tx_desc_avail;
2995 first = adapter->next_tx_to_clean;
2996
2997 while (first != tdh) {
edbfa193
SZ
2998 logif(pkt_txclean);
2999
9f60d74b
SZ
3000 num_avail++;
3001
4e499730 3002 tx_buffer = &adapter->tx_buffer_area[first];
9f60d74b
SZ
3003 if (tx_buffer->m_head) {
3004 ifp->if_opackets++;
3005 bus_dmamap_unload(adapter->txtag,
3006 tx_buffer->map);
3007 m_freem(tx_buffer->m_head);
3008 tx_buffer->m_head = NULL;
3009 }
3010
3011 if (first == dd_idx) {
3012 EM_INC_TXDD_IDX(adapter->tx_dd_head);
3013 if (adapter->tx_dd_head == adapter->tx_dd_tail) {
3014 adapter->tx_dd_head = 0;
3015 adapter->tx_dd_tail = 0;
3016 dd_idx = -1;
3017 } else {
3018 dd_idx = adapter->tx_dd[adapter->tx_dd_head];
3019 }
3020 }
3021
3022 if (++first == adapter->num_tx_desc)
3023 first = 0;
3024 }
3025 adapter->next_tx_to_clean = first;
9c80d176 3026 adapter->num_tx_desc_avail = num_avail;
984263bc 3027
9f60d74b 3028 if (!EM_IS_OACTIVE(adapter)) {
9c80d176 3029 ifp->if_flags &= ~IFF_OACTIVE;
afa68aa1 3030
9c80d176
SZ
3031 /* All clean, turn off the timer */
3032 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3033 ifp->if_timer = 0;
3034 }
3035}
984263bc 3036
9c80d176
SZ
3037/*
 3038 * When link is lost there is sometimes work still pending in the TX
 3039 * ring, which would result in a watchdog. Rather than allow that, do
 3040 * an attempted cleanup and then reinit here. Note that this has been
 3041 * seen mostly with fiber adapters.
3042 */
3043static void
3044em_tx_purge(struct adapter *adapter)
3045{
3046 struct ifnet *ifp = &adapter->arpcom.ac_if;
3047
3048 if (!adapter->link_active && ifp->if_timer) {
9f60d74b 3049 em_tx_collect(adapter);
9c80d176
SZ
3050 if (ifp->if_timer) {
3051 if_printf(ifp, "Link lost, TX pending, reinit\n");
f647ad3d 3052 ifp->if_timer = 0;
9c80d176
SZ
3053 em_init(adapter);
3054 }
f647ad3d 3055 }
984263bc
MD
3056}
3057
984263bc 3058static int
9c80d176 3059em_newbuf(struct adapter *adapter, int i, int init)
984263bc 3060{
9c80d176
SZ
3061 struct mbuf *m;
3062 bus_dma_segment_t seg;
3063 bus_dmamap_t map;
9ccd8c1f 3064 struct em_buffer *rx_buffer;
9c80d176
SZ
3065 int error, nseg;
3066
3067 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3068 if (m == NULL) {
3069 adapter->mbuf_cluster_failed++;
3070 if (init) {
3071 if_printf(&adapter->arpcom.ac_if,
3072 "Unable to allocate RX mbuf\n");
984263bc 3073 }
9c80d176 3074 return (ENOBUFS);
984263bc 3075 }
9c80d176 3076 m->m_len = m->m_pkthdr.len = MCLBYTES;
87307ba1 3077
9c80d176
SZ
3078 if (adapter->max_frame_size <= MCLBYTES - ETHER_ALIGN)
3079 m_adj(m, ETHER_ALIGN);
9ccd8c1f 3080
9c80d176
SZ
3081 error = bus_dmamap_load_mbuf_segment(adapter->rxtag,
3082 adapter->rx_sparemap, m,
3083 &seg, 1, &nseg, BUS_DMA_NOWAIT);
9ccd8c1f 3084 if (error) {
9c80d176
SZ
3085 m_freem(m);
3086 if (init) {
3087 if_printf(&adapter->arpcom.ac_if,
3088 "Unable to load RX mbuf\n");
3089 }
87307ba1 3090 return (error);
9ccd8c1f 3091 }
984263bc 3092
9c80d176
SZ
3093 rx_buffer = &adapter->rx_buffer_area[i];
3094 if (rx_buffer->m_head != NULL)
3095 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3096
3097 map = rx_buffer->map;
3098 rx_buffer->map = adapter->rx_sparemap;
3099 adapter->rx_sparemap = map;
3100
3101 rx_buffer->m_head = m;
3102
3103 adapter->rx_desc_base[i].buffer_addr = htole64(seg.ds_addr);
87307ba1 3104 return (0);
984263bc
MD
3105}
3106
984263bc 3107static int
9c80d176 3108em_create_rx_ring(struct adapter *adapter)
984263bc 3109{
9c80d176 3110 device_t dev = adapter->dev;
9ccd8c1f 3111 struct em_buffer *rx_buffer;
9c80d176
SZ
3112 int i, error;
3113
3114 adapter->rx_buffer_area =
3115 kmalloc(sizeof(struct em_buffer) * adapter->num_rx_desc,
3116 M_DEVBUF, M_WAITOK | M_ZERO);
9ccd8c1f 3117
9c80d176
SZ
3118 /*
3119 * Create DMA tag for rx buffers
3120 */
3121 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */
3122 1, 0, /* alignment, bounds */
3123 BUS_SPACE_MAXADDR, /* lowaddr */
3124 BUS_SPACE_MAXADDR, /* highaddr */
3125 NULL, NULL, /* filter, filterarg */
3126 MCLBYTES, /* maxsize */
3127 1, /* nsegments */
3128 MCLBYTES, /* maxsegsize */
3129 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
3130 &adapter->rxtag);
87307ba1 3131 if (error) {
9c80d176
SZ
3132 device_printf(dev, "Unable to allocate RX DMA tag\n");
3133 kfree(adapter->rx_buffer_area, M_DEVBUF);
3134 adapter->rx_buffer_area = NULL;
3135 return error;
3136 }
3137
3138 /*
3139 * Create spare DMA map for rx buffers
3140 */
3141 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK,
3142 &adapter->rx_sparemap);
3143 if (error) {
3144 device_printf(dev, "Unable to create spare RX DMA map\n");
3145 bus_dma_tag_destroy(adapter->rxtag);
3146 kfree(adapter->rx_buffer_area, M_DEVBUF);
3147 adapter->rx_buffer_area = NULL;
3148 return error;
9ccd8c1f 3149 }
9c80d176
SZ
3150
3151 /*
3152 * Create DMA maps for rx buffers
3153 */
3154 for (i = 0; i < adapter->num_rx_desc; i++) {
3155 rx_buffer = &adapter->rx_buffer_area[i];
3156
3157 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK,
9ccd8c1f 3158 &rx_buffer->map);
87307ba1 3159 if (error) {
9c80d176
SZ
3160 device_printf(dev, "Unable to create RX DMA map\n");
3161 em_destroy_rx_ring(adapter, i);
3162 return error;
9ccd8c1f 3163 }
984263bc 3164 }
87307ba1 3165 return (0);
984263bc
MD
3166}
3167
984263bc 3168static int
9c80d176 3169em_init_rx_ring(struct adapter *adapter)
984263bc 3170{
9c80d176 3171 int i, error;
984263bc 3172
9c80d176 3173 /* Reset descriptor ring */
87307ba1 3174 bzero(adapter->rx_desc_base,
9c80d176 3175 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
87307ba1 3176
9c80d176
SZ
3177 /* Allocate new ones. */
3178 for (i = 0; i < adapter->num_rx_desc; i++) {
3179 error = em_newbuf(adapter, i, 1);
3180 if (error)
3181 return (error);
3182 }
984263bc
MD
3183
3184 /* Setup our descriptor pointers */
f647ad3d 3185 adapter->next_rx_desc_to_check = 0;
87307ba1
SZ
3186
3187 return (0);
984263bc
MD
3188}
3189
984263bc 3190static void
9c80d176 3191em_init_rx_unit(struct adapter *adapter)
984263bc 3192{
9c80d176 3193 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 3194 uint64_t bus_addr;
2d0e5700 3195 uint32_t rctl;
984263bc 3196
87307ba1
SZ
3197 /*
3198 * Make sure receives are disabled while setting
3199 * up the descriptor ring
3200 */
9c80d176
SZ
3201 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3202 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
984263bc 3203
9c80d176 3204 if (adapter->hw.mac.type >= e1000_82540) {
2d0e5700
SZ
3205 uint32_t itr;
3206
9c80d176
SZ
3207 /*
3208 * Set the interrupt throttling rate. Value is calculated
3209 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
3210 */
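	/*
	 * For example, with a hypothetical int_throttle_ceil of 10000
	 * interrupts/s the code below programs ITR = 1000000000 / 256 /
	 * 10000 = 390, i.e. roughly one interrupt per 390 * 256ns ~= 100us.
	 */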
2d0e5700
SZ
3211 if (adapter->int_throttle_ceil)
3212 itr = 1000000000 / 256 / adapter->int_throttle_ceil;
3213 else
3214 itr = 0;
3215 em_set_itr(adapter, itr);
f647ad3d 3216 }
984263bc 3217
9c80d176
SZ
 3218 /* Disable accelerated acknowledgement */
3219 if (adapter->hw.mac.type == e1000_82574) {
3220 E1000_WRITE_REG(&adapter->hw,
3221 E1000_RFCTL, E1000_RFCTL_ACK_DIS);
3222 }
3223
2d0e5700
SZ
3224 /* Receive Checksum Offload for TCP and UDP */
3225 if (ifp->if_capenable & IFCAP_RXCSUM) {
3226 uint32_t rxcsum;
3227
3228 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3229 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3230 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3231 }
3232
3233 /*
3234 * XXX TEMPORARY WORKAROUND: on some systems with 82573
3235 * long latencies are observed, like Lenovo X60. This
3236 * change eliminates the problem, but since having positive
3237 * values in RDTR is a known source of problems on other
 3238 * platforms, another solution is being sought.
3239 */
3240 if (em_82573_workaround && adapter->hw.mac.type == e1000_82573) {
3241 E1000_WRITE_REG(&adapter->hw, E1000_RADV, EM_RADV_82573);
3242 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, EM_RDTR_82573);
3243 }
3244
3245 /*
3246 * Setup the Base and Length of the Rx Descriptor Ring
3247 */
9ccd8c1f 3248 bus_addr = adapter->rxdma.dma_paddr;
9c80d176
SZ
3249 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3250 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3251 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3252 (uint32_t)(bus_addr >> 32));
3253 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3254 (uint32_t)bus_addr);
984263bc 3255
2d0e5700
SZ