em: Utilize mbuf's header length to setup TX csum context
[dragonfly.git] / sys / dev / netif / em / if_em.c
78195a76 1/*
78195a76
MD
2 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
3 *
9c80d176 4 * Copyright (c) 2001-2008, Intel Corporation
78195a76
MD
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9c80d176 9 *
78195a76
MD
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
9c80d176 12 *
78195a76
MD
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
9c80d176 16 *
78195a76
MD
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
9c80d176 20 *
78195a76
MD
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 *
34 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
9c80d176 35 *
78195a76
MD
36 * This code is derived from software contributed to The DragonFly Project
37 * by Matthew Dillon <dillon@backplane.com>
9c80d176 38 *
78195a76
MD
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
9c80d176 42 *
78195a76
MD
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * 3. Neither the name of The DragonFly Project nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific, prior written permission.
9c80d176 52 *
78195a76
MD
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
56 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
57 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
59 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
60 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
61 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
62 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
63 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
9c80d176 65 *
78195a76
MD
66 */
67/*
68 * SERIALIZATION API RULES:
69 *
70 * - If the driver uses the same serializer for the interrupt as for the
71 * ifnet, most of the serialization will be done automatically for the
9c80d176 72 * driver.
78195a76
MD
73 *
74 * - ifmedia entry points will be serialized by the ifmedia code using the
75 * ifnet serializer.
76 *
77 * - if_* entry points except for if_input will be serialized by the IF
78 * and protocol layers.
79 *
80 * - The device driver must be sure to serialize access from timeout code
81 * installed by the device driver.
82 *
83 * - The device driver typically holds the serializer at the time it wishes
9c80d176
SZ
84 * to call if_input.
85 *
86 * - We must call lwkt_serialize_handler_enable() prior to enabling the
87 * hardware interrupt and lwkt_serialize_handler_disable() after disabling
88 * the hardware interrupt in order to avoid handler execution races from
89 * scheduled interrupt threads.
78195a76
MD
90 *
91 * NOTE! Since callers into the device driver hold the ifnet serializer,
92 * the device driver may be holding a serializer at the time it calls
93 * if_input even if it is not serializer-aware.
94 */
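A minimal sketch of the enable/disable ordering described above (illustrative only, not part of the driver; lwkt_serialize_handler_enable()/lwkt_serialize_handler_disable() are the API named in the comment, while the em_*_intr() helpers are the ones declared later in this file):

/*
 * Sketch: toggle the hardware interrupt while honoring the rule above.
 */
static void
example_intr_toggle(struct adapter *adapter, boolean_t on)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	if (on) {
		/* Let the handler run before the hardware can schedule it. */
		lwkt_serialize_handler_enable(ifp->if_serializer);
		em_enable_intr(adapter);
	} else {
		/* Quiesce the hardware first, then block the handler. */
		em_disable_intr(adapter);
		lwkt_serialize_handler_disable(ifp->if_serializer);
	}
}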
2b71c8f1
SZ
95
96#include "opt_polling.h"
87307ba1
SZ
97
98#include <sys/param.h>
99#include <sys/bus.h>
100#include <sys/endian.h>
9db4b353 101#include <sys/interrupt.h>
87307ba1
SZ
102#include <sys/kernel.h>
103#include <sys/ktr.h>
104#include <sys/malloc.h>
105#include <sys/mbuf.h>
9c80d176 106#include <sys/proc.h>
87307ba1
SZ
107#include <sys/rman.h>
108#include <sys/serialize.h>
109#include <sys/socket.h>
110#include <sys/sockio.h>
111#include <sys/sysctl.h>
9c80d176 112#include <sys/systm.h>
87307ba1
SZ
113
114#include <net/bpf.h>
115#include <net/ethernet.h>
116#include <net/if.h>
117#include <net/if_arp.h>
118#include <net/if_dl.h>
119#include <net/if_media.h>
87307ba1
SZ
120#include <net/ifq_var.h>
121#include <net/vlan/if_vlan_var.h>
b637f170 122#include <net/vlan/if_vlan_ether.h>
87307ba1 123
87307ba1 124#include <netinet/in_systm.h>
9c80d176 125#include <netinet/in.h>
87307ba1
SZ
126#include <netinet/ip.h>
127#include <netinet/tcp.h>
128#include <netinet/udp.h>
984263bc 129
9c80d176
SZ
130#include <bus/pci/pcivar.h>
131#include <bus/pci/pcireg.h>
984263bc 132
9c80d176
SZ
133#include <dev/netif/ig_hal/e1000_api.h>
134#include <dev/netif/ig_hal/e1000_82571.h>
135#include <dev/netif/em/if_em.h>
984263bc 136
9c80d176 137#define EM_NAME "Intel(R) PRO/1000 Network Connection "
6d5e2922 138#define EM_VER " 7.2.4"
9c80d176 139
96ced48a
SZ
140#define _EM_DEVICE(id, ret) \
141 { EM_VENDOR_ID, E1000_DEV_ID_##id, ret, EM_NAME #id EM_VER }
142#define EM_EMX_DEVICE(id) _EM_DEVICE(id, -100)
143#define EM_DEVICE(id) _EM_DEVICE(id, 0)
144#define EM_DEVICE_NULL { 0, 0, 0, NULL }
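For reference, the table macros above expand mechanically; a sketch of the first entry is shown below. The -100 used by EM_EMX_DEVICE entries becomes em_probe()'s return value, a low probe priority that appears intended to let the emx(4) driver claim those devices when it is loaded.

/*
 * EM_DEVICE(82540EM) expands, via _EM_DEVICE(82540EM, 0), to roughly:
 *
 *	{ EM_VENDOR_ID, E1000_DEV_ID_82540EM, 0,
 *	  "Intel(R) PRO/1000 Network Connection 82540EM 7.2.4" }
 */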
9c80d176
SZ
145
146static const struct em_vendor_info em_vendor_info_array[] = {
147 EM_DEVICE(82540EM),
148 EM_DEVICE(82540EM_LOM),
149 EM_DEVICE(82540EP),
150 EM_DEVICE(82540EP_LOM),
151 EM_DEVICE(82540EP_LP),
152
153 EM_DEVICE(82541EI),
154 EM_DEVICE(82541ER),
155 EM_DEVICE(82541ER_LOM),
156 EM_DEVICE(82541EI_MOBILE),
157 EM_DEVICE(82541GI),
158 EM_DEVICE(82541GI_LF),
159 EM_DEVICE(82541GI_MOBILE),
160
161 EM_DEVICE(82542),
162
163 EM_DEVICE(82543GC_FIBER),
164 EM_DEVICE(82543GC_COPPER),
165
166 EM_DEVICE(82544EI_COPPER),
167 EM_DEVICE(82544EI_FIBER),
168 EM_DEVICE(82544GC_COPPER),
169 EM_DEVICE(82544GC_LOM),
170
171 EM_DEVICE(82545EM_COPPER),
172 EM_DEVICE(82545EM_FIBER),
173 EM_DEVICE(82545GM_COPPER),
174 EM_DEVICE(82545GM_FIBER),
175 EM_DEVICE(82545GM_SERDES),
176
177 EM_DEVICE(82546EB_COPPER),
178 EM_DEVICE(82546EB_FIBER),
179 EM_DEVICE(82546EB_QUAD_COPPER),
180 EM_DEVICE(82546GB_COPPER),
181 EM_DEVICE(82546GB_FIBER),
182 EM_DEVICE(82546GB_SERDES),
183 EM_DEVICE(82546GB_PCIE),
184 EM_DEVICE(82546GB_QUAD_COPPER),
185 EM_DEVICE(82546GB_QUAD_COPPER_KSP3),
186
187 EM_DEVICE(82547EI),
188 EM_DEVICE(82547EI_MOBILE),
189 EM_DEVICE(82547GI),
190
96ced48a
SZ
191 EM_EMX_DEVICE(82571EB_COPPER),
192 EM_EMX_DEVICE(82571EB_FIBER),
193 EM_EMX_DEVICE(82571EB_SERDES),
194 EM_EMX_DEVICE(82571EB_SERDES_DUAL),
195 EM_EMX_DEVICE(82571EB_SERDES_QUAD),
196 EM_EMX_DEVICE(82571EB_QUAD_COPPER),
75a5634e 197 EM_EMX_DEVICE(82571EB_QUAD_COPPER_BP),
96ced48a
SZ
198 EM_EMX_DEVICE(82571EB_QUAD_COPPER_LP),
199 EM_EMX_DEVICE(82571EB_QUAD_FIBER),
200 EM_EMX_DEVICE(82571PT_QUAD_COPPER),
201
202 EM_EMX_DEVICE(82572EI_COPPER),
203 EM_EMX_DEVICE(82572EI_FIBER),
204 EM_EMX_DEVICE(82572EI_SERDES),
205 EM_EMX_DEVICE(82572EI),
206
207 EM_EMX_DEVICE(82573E),
208 EM_EMX_DEVICE(82573E_IAMT),
209 EM_EMX_DEVICE(82573L),
210
2d0e5700
SZ
211 EM_DEVICE(82583V),
212
96ced48a
SZ
213 EM_EMX_DEVICE(80003ES2LAN_COPPER_SPT),
214 EM_EMX_DEVICE(80003ES2LAN_SERDES_SPT),
215 EM_EMX_DEVICE(80003ES2LAN_COPPER_DPT),
216 EM_EMX_DEVICE(80003ES2LAN_SERDES_DPT),
9c80d176
SZ
217
218 EM_DEVICE(ICH8_IGP_M_AMT),
219 EM_DEVICE(ICH8_IGP_AMT),
220 EM_DEVICE(ICH8_IGP_C),
221 EM_DEVICE(ICH8_IFE),
222 EM_DEVICE(ICH8_IFE_GT),
223 EM_DEVICE(ICH8_IFE_G),
224 EM_DEVICE(ICH8_IGP_M),
2d0e5700 225 EM_DEVICE(ICH8_82567V_3),
9c80d176
SZ
226
227 EM_DEVICE(ICH9_IGP_M_AMT),
228 EM_DEVICE(ICH9_IGP_AMT),
229 EM_DEVICE(ICH9_IGP_C),
230 EM_DEVICE(ICH9_IGP_M),
231 EM_DEVICE(ICH9_IGP_M_V),
232 EM_DEVICE(ICH9_IFE),
233 EM_DEVICE(ICH9_IFE_GT),
234 EM_DEVICE(ICH9_IFE_G),
235 EM_DEVICE(ICH9_BM),
236
96ced48a 237 EM_EMX_DEVICE(82574L),
2d0e5700 238 EM_EMX_DEVICE(82574LA),
9c80d176
SZ
239
240 EM_DEVICE(ICH10_R_BM_LM),
241 EM_DEVICE(ICH10_R_BM_LF),
242 EM_DEVICE(ICH10_R_BM_V),
243 EM_DEVICE(ICH10_D_BM_LM),
244 EM_DEVICE(ICH10_D_BM_LF),
2d0e5700
SZ
245 EM_DEVICE(ICH10_D_BM_V),
246
247 EM_DEVICE(PCH_M_HV_LM),
248 EM_DEVICE(PCH_M_HV_LC),
249 EM_DEVICE(PCH_D_HV_DM),
250 EM_DEVICE(PCH_D_HV_DC),
251
252 EM_DEVICE(PCH2_LV_LM),
253 EM_DEVICE(PCH2_LV_V),
984263bc 254
f647ad3d 255 /* required last entry */
9c80d176 256 EM_DEVICE_NULL
984263bc
MD
257};
258
f647ad3d
JS
259static int em_probe(device_t);
260static int em_attach(device_t);
261static int em_detach(device_t);
262static int em_shutdown(device_t);
87307ba1
SZ
263static int em_suspend(device_t);
264static int em_resume(device_t);
9c80d176
SZ
265
266static void em_init(void *);
267static void em_stop(struct adapter *);
f647ad3d 268static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
9c80d176
SZ
269static void em_start(struct ifnet *);
270#ifdef DEVICE_POLLING
271static void em_poll(struct ifnet *, enum poll_cmd, int);
272#endif
f647ad3d 273static void em_watchdog(struct ifnet *);
f647ad3d
JS
274static void em_media_status(struct ifnet *, struct ifmediareq *);
275static int em_media_change(struct ifnet *);
9c80d176
SZ
276static void em_timer(void *);
277
278static void em_intr(void *);
87ab432b
SZ
279static void em_intr_mask(void *);
280static void em_intr_body(struct adapter *, boolean_t);
9c80d176
SZ
281static void em_rxeof(struct adapter *, int);
282static void em_txeof(struct adapter *);
9f60d74b 283static void em_tx_collect(struct adapter *);
9c80d176 284static void em_tx_purge(struct adapter *);
f647ad3d
JS
285static void em_enable_intr(struct adapter *);
286static void em_disable_intr(struct adapter *);
9c80d176
SZ
287
288static int em_dma_malloc(struct adapter *, bus_size_t,
289 struct em_dma_alloc *);
290static void em_dma_free(struct adapter *, struct em_dma_alloc *);
291static void em_init_tx_ring(struct adapter *);
292static int em_init_rx_ring(struct adapter *);
293static int em_create_tx_ring(struct adapter *);
294static int em_create_rx_ring(struct adapter *);
295static void em_destroy_tx_ring(struct adapter *, int);
296static void em_destroy_rx_ring(struct adapter *, int);
297static int em_newbuf(struct adapter *, int, int);
298static int em_encap(struct adapter *, struct mbuf **);
299static void em_rxcsum(struct adapter *, struct e1000_rx_desc *,
300 struct mbuf *);
9f60d74b 301static int em_txcsum(struct adapter *, struct mbuf *,
9c80d176
SZ
302 uint32_t *, uint32_t *);
303
304static int em_get_hw_info(struct adapter *);
305static int em_is_valid_eaddr(const uint8_t *);
306static int em_alloc_pci_res(struct adapter *);
307static void em_free_pci_res(struct adapter *);
2d0e5700 308static int em_reset(struct adapter *);
9c80d176
SZ
309static void em_setup_ifp(struct adapter *);
310static void em_init_tx_unit(struct adapter *);
311static void em_init_rx_unit(struct adapter *);
312static void em_update_stats(struct adapter *);
f647ad3d
JS
313static void em_set_promisc(struct adapter *);
314static void em_disable_promisc(struct adapter *);
315static void em_set_multi(struct adapter *);
87307ba1 316static void em_update_link_status(struct adapter *);
f647ad3d 317static void em_smartspeed(struct adapter *);
2d0e5700 318static void em_set_itr(struct adapter *, uint32_t);
6d5e2922 319static void em_disable_aspm(struct adapter *);
9c80d176
SZ
320
321/* Hardware workarounds */
f647ad3d
JS
322static int em_82547_fifo_workaround(struct adapter *, int);
323static void em_82547_update_fifo_head(struct adapter *, int);
324static int em_82547_tx_fifo_reset(struct adapter *);
1eca7b82
SZ
325static void em_82547_move_tail(void *);
326static void em_82547_move_tail_serialized(struct adapter *);
9c80d176
SZ
327static uint32_t em_82544_fill_desc(bus_addr_t, uint32_t, PDESC_ARRAY);
328
f647ad3d 329static void em_print_debug_info(struct adapter *);
9c80d176
SZ
330static void em_print_nvm_info(struct adapter *);
331static void em_print_hw_stats(struct adapter *);
332
f647ad3d
JS
333static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
334static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
d0870c72 335static int em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
9f60d74b 336static int em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
9c80d176 337static void em_add_sysctl(struct adapter *adapter);
984263bc 338
9c80d176
SZ
339/* Management and WOL Support */
340static void em_get_mgmt(struct adapter *);
341static void em_rel_mgmt(struct adapter *);
342static void em_get_hw_control(struct adapter *);
343static void em_rel_hw_control(struct adapter *);
344static void em_enable_wol(device_t);
984263bc
MD
345
346static device_method_t em_methods[] = {
347 /* Device interface */
9c80d176
SZ
348 DEVMETHOD(device_probe, em_probe),
349 DEVMETHOD(device_attach, em_attach),
350 DEVMETHOD(device_detach, em_detach),
351 DEVMETHOD(device_shutdown, em_shutdown),
352 DEVMETHOD(device_suspend, em_suspend),
353 DEVMETHOD(device_resume, em_resume),
354 { 0, 0 }
984263bc
MD
355};
356
357static driver_t em_driver = {
9c80d176
SZ
358 "em",
359 em_methods,
360 sizeof(struct adapter),
984263bc
MD
361};
362
363static devclass_t em_devclass;
32832096
MD
364
365DECLARE_DUMMY_MODULE(if_em);
9c80d176 366MODULE_DEPEND(em, ig_hal, 1, 1, 1);
aa2b9d05 367DRIVER_MODULE(if_em, pci, em_driver, em_devclass, NULL, NULL);
984263bc 368
91e8debf
SZ
369/*
370 * Tunables
371 */
9c80d176
SZ
372static int em_int_throttle_ceil = EM_DEFAULT_ITR;
373static int em_rxd = EM_DEFAULT_RXD;
374static int em_txd = EM_DEFAULT_TXD;
053f3ae6 375static int em_smart_pwr_down = 0;
0d366ee7 376
9c80d176
SZ
377/* Controls whether promiscuous also shows bad packets */
378static int em_debug_sbp = FALSE;
0d366ee7 379
053f3ae6
SZ
380static int em_82573_workaround = 1;
381static int em_msi_enable = 1;
05580856 382
d0870c72 383TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
1eca7b82
SZ
384TUNABLE_INT("hw.em.rxd", &em_rxd);
385TUNABLE_INT("hw.em.txd", &em_txd);
386TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
9c80d176 387TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
05580856 388TUNABLE_INT("hw.em.82573_workaround", &em_82573_workaround);
053f3ae6 389TUNABLE_INT("hw.em.msi.enable", &em_msi_enable);
9c80d176
SZ
390
391/* Global used in WOL setup with multiport cards */
392static int em_global_quad_port_a = 0;
393
394/* Set this to one to display debug statistics */
395static int em_display_debug_stats = 0;
0d366ee7 396
07855a48
MD
397#if !defined(KTR_IF_EM)
398#define KTR_IF_EM KTR_ALL
399#endif
400KTR_INFO_MASTER(if_em);
5bf48697
AE
401KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin");
402KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end");
403KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet");
404KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet");
405KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean");
07855a48
MD
406#define logif(name) KTR_LOG(if_em_ ## name)
407
984263bc
MD
408static int
409em_probe(device_t dev)
410{
9c80d176
SZ
411 const struct em_vendor_info *ent;
412 uint16_t vid, did;
984263bc 413
9c80d176
SZ
414 vid = pci_get_vendor(dev);
415 did = pci_get_device(dev);
984263bc 416
9c80d176
SZ
417 for (ent = em_vendor_info_array; ent->desc != NULL; ++ent) {
418 if (vid == ent->vendor_id && did == ent->device_id) {
419 device_set_desc(dev, ent->desc);
dbcd0c9b 420 device_set_async_attach(dev, TRUE);
96ced48a 421 return (ent->ret);
984263bc 422 }
984263bc 423 }
87307ba1 424 return (ENXIO);
984263bc
MD
425}
426
984263bc
MD
427static int
428em_attach(device_t dev)
429{
9c80d176
SZ
430 struct adapter *adapter = device_get_softc(dev);
431 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d
JS
432 int tsize, rsize;
433 int error = 0;
2d0e5700 434 uint16_t eeprom_data, device_id, apme_mask;
87ab432b 435 driver_intr_t *intr_func;
984263bc 436
9c80d176 437 adapter->dev = adapter->osdep.dev = dev;
f647ad3d 438
bf0ecf68
MD
439 callout_init_mp(&adapter->timer);
440 callout_init_mp(&adapter->tx_fifo_timer);
af82d4bb 441
9c80d176
SZ
442 /* Determine hardware and mac info */
443 error = em_get_hw_info(adapter);
444 if (error) {
445 device_printf(dev, "Identify hardware failed\n");
446 goto fail;
f647ad3d
JS
447 }
448
9c80d176
SZ
449 /* Setup PCI resources */
450 error = em_alloc_pci_res(adapter);
451 if (error) {
452 device_printf(dev, "Allocation of PCI resources failed\n");
453 goto fail;
454 }
984263bc 455
9c80d176
SZ
456 /*
457 * For ICH8 and family we need to map the flash memory,
458 * and this must happen after the MAC is identified.
459 */
460 if (adapter->hw.mac.type == e1000_ich8lan ||
2d0e5700 461 adapter->hw.mac.type == e1000_ich9lan ||
9c80d176 462 adapter->hw.mac.type == e1000_ich10lan ||
2d0e5700
SZ
463 adapter->hw.mac.type == e1000_pchlan ||
464 adapter->hw.mac.type == e1000_pch2lan) {
9c80d176
SZ
465 adapter->flash_rid = EM_BAR_FLASH;
466
467 adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
468 &adapter->flash_rid, RF_ACTIVE);
469 if (adapter->flash == NULL) {
470 device_printf(dev, "Mapping of Flash failed\n");
471 error = ENXIO;
472 goto fail;
473 }
474 adapter->osdep.flash_bus_space_tag =
475 rman_get_bustag(adapter->flash);
476 adapter->osdep.flash_bus_space_handle =
477 rman_get_bushandle(adapter->flash);
984263bc 478
9c80d176
SZ
479 /*
480 * This is used in the shared code
481 * XXX this goof is actually not used.
482 */
483 adapter->hw.flash_address = (uint8_t *)adapter->flash;
484 }
0d366ee7 485
9c80d176
SZ
486 /* Do Shared Code initialization */
487 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
488 device_printf(dev, "Setup of Shared code failed\n");
489 error = ENXIO;
490 goto fail;
f647ad3d 491 }
7ea52455 492
9c80d176
SZ
493 e1000_get_bus_info(&adapter->hw);
494
1eca7b82 495 /*
9c80d176 496 * Validate number of transmit and receive descriptors. It
1eca7b82 497 * must not exceed the hardware maximum, and must be a multiple
9c80d176 498 * of E1000_DBA_ALIGN.
1eca7b82 499 */
9c80d176
SZ
500 if ((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN != 0 ||
501 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
502 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
503 em_txd < EM_MIN_TXD) {
1eca7b82 504 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
9c80d176 505 EM_DEFAULT_TXD, em_txd);
1eca7b82
SZ
506 adapter->num_tx_desc = EM_DEFAULT_TXD;
507 } else {
508 adapter->num_tx_desc = em_txd;
509 }
9c80d176
SZ
510 if ((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN != 0 ||
511 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
512 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
513 em_rxd < EM_MIN_RXD) {
1eca7b82 514 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
9c80d176 515 EM_DEFAULT_RXD, em_rxd);
1eca7b82
SZ
516 adapter->num_rx_desc = EM_DEFAULT_RXD;
517 } else {
518 adapter->num_rx_desc = em_rxd;
519 }
520
9c80d176
SZ
521 adapter->hw.mac.autoneg = DO_AUTO_NEG;
522 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
523 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
524 adapter->rx_buffer_len = MCLBYTES;
e94c2bf4 525
9c80d176
SZ
526 /*
527 * Interrupt throttle rate
528 */
529 if (em_int_throttle_ceil == 0) {
530 adapter->int_throttle_ceil = 0;
531 } else {
532 int throttle = em_int_throttle_ceil;
f647ad3d 533
9c80d176
SZ
534 if (throttle < 0)
535 throttle = EM_DEFAULT_ITR;
0d366ee7 536
9c80d176
SZ
537 /* Recalculate the tunable value to get the exact frequency. */
538 throttle = 1000000000 / 256 / throttle;
664c7645
SZ
539
540 /* Upper 16 bits of ITR are reserved and should be zero */
541 if (throttle & 0xffff0000)
542 throttle = 1000000000 / 256 / EM_DEFAULT_ITR;
543
9c80d176
SZ
544 adapter->int_throttle_ceil = 1000000000 / 256 / throttle;
545 }
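A worked example of the conversion above (the value is illustrative, not the driver default): the ITR register counts in 256 ns units, so the tunable is converted to a register value and then back to the exact rate that the value actually yields.

/*
 * hw.em.int_throttle_ceil = 6000 interrupts/s (illustrative):
 *   register value:     1000000000 / 256 / 6000 = 651   (256 ns units)
 *   exact ceiling kept: 1000000000 / 256 / 651  = 6000  interrupts/s
 */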
984263bc 546
9c80d176
SZ
547 e1000_init_script_state_82541(&adapter->hw, TRUE);
548 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
549
550 /* Copper options */
551 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
552 adapter->hw.phy.mdix = AUTO_ALL_MODES;
553 adapter->hw.phy.disable_polarity_correction = FALSE;
554 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
555 }
556
557 /* Set the frame limits assuming standard ethernet sized frames. */
558 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
559 adapter->min_frame_size = ETH_ZLEN + ETHER_CRC_LEN;
984263bc 560
9c80d176
SZ
561 /* This controls when hardware reports transmit completion status. */
562 adapter->hw.mac.report_tx_early = 1;
984263bc 563
87307ba1 564 /*
9c80d176 565 * Create top level busdma tag
984263bc 566 */
9c80d176
SZ
567 error = bus_dma_tag_create(NULL, 1, 0,
568 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
569 NULL, NULL,
570 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
571 0, &adapter->parent_dtag);
572 if (error) {
573 device_printf(dev, "could not create top level DMA tag\n");
af82d4bb 574 goto fail;
9c80d176 575 }
af82d4bb 576
9c80d176
SZ
577 /*
578 * Allocate Transmit Descriptor ring
579 */
580 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
1eca7b82 581 EM_DBA_ALIGN);
87307ba1
SZ
582 error = em_dma_malloc(adapter, tsize, &adapter->txdma);
583 if (error) {
9c80d176 584 device_printf(dev, "Unable to allocate tx_desc memory\n");
af82d4bb 585 goto fail;
984263bc 586 }
9c80d176 587 adapter->tx_desc_base = adapter->txdma.dma_vaddr;
984263bc 588
9c80d176
SZ
589 /*
590 * Allocate Receive Descriptor ring
591 */
592 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
1eca7b82 593 EM_DBA_ALIGN);
87307ba1
SZ
594 error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
595 if (error) {
9ccd8c1f 596 device_printf(dev, "Unable to allocate rx_desc memory\n");
af82d4bb 597 goto fail;
984263bc 598 }
9c80d176
SZ
599 adapter->rx_desc_base = adapter->rxdma.dma_vaddr;
600
2d0e5700
SZ
601 /* Allocate multicast array memory. */
602 adapter->mta = kmalloc(ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
603 M_DEVBUF, M_WAITOK);
604
605 /* Indicate SOL/IDER usage */
606 if (e1000_check_reset_block(&adapter->hw)) {
607 device_printf(dev,
608 "PHY reset is blocked due to SOL/IDER session.\n");
609 }
610
611 /*
612 * Start from a known state; this is important for reading the
613 * NVM and MAC address from it.
614 */
615 e1000_reset_hw(&adapter->hw);
616
9c80d176
SZ
617 /* Make sure we have a good EEPROM before we read from it */
618 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
619 /*
620 * Some PCI-E parts fail the first check due to
621 * the link being in sleep state; call it again, and
622 * if it fails a second time it's a real issue.
623 */
624 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
625 device_printf(dev,
626 "The EEPROM Checksum Is Not Valid\n");
627 error = EIO;
628 goto fail;
629 }
630 }
984263bc 631
984263bc 632 /* Copy the permanent MAC address out of the EEPROM */
9c80d176
SZ
633 if (e1000_read_mac_addr(&adapter->hw) < 0) {
634 device_printf(dev, "EEPROM read error while reading MAC"
635 " address\n");
984263bc 636 error = EIO;
af82d4bb 637 goto fail;
984263bc 638 }
9c80d176 639 if (!em_is_valid_eaddr(adapter->hw.mac.addr)) {
87307ba1 640 device_printf(dev, "Invalid MAC address\n");
984263bc 641 error = EIO;
af82d4bb 642 goto fail;
984263bc
MD
643 }
644
9c80d176
SZ
645 /* Allocate transmit descriptors and buffers */
646 error = em_create_tx_ring(adapter);
647 if (error) {
648 device_printf(dev, "Could not setup transmit structures\n");
649 goto fail;
650 }
651
652 /* Allocate receive descriptors and buffers */
653 error = em_create_rx_ring(adapter);
654 if (error) {
655 device_printf(dev, "Could not setup receive structures\n");
656 goto fail;
657 }
658
659 /* Manually turn off all interrupts */
660 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
661
9c80d176 662 /* Determine if we have to control management hardware */
79878f87
SZ
663 if (e1000_enable_mng_pass_thru(&adapter->hw))
664 adapter->flags |= EM_FLAG_HAS_MGMT;
9c80d176
SZ
665
666 /*
667 * Setup Wake-on-Lan
668 */
2d0e5700
SZ
669 apme_mask = EM_EEPROM_APME;
670 eeprom_data = 0;
9c80d176
SZ
671 switch (adapter->hw.mac.type) {
672 case e1000_82542:
673 case e1000_82543:
674 break;
675
2d0e5700
SZ
676 case e1000_82573:
677 case e1000_82583:
79878f87 678 adapter->flags |= EM_FLAG_HAS_AMT;
2d0e5700
SZ
679 /* FALL THROUGH */
680
9c80d176
SZ
681 case e1000_82546:
682 case e1000_82546_rev_3:
683 case e1000_82571:
2d0e5700 684 case e1000_82572:
9c80d176
SZ
685 case e1000_80003es2lan:
686 if (adapter->hw.bus.func == 1) {
687 e1000_read_nvm(&adapter->hw,
688 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
689 } else {
690 e1000_read_nvm(&adapter->hw,
691 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
692 }
2d0e5700
SZ
693 break;
694
695 case e1000_ich8lan:
696 case e1000_ich9lan:
697 case e1000_ich10lan:
698 case e1000_pchlan:
699 case e1000_pch2lan:
700 apme_mask = E1000_WUC_APME;
79878f87 701 adapter->flags |= EM_FLAG_HAS_AMT;
2d0e5700 702 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
9c80d176
SZ
703 break;
704
705 default:
2d0e5700
SZ
706 e1000_read_nvm(&adapter->hw,
707 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
9c80d176
SZ
708 break;
709 }
2d0e5700
SZ
710 if (eeprom_data & apme_mask)
711 adapter->wol = E1000_WUFC_MAG | E1000_WUFC_MC;
712
9c80d176
SZ
713 /*
714 * We have the eeprom settings, now apply the special cases
715 * where the eeprom may be wrong or the board won't support
716 * wake on lan on a particular port
717 */
718 device_id = pci_get_device(dev);
719 switch (device_id) {
720 case E1000_DEV_ID_82546GB_PCIE:
721 adapter->wol = 0;
722 break;
723
724 case E1000_DEV_ID_82546EB_FIBER:
725 case E1000_DEV_ID_82546GB_FIBER:
726 case E1000_DEV_ID_82571EB_FIBER:
727 /*
728 * Wake events only supported on port A for dual fiber
729 * regardless of eeprom setting
730 */
731 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
732 E1000_STATUS_FUNC_1)
733 adapter->wol = 0;
734 break;
735
736 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
737 case E1000_DEV_ID_82571EB_QUAD_COPPER:
738 case E1000_DEV_ID_82571EB_QUAD_FIBER:
739 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
740 /* if quad port adapter, disable WoL on all but port A */
741 if (em_global_quad_port_a != 0)
742 adapter->wol = 0;
743 /* Reset for multiple quad port adapters */
744 if (++em_global_quad_port_a == 4)
745 em_global_quad_port_a = 0;
746 break;
747 }
748
749 /* XXX disable wol */
750 adapter->wol = 0;
751
2d0e5700
SZ
752 /* Setup OS specific network interface */
753 em_setup_ifp(adapter);
754
755 /* Add sysctl tree; must be called after em_setup_ifp() */
756 em_add_sysctl(adapter);
757
758 /* Reset the hardware */
759 error = em_reset(adapter);
760 if (error) {
761 device_printf(dev, "Unable to reset the hardware\n");
762 goto fail;
763 }
764
765 /* Initialize statistics */
766 em_update_stats(adapter);
767
768 adapter->hw.mac.get_link_status = 1;
769 em_update_link_status(adapter);
770
9c80d176
SZ
771 /* Do we need workaround for 82544 PCI-X adapter? */
772 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
773 adapter->hw.mac.type == e1000_82544)
f647ad3d 774 adapter->pcix_82544 = TRUE;
87307ba1 775 else
f647ad3d 776 adapter->pcix_82544 = FALSE;
af82d4bb 777
9c80d176
SZ
778 if (adapter->pcix_82544) {
779 /*
780 * 82544 on PCI-X may split one TX segment
781 * into two TX descriptors, so we double the number
782 * of spare TX descriptors here.
783 */
784 adapter->spare_tx_desc = 2 * EM_TX_SPARE;
785 } else {
786 adapter->spare_tx_desc = EM_TX_SPARE;
787 }
788
9f60d74b
SZ
789 /*
790 * Keep the following relationship between spare_tx_desc, oact_tx_desc
791 * and tx_int_nsegs:
792 * (spare_tx_desc + EM_TX_RESERVED) <=
793 * oact_tx_desc <= EM_TX_OACTIVE_MAX <= tx_int_nsegs
794 */
795 adapter->oact_tx_desc = adapter->num_tx_desc / 8;
796 if (adapter->oact_tx_desc > EM_TX_OACTIVE_MAX)
797 adapter->oact_tx_desc = EM_TX_OACTIVE_MAX;
798 if (adapter->oact_tx_desc < adapter->spare_tx_desc + EM_TX_RESERVED)
799 adapter->oact_tx_desc = adapter->spare_tx_desc + EM_TX_RESERVED;
800
801 adapter->tx_int_nsegs = adapter->num_tx_desc / 16;
802 if (adapter->tx_int_nsegs < adapter->oact_tx_desc)
803 adapter->tx_int_nsegs = adapter->oact_tx_desc;
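A worked example of the sizing above, assuming a hypothetical 512-descriptor TX ring and that neither EM_TX_OACTIVE_MAX nor the spare_tx_desc floor applies:

/*
 * num_tx_desc = 512 (hypothetical):
 *   oact_tx_desc = 512 / 8  = 64
 *   tx_int_nsegs = 512 / 16 = 32, raised to 64 so that
 *   oact_tx_desc <= tx_int_nsegs as required by the comment above.
 */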
804
2d0e5700 805 /* Non-AMT based hardware can now take control from firmware */
79878f87
SZ
806 if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) ==
807 EM_FLAG_HAS_MGMT && adapter->hw.mac.type >= e1000_82571)
2d0e5700
SZ
808 em_get_hw_control(adapter);
809
87ab432b
SZ
810 /*
811 * Missing Interrupt Following ICR read:
812 *
a835687d
SZ
813 * 82571/82572 specification update errata #76
814 * 82573 specification update errata #31
815 * 82574 specification update errata #12
816 * 82583 specification update errata #4
87ab432b
SZ
817 */
818 intr_func = em_intr;
819 if ((adapter->flags & EM_FLAG_SHARED_INTR) &&
820 (adapter->hw.mac.type == e1000_82571 ||
821 adapter->hw.mac.type == e1000_82572 ||
822 adapter->hw.mac.type == e1000_82573 ||
823 adapter->hw.mac.type == e1000_82574 ||
824 adapter->hw.mac.type == e1000_82583))
825 intr_func = em_intr_mask;
826
9c80d176 827 error = bus_setup_intr(dev, adapter->intr_res, INTR_MPSAFE,
87ab432b 828 intr_func, adapter, &adapter->intr_tag,
9c80d176 829 ifp->if_serializer);
af82d4bb 830 if (error) {
9c80d176
SZ
831 device_printf(dev, "Failed to register interrupt handler");
832 ether_ifdetach(&adapter->arpcom.ac_if);
af82d4bb
JS
833 goto fail;
834 }
835
a749d1d2 836 ifp->if_cpuid = rman_get_cpuid(adapter->intr_res);
9db4b353 837 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
9c80d176 838 return (0);
af82d4bb
JS
839fail:
840 em_detach(dev);
9c80d176 841 return (error);
984263bc
MD
842}
843
984263bc
MD
844static int
845em_detach(device_t dev)
846{
78195a76 847 struct adapter *adapter = device_get_softc(dev);
984263bc 848
af82d4bb 849 if (device_is_attached(dev)) {
9c80d176 850 struct ifnet *ifp = &adapter->arpcom.ac_if;
cdf89432
SZ
851
852 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 853
af82d4bb 854 em_stop(adapter);
9c80d176
SZ
855
856 e1000_phy_hw_reset(&adapter->hw);
857
858 em_rel_mgmt(adapter);
2d0e5700 859 em_rel_hw_control(adapter);
9c80d176
SZ
860
861 if (adapter->wol) {
862 E1000_WRITE_REG(&adapter->hw, E1000_WUC,
863 E1000_WUC_PME_EN);
864 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
865 em_enable_wol(dev);
866 }
867
868 bus_teardown_intr(dev, adapter->intr_res, adapter->intr_tag);
869
cdf89432
SZ
870 lwkt_serialize_exit(ifp->if_serializer);
871
872 ether_ifdetach(ifp);
a19a8754 873 } else if (adapter->memory != NULL) {
2d0e5700 874 em_rel_hw_control(adapter);
7ea52455 875 }
cdf89432
SZ
876 bus_generic_detach(dev);
877
9c80d176
SZ
878 em_free_pci_res(adapter);
879
880 em_destroy_tx_ring(adapter, adapter->num_tx_desc);
881 em_destroy_rx_ring(adapter, adapter->num_rx_desc);
af82d4bb 882
984263bc 883 /* Free Transmit Descriptor ring */
9c80d176 884 if (adapter->tx_desc_base)
9ccd8c1f 885 em_dma_free(adapter, &adapter->txdma);
984263bc 886
984263bc 887 /* Free Receive Descriptor ring */
9c80d176 888 if (adapter->rx_desc_base)
9ccd8c1f 889 em_dma_free(adapter, &adapter->rxdma);
9c80d176
SZ
890
891 /* Free top level busdma tag */
892 if (adapter->parent_dtag != NULL)
893 bus_dma_tag_destroy(adapter->parent_dtag);
984263bc 894
1eca7b82 895 /* Free sysctl tree */
9c80d176 896 if (adapter->sysctl_tree != NULL)
1eca7b82 897 sysctl_ctx_free(&adapter->sysctl_ctx);
984263bc 898
a19a8754
SZ
899 if (adapter->mta != NULL)
900 kfree(adapter->mta, M_DEVBUF);
901
87307ba1 902 return (0);
984263bc
MD
903}
904
984263bc
MD
905static int
906em_shutdown(device_t dev)
907{
9c80d176 908 return em_suspend(dev);
87307ba1
SZ
909}
910
87307ba1
SZ
911static int
912em_suspend(device_t dev)
913{
914 struct adapter *adapter = device_get_softc(dev);
9c80d176 915 struct ifnet *ifp = &adapter->arpcom.ac_if;
87307ba1
SZ
916
917 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 918
87307ba1 919 em_stop(adapter);
9c80d176
SZ
920
921 em_rel_mgmt(adapter);
2d0e5700 922 em_rel_hw_control(adapter);
9c80d176 923
2d0e5700 924 if (adapter->wol) {
9c80d176
SZ
925 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
926 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
927 em_enable_wol(dev);
2d0e5700 928 }
9c80d176 929
87307ba1 930 lwkt_serialize_exit(ifp->if_serializer);
9c80d176
SZ
931
932 return bus_generic_suspend(dev);
87307ba1
SZ
933}
934
935static int
936em_resume(device_t dev)
937{
938 struct adapter *adapter = device_get_softc(dev);
9c80d176 939 struct ifnet *ifp = &adapter->arpcom.ac_if;
87307ba1
SZ
940
941 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 942
87307ba1 943 em_init(adapter);
9c80d176 944 em_get_mgmt(adapter);
9db4b353 945 if_devstart(ifp);
9c80d176 946
87307ba1
SZ
947 lwkt_serialize_exit(ifp->if_serializer);
948
949 return bus_generic_resume(dev);
984263bc
MD
950}
951
984263bc
MD
952static void
953em_start(struct ifnet *ifp)
954{
f647ad3d 955 struct adapter *adapter = ifp->if_softc;
9c80d176 956 struct mbuf *m_head;
984263bc 957
1eca7b82 958 ASSERT_SERIALIZED(ifp->if_serializer);
78195a76 959
87307ba1
SZ
960 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
961 return;
9c80d176 962
9db4b353
SZ
963 if (!adapter->link_active) {
964 ifq_purge(&ifp->if_snd);
f647ad3d 965 return;
9db4b353 966 }
9c80d176 967
e26dc3e9 968 while (!ifq_is_empty(&ifp->if_snd)) {
9f60d74b
SZ
969 /* Do we at least have the minimal number of free TX descriptors? */
970 if (EM_IS_OACTIVE(adapter)) {
971 em_tx_collect(adapter);
9c80d176 972 if (EM_IS_OACTIVE(adapter)) {
9c80d176 973 ifp->if_flags |= IFF_OACTIVE;
9f60d74b 974 adapter->no_tx_desc_avail1++;
9c80d176
SZ
975 break;
976 }
977 }
978
979 logif(pkt_txqueue);
9db4b353 980 m_head = ifq_dequeue(&ifp->if_snd, NULL);
f647ad3d
JS
981 if (m_head == NULL)
982 break;
984263bc 983
9c80d176 984 if (em_encap(adapter, &m_head)) {
002b3a05 985 ifp->if_oerrors++;
9f60d74b
SZ
986 em_tx_collect(adapter);
987 continue;
f647ad3d 988 }
984263bc
MD
989
990 /* Send a copy of the frame to the BPF listener */
b637f170 991 ETHER_BPF_MTAP(ifp, m_head);
87307ba1
SZ
992
993 /* Set timeout in case hardware has problems transmitting. */
994 ifp->if_timer = EM_TX_TIMEOUT;
f647ad3d 995 }
984263bc
MD
996}
997
984263bc 998static int
bd4539cc 999em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
984263bc 1000{
f647ad3d 1001 struct adapter *adapter = ifp->if_softc;
9c80d176 1002 struct ifreq *ifr = (struct ifreq *)data;
1eca7b82 1003 uint16_t eeprom_data = 0;
9c80d176
SZ
1004 int max_frame_size, mask, reinit;
1005 int error = 0;
0d366ee7 1006
9c80d176 1007 ASSERT_SERIALIZED(ifp->if_serializer);
0d366ee7 1008
984263bc 1009 switch (command) {
984263bc 1010 case SIOCSIFMTU:
9c80d176
SZ
1011 switch (adapter->hw.mac.type) {
1012 case e1000_82573:
1eca7b82
SZ
1013 /*
1014 * 82573 only supports jumbo frames
1015 * if ASPM is disabled.
1016 */
9c80d176
SZ
1017 e1000_read_nvm(&adapter->hw,
1018 NVM_INIT_3GIO_3, 1, &eeprom_data);
1019 if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1eca7b82
SZ
1020 max_frame_size = ETHER_MAX_LEN;
1021 break;
1022 }
9c80d176
SZ
1023 /* FALL THROUGH */
1024
1025 /* Limit Jumbo Frame size */
1026 case e1000_82571:
1027 case e1000_82572:
1028 case e1000_ich9lan:
1029 case e1000_ich10lan:
2d0e5700 1030 case e1000_pch2lan:
9c80d176 1031 case e1000_82574:
6d5e2922 1032 case e1000_82583:
9c80d176 1033 case e1000_80003es2lan:
1eca7b82 1034 max_frame_size = 9234;
7ea52455 1035 break;
9c80d176 1036
2d0e5700
SZ
1037 case e1000_pchlan:
1038 max_frame_size = 4096;
1039 break;
1040
9c80d176
SZ
1041 /* Adapters that do not support jumbo frames */
1042 case e1000_82542:
1043 case e1000_ich8lan:
7ea52455
SZ
1044 max_frame_size = ETHER_MAX_LEN;
1045 break;
9c80d176 1046
7ea52455
SZ
1047 default:
1048 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1049 break;
1050 }
9c80d176
SZ
1051 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1052 ETHER_CRC_LEN) {
984263bc 1053 error = EINVAL;
9c80d176 1054 break;
984263bc 1055 }
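The bound above follows from subtracting the Ethernet header and CRC from the maximum frame size; for example:

/*
 * max_frame_size = 9234:          MTU may be up to 9234 - 14 - 4 = 9216
 * max_frame_size = ETHER_MAX_LEN: MTU may be up to 1518 - 14 - 4 = 1500
 * (ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4)
 */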
9c80d176
SZ
1056
1057 ifp->if_mtu = ifr->ifr_mtu;
1058 adapter->max_frame_size =
1059 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1060
1061 if (ifp->if_flags & IFF_RUNNING)
1062 em_init(adapter);
984263bc 1063 break;
9c80d176 1064
984263bc 1065 case SIOCSIFFLAGS:
984263bc 1066 if (ifp->if_flags & IFF_UP) {
9c80d176
SZ
1067 if ((ifp->if_flags & IFF_RUNNING)) {
1068 if ((ifp->if_flags ^ adapter->if_flags) &
1069 (IFF_PROMISC | IFF_ALLMULTI)) {
1070 em_disable_promisc(adapter);
1071 em_set_promisc(adapter);
1072 }
1073 } else {
78195a76 1074 em_init(adapter);
87307ba1 1075 }
9c80d176
SZ
1076 } else if (ifp->if_flags & IFF_RUNNING) {
1077 em_stop(adapter);
984263bc 1078 }
87307ba1 1079 adapter->if_flags = ifp->if_flags;
984263bc 1080 break;
9c80d176 1081
984263bc
MD
1082 case SIOCADDMULTI:
1083 case SIOCDELMULTI:
984263bc
MD
1084 if (ifp->if_flags & IFF_RUNNING) {
1085 em_disable_intr(adapter);
1086 em_set_multi(adapter);
9c80d176
SZ
1087 if (adapter->hw.mac.type == e1000_82542 &&
1088 adapter->hw.revision_id == E1000_REVISION_2)
1089 em_init_rx_unit(adapter);
1eca7b82 1090#ifdef DEVICE_POLLING
9c80d176 1091 if (!(ifp->if_flags & IFF_POLLING))
1eca7b82 1092#endif
9c80d176 1093 em_enable_intr(adapter);
984263bc
MD
1094 }
1095 break;
9c80d176 1096
984263bc 1097 case SIOCSIFMEDIA:
87307ba1 1098 /* Check SOL/IDER usage */
9c80d176
SZ
1099 if (e1000_check_reset_block(&adapter->hw)) {
1100 device_printf(adapter->dev, "Media change is"
1101 " blocked due to SOL/IDER session.\n");
87307ba1
SZ
1102 break;
1103 }
9c80d176
SZ
1104 /* FALL THROUGH */
1105
984263bc 1106 case SIOCGIFMEDIA:
984263bc
MD
1107 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1108 break;
9c80d176 1109
984263bc 1110 case SIOCSIFCAP:
9c80d176 1111 reinit = 0;
984263bc 1112 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
f54de229
SZ
1113 if (mask & IFCAP_RXCSUM) {
1114 ifp->if_capenable ^= IFCAP_RXCSUM;
1eca7b82 1115 reinit = 1;
984263bc 1116 }
f54de229
SZ
1117 if (mask & IFCAP_TXCSUM) {
1118 ifp->if_capenable ^= IFCAP_TXCSUM;
1119 if (ifp->if_capenable & IFCAP_TXCSUM)
1120 ifp->if_hwassist |= EM_CSUM_FEATURES;
1121 else
1122 ifp->if_hwassist &= ~EM_CSUM_FEATURES;
1123 }
1eca7b82
SZ
1124 if (mask & IFCAP_VLAN_HWTAGGING) {
1125 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1126 reinit = 1;
1127 }
9c80d176 1128 if (reinit && (ifp->if_flags & IFF_RUNNING))
1eca7b82 1129 em_init(adapter);
984263bc 1130 break;
9c80d176 1131
984263bc 1132 default:
1eca7b82
SZ
1133 error = ether_ioctl(ifp, command, data);
1134 break;
984263bc 1135 }
87307ba1 1136 return (error);
984263bc
MD
1137}
1138
984263bc
MD
1139static void
1140em_watchdog(struct ifnet *ifp)
1141{
1eca7b82 1142 struct adapter *adapter = ifp->if_softc;
984263bc 1143
9c80d176
SZ
1144 ASSERT_SERIALIZED(ifp->if_serializer);
1145
1146 /*
1147 * The timer is set to 5 every time start queues a packet.
1148 * Then txeof keeps resetting it as long as it cleans at
1149 * least one descriptor.
1150 * Finally, anytime all descriptors are clean the timer is
1151 * set to 0.
1152 */
1153
9f60d74b
SZ
1154 if (E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1155 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) {
1156 /*
1157 * If we reach here, all TX jobs are completed and
1158 * the TX engine should have been idled for some time.
1159 * We don't need to call if_devstart() here.
1160 */
1161 ifp->if_flags &= ~IFF_OACTIVE;
1162 ifp->if_timer = 0;
1163 return;
1164 }
1165
1eca7b82
SZ
1166 /*
1167 * If we are in this routine because of pause frames, then
984263bc
MD
1168 * don't reset the hardware.
1169 */
9c80d176
SZ
1170 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
1171 E1000_STATUS_TXOFF) {
984263bc
MD
1172 ifp->if_timer = EM_TX_TIMEOUT;
1173 return;
1174 }
1175
9c80d176 1176 if (e1000_check_for_link(&adapter->hw) == 0)
f647ad3d 1177 if_printf(ifp, "watchdog timeout -- resetting\n");
984263bc 1178
9c80d176
SZ
1179 ifp->if_oerrors++;
1180 adapter->watchdog_events++;
1181
984263bc
MD
1182 em_init(adapter);
1183
9c80d176
SZ
1184 if (!ifq_is_empty(&ifp->if_snd))
1185 if_devstart(ifp);
984263bc
MD
1186}
1187
984263bc 1188static void
9c80d176 1189em_init(void *xsc)
984263bc 1190{
9c80d176
SZ
1191 struct adapter *adapter = xsc;
1192 struct ifnet *ifp = &adapter->arpcom.ac_if;
1193 device_t dev = adapter->dev;
eac00e59 1194 uint32_t pba;
984263bc 1195
87307ba1
SZ
1196 ASSERT_SERIALIZED(ifp->if_serializer);
1197
984263bc
MD
1198 em_stop(adapter);
1199
eac00e59
SZ
1200 /*
1201 * Packet Buffer Allocation (PBA)
1202 * Writing PBA sets the receive portion of the buffer;
1203 * the remainder is used for the transmit buffer.
1eca7b82
SZ
1204 *
1205 * Devices before the 82547 had a Packet Buffer of 64K.
1206 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1207 * After the 82547 the buffer was reduced to 40K.
1208 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1209 * Note: default does not leave enough room for Jumbo Frame >10k.
eac00e59 1210 */
9c80d176
SZ
1211 switch (adapter->hw.mac.type) {
1212 case e1000_82547:
1213 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1214 if (adapter->max_frame_size > 8192)
eac00e59 1215 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
7ea52455
SZ
1216 else
1217 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
eac00e59
SZ
1218 adapter->tx_fifo_head = 0;
1219 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1220 adapter->tx_fifo_size =
9c80d176 1221 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
7ea52455 1222 break;
9c80d176 1223
87307ba1 1224 /* Total Packet Buffer on these is 48K */
9c80d176
SZ
1225 case e1000_82571:
1226 case e1000_82572:
1227 case e1000_80003es2lan:
7ea52455
SZ
1228 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1229 break;
9c80d176
SZ
1230
1231 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
7ea52455
SZ
1232 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1233 break;
9c80d176
SZ
1234
1235 case e1000_82574:
2d0e5700 1236 case e1000_82583:
9c80d176 1237 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1eca7b82 1238 break;
9c80d176 1239
2d0e5700
SZ
1240 case e1000_ich8lan:
1241 pba = E1000_PBA_8K;
1242 break;
1243
9c80d176
SZ
1244 case e1000_ich9lan:
1245 case e1000_ich10lan:
1246#define E1000_PBA_10K 0x000A
b0ff1d56
MS
1247 pba = E1000_PBA_10K;
1248 break;
9c80d176 1249
2d0e5700
SZ
1250 case e1000_pchlan:
1251 case e1000_pch2lan:
1252 pba = E1000_PBA_26K;
9c80d176
SZ
1253 break;
1254
7ea52455
SZ
1255 default:
1256 /* Devices before 82547 had a Packet Buffer of 64K. */
9c80d176 1257 if (adapter->max_frame_size > 8192)
7ea52455
SZ
1258 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1259 else
1260 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
eac00e59 1261 }
9c80d176 1262 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
a4a205fa 1263
0d366ee7 1264 /* Get the latest mac address, User can use a LAA */
9c80d176
SZ
1265 bcopy(IF_LLADDR(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN);
1266
1267 /* Put the address into the Receive Address Array */
1268 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1269
1270 /*
1271 * With the 82571 adapter, RAR[0] may be overwritten
1272 * when the other port is reset, so we make a duplicate
1273 * in RAR[14] for that eventuality; this assures that
1274 * the interface continues to function.
1275 */
1276 if (adapter->hw.mac.type == e1000_82571) {
1277 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1278 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1279 E1000_RAR_ENTRIES - 1);
1280 }
0d366ee7 1281
2d0e5700
SZ
1282 /* Reset the hardware */
1283 if (em_reset(adapter)) {
1284 device_printf(dev, "Unable to reset the hardware\n");
9c80d176 1285 /* XXX em_stop()? */
984263bc
MD
1286 return;
1287 }
87307ba1 1288 em_update_link_status(adapter);
984263bc 1289
9c80d176
SZ
1290 /* Setup VLAN support, basic and offload if available */
1291 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
984263bc 1292
9c80d176
SZ
1293 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1294 uint32_t ctrl;
1295
1296 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1297 ctrl |= E1000_CTRL_VME;
1298 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
87307ba1
SZ
1299 }
1300
9c80d176
SZ
1301 /* Configure for OS presence */
1302 em_get_mgmt(adapter);
1303
984263bc 1304 /* Prepare transmit descriptors and buffers */
9c80d176
SZ
1305 em_init_tx_ring(adapter);
1306 em_init_tx_unit(adapter);
984263bc
MD
1307
1308 /* Setup Multicast table */
1309 em_set_multi(adapter);
1310
1311 /* Prepare receive descriptors and buffers */
9c80d176
SZ
1312 if (em_init_rx_ring(adapter)) {
1313 device_printf(dev, "Could not setup receive structures\n");
984263bc 1314 em_stop(adapter);
984263bc
MD
1315 return;
1316 }
9c80d176 1317 em_init_rx_unit(adapter);
7ea52455 1318
87307ba1 1319 /* Don't lose promiscuous settings */
0d366ee7 1320 em_set_promisc(adapter);
984263bc 1321
984263bc
MD
1322 ifp->if_flags |= IFF_RUNNING;
1323 ifp->if_flags &= ~IFF_OACTIVE;
1324
9c80d176
SZ
1325 callout_reset(&adapter->timer, hz, em_timer, adapter);
1326 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1327
1328 /* MSI/X configuration for 82574 */
1329 if (adapter->hw.mac.type == e1000_82574) {
1330 int tmp;
1331
1332 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1333 tmp |= E1000_CTRL_EXT_PBA_CLR;
1334 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1335 /*
2d0e5700 1336 * XXX MSIX
9c80d176
SZ
1337 * Set the IVAR - interrupt vector routing.
1338 * Each nibble represents a vector, high bit
1339 * is enable, other 3 bits are the MSIX table
1340 * entry, we map RXQ0 to 0, TXQ0 to 1, and
1341 * Link (other) to 2, hence the magic number.
1342 */
1343 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1344 }
1eca7b82
SZ
1345
1346#ifdef DEVICE_POLLING
9c80d176
SZ
1347 /*
1348 * Only enable interrupts if we are not polling; make sure
1349 * they are off otherwise.
1350 */
1eca7b82
SZ
1351 if (ifp->if_flags & IFF_POLLING)
1352 em_disable_intr(adapter);
1353 else
9c80d176
SZ
1354#endif /* DEVICE_POLLING */
1355 em_enable_intr(adapter);
0d366ee7 1356
2d0e5700 1357 /* AMT based hardware can now take control from firmware */
79878f87
SZ
1358 if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) ==
1359 (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT) &&
2d0e5700
SZ
1360 adapter->hw.mac.type >= e1000_82571)
1361 em_get_hw_control(adapter);
1362
0d366ee7 1363 /* Don't reset the phy next time init gets called */
9c80d176 1364 adapter->hw.phy.reset_disable = TRUE;
984263bc
MD
1365}
1366
984263bc 1367#ifdef DEVICE_POLLING
f647ad3d
JS
1368
1369static void
984263bc
MD
1370em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1371{
f647ad3d
JS
1372 struct adapter *adapter = ifp->if_softc;
1373 uint32_t reg_icr;
984263bc 1374
78195a76
MD
1375 ASSERT_SERIALIZED(ifp->if_serializer);
1376
9c80d176 1377 switch (cmd) {
9c095379
MD
1378 case POLL_REGISTER:
1379 em_disable_intr(adapter);
1380 break;
9c80d176 1381
9c095379 1382 case POLL_DEREGISTER:
f647ad3d 1383 em_enable_intr(adapter);
9c095379 1384 break;
9c80d176 1385
9c095379 1386 case POLL_AND_CHECK_STATUS:
9c80d176 1387 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
f647ad3d 1388 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
9ccd8c1f 1389 callout_stop(&adapter->timer);
9c80d176 1390 adapter->hw.mac.get_link_status = 1;
87307ba1 1391 em_update_link_status(adapter);
9c80d176 1392 callout_reset(&adapter->timer, hz, em_timer, adapter);
f647ad3d 1393 }
9c80d176 1394 /* FALL THROUGH */
9c095379
MD
1395 case POLL_ONLY:
1396 if (ifp->if_flags & IFF_RUNNING) {
87307ba1
SZ
1397 em_rxeof(adapter, count);
1398 em_txeof(adapter);
1eca7b82 1399
9c095379 1400 if (!ifq_is_empty(&ifp->if_snd))
9db4b353 1401 if_devstart(ifp);
9c095379
MD
1402 }
1403 break;
f647ad3d 1404 }
984263bc 1405}
9c095379 1406
984263bc
MD
1407#endif /* DEVICE_POLLING */
1408
984263bc 1409static void
9c80d176 1410em_intr(void *xsc)
984263bc 1411{
87ab432b
SZ
1412 em_intr_body(xsc, TRUE);
1413}
1414
1415static void
1416em_intr_body(struct adapter *adapter, boolean_t chk_asserted)
1417{
9c80d176 1418 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 1419 uint32_t reg_icr;
984263bc 1420
07855a48 1421 logif(intr_beg);
78195a76
MD
1422 ASSERT_SERIALIZED(ifp->if_serializer);
1423
9c80d176
SZ
1424 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1425
87ab432b
SZ
1426 if (chk_asserted &&
1427 ((adapter->hw.mac.type >= e1000_82571 &&
1428 (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
1429 reg_icr == 0)) {
07855a48 1430 logif(intr_end);
984263bc 1431 return;
07855a48 1432 }
984263bc 1433
87307ba1 1434 /*
9c80d176
SZ
1435 * XXX: some laptops trigger several spurious interrupts
1436 * on em(4) when in the resume cycle. The ICR register
1437 * reports all-ones value in this case. Processing such
1438 * interrupts would lead to a freeze. I don't know why.
87307ba1
SZ
1439 */
1440 if (reg_icr == 0xffffffff) {
1441 logif(intr_end);
1442 return;
984263bc
MD
1443 }
1444
79938e61 1445 if (ifp->if_flags & IFF_RUNNING) {
9f60d74b 1446 if (reg_icr &
6643d744 1447 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO))
9f60d74b 1448 em_rxeof(adapter, -1);
6643d744 1449 if (reg_icr & E1000_ICR_TXDW) {
9f60d74b
SZ
1450 em_txeof(adapter);
1451 if (!ifq_is_empty(&ifp->if_snd))
1452 if_devstart(ifp);
1453 }
f647ad3d 1454 }
984263bc 1455
87307ba1
SZ
1456 /* Link status change */
1457 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1458 callout_stop(&adapter->timer);
9c80d176 1459 adapter->hw.mac.get_link_status = 1;
87307ba1 1460 em_update_link_status(adapter);
9c80d176
SZ
1461
1462 /* Deal with TX cruft when link lost */
1463 em_tx_purge(adapter);
1464
1465 callout_reset(&adapter->timer, hz, em_timer, adapter);
87307ba1
SZ
1466 }
1467
1468 if (reg_icr & E1000_ICR_RXO)
1469 adapter->rx_overruns++;
1470
07855a48 1471 logif(intr_end);
984263bc
MD
1472}
1473
984263bc 1474static void
87ab432b
SZ
1475em_intr_mask(void *xsc)
1476{
1477 struct adapter *adapter = xsc;
1478
1479 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
1480 /*
1481 * NOTE:
1482 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
1483 * so don't check it.
1484 */
1485 em_intr_body(adapter, FALSE);
1486 E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
1487}
1488
1489static void
984263bc
MD
1490em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1491{
87307ba1 1492 struct adapter *adapter = ifp->if_softc;
1eca7b82 1493 u_char fiber_type = IFM_1000_SX;
984263bc 1494
78195a76
MD
1495 ASSERT_SERIALIZED(ifp->if_serializer);
1496
87307ba1 1497 em_update_link_status(adapter);
984263bc
MD
1498
1499 ifmr->ifm_status = IFM_AVALID;
1500 ifmr->ifm_active = IFM_ETHER;
1501
1502 if (!adapter->link_active)
1503 return;
1504
1505 ifmr->ifm_status |= IFM_ACTIVE;
1506
9c80d176
SZ
1507 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
1508 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
1509 if (adapter->hw.mac.type == e1000_82545)
1eca7b82
SZ
1510 fiber_type = IFM_1000_LX;
1511 ifmr->ifm_active |= fiber_type | IFM_FDX;
984263bc
MD
1512 } else {
1513 switch (adapter->link_speed) {
1514 case 10:
1515 ifmr->ifm_active |= IFM_10_T;
1516 break;
1517 case 100:
1518 ifmr->ifm_active |= IFM_100_TX;
1519 break;
9c80d176 1520
984263bc 1521 case 1000:
7f259627 1522 ifmr->ifm_active |= IFM_1000_T;
984263bc
MD
1523 break;
1524 }
1525 if (adapter->link_duplex == FULL_DUPLEX)
1526 ifmr->ifm_active |= IFM_FDX;
1527 else
1528 ifmr->ifm_active |= IFM_HDX;
1529 }
984263bc
MD
1530}
1531
984263bc
MD
1532static int
1533em_media_change(struct ifnet *ifp)
1534{
87307ba1
SZ
1535 struct adapter *adapter = ifp->if_softc;
1536 struct ifmedia *ifm = &adapter->media;
984263bc 1537
78195a76 1538 ASSERT_SERIALIZED(ifp->if_serializer);
9c095379 1539
87307ba1
SZ
1540 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1541 return (EINVAL);
1542
984263bc
MD
1543 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1544 case IFM_AUTO:
9c80d176
SZ
1545 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1546 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
984263bc 1547 break;
9c80d176 1548
1eca7b82 1549 case IFM_1000_LX:
984263bc 1550 case IFM_1000_SX:
7f259627 1551 case IFM_1000_T:
9c80d176
SZ
1552 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1553 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
984263bc 1554 break;
9c80d176 1555
984263bc 1556 case IFM_100_TX:
9c80d176
SZ
1557 adapter->hw.mac.autoneg = FALSE;
1558 adapter->hw.phy.autoneg_advertised = 0;
984263bc 1559 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
9c80d176 1560 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
984263bc 1561 else
9c80d176 1562 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
984263bc 1563 break;
9c80d176 1564
984263bc 1565 case IFM_10_T:
9c80d176
SZ
1566 adapter->hw.mac.autoneg = FALSE;
1567 adapter->hw.phy.autoneg_advertised = 0;
984263bc 1568 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
9c80d176 1569 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
984263bc 1570 else
9c80d176 1571 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
984263bc 1572 break;
9c80d176 1573
984263bc 1574 default:
f647ad3d 1575 if_printf(ifp, "Unsupported media type\n");
9c80d176 1576 break;
984263bc 1577 }
9c80d176 1578
f647ad3d 1579 /*
9c80d176 1580 * As the speed/duplex settings may have changed, we need to
f647ad3d
JS
1581 * reset the PHY.
1582 */
9c80d176 1583 adapter->hw.phy.reset_disable = FALSE;
984263bc 1584
78195a76 1585 em_init(adapter);
984263bc 1586
9c80d176 1587 return (0);
9ccd8c1f
JS
1588}
1589
984263bc 1590static int
9c80d176 1591em_encap(struct adapter *adapter, struct mbuf **m_headp)
9ccd8c1f 1592{
9c80d176 1593 bus_dma_segment_t segs[EM_MAX_SCATTER];
1eca7b82 1594 bus_dmamap_t map;
9c80d176
SZ
1595 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1596 struct e1000_tx_desc *ctxd = NULL;
002b3a05 1597 struct mbuf *m_head = *m_headp;
9f60d74b 1598 uint32_t txd_upper, txd_lower, txd_used, cmd = 0;
9c80d176 1599 int maxsegs, nsegs, i, j, first, last = 0, error;
984263bc 1600
9c80d176
SZ
1601 txd_upper = txd_lower = 0;
1602 txd_used = 0;
87307ba1
SZ
1603
1604 /*
9c80d176
SZ
1605 * Capture the first descriptor index; this descriptor
1606 * will have the index of the EOP which is the only one
1607 * that now gets a DONE bit writeback.
87307ba1 1608 */
9c80d176
SZ
1609 first = adapter->next_avail_tx_desc;
1610 tx_buffer = &adapter->tx_buffer_area[first];
1611 tx_buffer_mapped = tx_buffer;
1612 map = tx_buffer->map;
87307ba1 1613
9c80d176
SZ
1614 maxsegs = adapter->num_tx_desc_avail - EM_TX_RESERVED;
1615 KASSERT(maxsegs >= adapter->spare_tx_desc,
ed20d0e3 1616 ("not enough spare TX desc"));
9c80d176
SZ
1617 if (adapter->pcix_82544) {
1618 /* Half it; see the comment in em_attach() */
1619 maxsegs >>= 1;
9ccd8c1f 1620 }
9c80d176
SZ
1621 if (maxsegs > EM_MAX_SCATTER)
1622 maxsegs = EM_MAX_SCATTER;
984263bc 1623
9c80d176
SZ
1624 error = bus_dmamap_load_mbuf_defrag(adapter->txtag, map, m_headp,
1625 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1626 if (error) {
1627 if (error == ENOBUFS)
1628 adapter->mbuf_alloc_failed++;
1629 else
1630 adapter->no_tx_dma_setup++;
984263bc 1631
9c80d176
SZ
1632 m_freem(*m_headp);
1633 *m_headp = NULL;
1634 return error;
7ea52455 1635 }
9c80d176 1636 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
984263bc 1637
9c80d176 1638 m_head = *m_headp;
9f60d74b 1639 adapter->tx_nsegs += nsegs;
9c80d176 1640
002b3a05 1641 if (m_head->m_pkthdr.csum_flags & EM_CSUM_FEATURES) {
9c80d176 1642 /* TX csum offloading will consume one TX desc */
9f60d74b
SZ
1643 adapter->tx_nsegs += em_txcsum(adapter, m_head,
1644 &txd_upper, &txd_lower);
9c80d176 1645 }
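A hedged note on the call above, based only on how its results are used here (em_txcsum() itself is not shown in this excerpt):

/*
 * em_txcsum() returns the number of TX descriptors it consumed --
 * per the comment above, at most one, for a checksum context
 * descriptor presumably built from the mbuf's header layout (the
 * "TX csum context" of the commit title) -- and fills txd_upper/
 * txd_lower with the per-packet checksum bits that are OR'ed into
 * every data descriptor constructed below.
 */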
984263bc 1646 i = adapter->next_avail_tx_desc;
87307ba1
SZ
1647
1648 /* Set up our transmit descriptors */
9c80d176 1649 for (j = 0; j < nsegs; j++) {
9ccd8c1f
JS
1650 /* If adapter is 82544 and on PCIX bus */
1651 if(adapter->pcix_82544) {
87307ba1
SZ
1652 DESC_ARRAY desc_array;
1653 uint32_t array_elements, counter;
1654
9c80d176 1655 /*
f647ad3d
JS
1656 * Check the Address and Length combination and
1657 * split the data accordingly
9ccd8c1f 1658 */
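/*
 * em_82544_fill_desc() reports how many pieces this DMA segment must
 * be split into for the 82544-on-PCI-X workaround; each piece consumes
 * its own TX descriptor in the loop below.
 */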
9c80d176
SZ
1659 array_elements = em_82544_fill_desc(segs[j].ds_addr,
1660 segs[j].ds_len, &desc_array);
9ccd8c1f 1661 for (counter = 0; counter < array_elements; counter++) {
9c80d176
SZ
1662 KKASSERT(txd_used < adapter->num_tx_desc_avail);
1663
9ccd8c1f 1664 tx_buffer = &adapter->tx_buffer_area[i];
9c80d176
SZ
1665 ctxd = &adapter->tx_desc_base[i];
1666
1667 ctxd->buffer_addr = htole64(
1668 desc_array.descriptor[counter].address);
1669 ctxd->lower.data = htole32(
2af74b85 1670 E1000_TXD_CMD_IFCS | txd_lower |
9c80d176
SZ
1671 desc_array.descriptor[counter].length);
1672 ctxd->upper.data = htole32(txd_upper);
87307ba1
SZ
1673
1674 last = i;
9ccd8c1f
JS
1675 if (++i == adapter->num_tx_desc)
1676 i = 0;
1677
9ccd8c1f 1678 txd_used++;
9c80d176 1679 }
9ccd8c1f 1680 } else {
0d366ee7 1681 tx_buffer = &adapter->tx_buffer_area[i];
9c80d176 1682 ctxd = &adapter->tx_desc_base[i];
9ccd8c1f 1683
9c80d176 1684 ctxd->buffer_addr = htole64(segs[j].ds_addr);
2af74b85 1685 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
9c80d176
SZ
1686 txd_lower | segs[j].ds_len);
1687 ctxd->upper.data = htole32(txd_upper);
984263bc 1688
87307ba1 1689 last = i;
0d366ee7
MD
1690 if (++i == adapter->num_tx_desc)
1691 i = 0;
0d366ee7 1692 }
984263bc 1693 }
9ccd8c1f 1694
984263bc 1695 adapter->next_avail_tx_desc = i;
9c80d176
SZ
1696 if (adapter->pcix_82544) {
1697 KKASSERT(adapter->num_tx_desc_avail > txd_used);
9ccd8c1f 1698 adapter->num_tx_desc_avail -= txd_used;
9c80d176
SZ
1699 } else {
1700 KKASSERT(adapter->num_tx_desc_avail > nsegs);
1701 adapter->num_tx_desc_avail -= nsegs;
1702 }
984263bc 1703
9c80d176 1704 /* Handle VLAN tag */
83790f85 1705 if (m_head->m_flags & M_VLANTAG) {
9c80d176
SZ
1706 /* Set the vlan id. */
1707 ctxd->upper.fields.special =
1708 htole16(m_head->m_pkthdr.ether_vlantag);
9ccd8c1f 1709
f647ad3d 1710 /* Tell hardware to add tag */
9c80d176 1711 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
f647ad3d 1712 }
984263bc
MD
1713
1714 tx_buffer->m_head = m_head;
9c80d176 1715 tx_buffer_mapped->map = tx_buffer->map;
1eca7b82 1716 tx_buffer->map = map;
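/*
 * The DMA map that was actually loaded came from the first buffer slot;
 * the assignments above hand it to the slot that now owns the mbuf
 * (the last descriptor) and park that slot's old, unused map back on
 * the first buffer.
 */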
9ccd8c1f 1717
9f60d74b
SZ
1718 if (adapter->tx_nsegs >= adapter->tx_int_nsegs) {
1719 adapter->tx_nsegs = 0;
4e4e8481
SZ
1720
1721 /*
1722 * Report Status (RS) is turned on
1723 * every tx_int_nsegs descriptors.
1724 */
9f60d74b
SZ
1725 cmd = E1000_TXD_CMD_RS;
1726
b4b0a2b4
SZ
1727 /*
1728 * Keep track of the descriptor, which will
1729 * be written back by hardware.
1730 */
9f60d74b
SZ
1731 adapter->tx_dd[adapter->tx_dd_tail] = last;
1732 EM_INC_TXDD_IDX(adapter->tx_dd_tail);
1733 KKASSERT(adapter->tx_dd_tail != adapter->tx_dd_head);
1734 }
1735
9ccd8c1f 1736 /*
984263bc 1737 * Last Descriptor of Packet needs End Of Packet (EOP)
87307ba1 1738 */
9f60d74b 1739 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);
87307ba1
SZ
1740
1741 /*
9c80d176 1742 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
984263bc
MD
1743 * that this frame is available to transmit.
1744 */
9c80d176 1745 if (adapter->hw.mac.type == e1000_82547 &&
984263bc 1746 adapter->link_duplex == HALF_DUPLEX) {
cfefda96 1747 em_82547_move_tail_serialized(adapter);
9ccd8c1f 1748 } else {
9c80d176
SZ
1749 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1750 if (adapter->hw.mac.type == e1000_82547) {
cfefda96 1751 em_82547_update_fifo_head(adapter,
9c80d176 1752 m_head->m_pkthdr.len);
984263bc
MD
1753 }
1754 }
87307ba1 1755 return (0);
984263bc
MD
1756}
1757
9c80d176 1758/*
984263bc 1759 * 82547 workaround to avoid a controller hang in a half-duplex environment.
87307ba1 1760 * The workaround is to avoid queuing a large packet that would span
9c80d176
SZ
 1761 * the internal Tx FIFO ring boundary. When that happens we need to reset
 1762 * the FIFO pointers, and we do so only when the FIFO is quiescent.
1763 */
9c095379 1764static void
1eca7b82 1765em_82547_move_tail_serialized(struct adapter *adapter)
9c095379 1766{
9c80d176
SZ
1767 struct e1000_tx_desc *tx_desc;
1768 uint16_t hw_tdt, sw_tdt, length = 0;
1769 bool eop = 0;
984263bc 1770
9c80d176
SZ
1771 ASSERT_SERIALIZED(adapter->arpcom.ac_if.if_serializer);
1772
1773 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
984263bc 1774 sw_tdt = adapter->next_avail_tx_desc;
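/*
 * Walk the descriptors the hardware has not been told about yet (from
 * the hardware TDT up to the software TDT) and advance the hardware
 * tail only at EOP boundaries; if the FIFO workaround says the frame
 * cannot be queued yet, retry later via the tx_fifo_timer callout.
 */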
f647ad3d 1775
984263bc
MD
1776 while (hw_tdt != sw_tdt) {
1777 tx_desc = &adapter->tx_desc_base[hw_tdt];
1778 length += tx_desc->lower.flags.length;
1779 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
87307ba1 1780 if (++hw_tdt == adapter->num_tx_desc)
984263bc
MD
1781 hw_tdt = 0;
1782
87307ba1 1783 if (eop) {
984263bc 1784 if (em_82547_fifo_workaround(adapter, length)) {
eac00e59 1785 adapter->tx_fifo_wrk_cnt++;
9ccd8c1f
JS
1786 callout_reset(&adapter->tx_fifo_timer, 1,
1787 em_82547_move_tail, adapter);
1788 break;
984263bc 1789 }
9c80d176 1790 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
9ccd8c1f
JS
1791 em_82547_update_fifo_head(adapter, length);
1792 length = 0;
984263bc 1793 }
9c80d176
SZ
1794 }
1795}
1796
1797static void
1798em_82547_move_tail(void *xsc)
1799{
1800 struct adapter *adapter = xsc;
1801 struct ifnet *ifp = &adapter->arpcom.ac_if;
1802
1803 lwkt_serialize_enter(ifp->if_serializer);
1804 em_82547_move_tail_serialized(adapter);
1805 lwkt_serialize_exit(ifp->if_serializer);
984263bc
MD
1806}
1807
1808static int
1809em_82547_fifo_workaround(struct adapter *adapter, int len)
1810{
1811 int fifo_space, fifo_pkt_len;
1812
1eca7b82 1813 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
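/*
 * Example (assuming the 16-byte FIFO header implied by the alignment
 * noted in em_82547_update_fifo_head()): a 1514-byte frame becomes
 * roundup2(1514 + 16, 16) = 1536 FIFO bytes.
 */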
984263bc
MD
1814
1815 if (adapter->link_duplex == HALF_DUPLEX) {
eac00e59 1816 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
984263bc
MD
1817
1818 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
f647ad3d 1819 if (em_82547_tx_fifo_reset(adapter))
87307ba1 1820 return (0);
f647ad3d 1821 else
87307ba1 1822 return (1);
984263bc
MD
1823 }
1824 }
87307ba1 1825 return (0);
984263bc
MD
1826}
1827
1828static void
1829em_82547_update_fifo_head(struct adapter *adapter, int len)
1830{
1eca7b82 1831 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
f647ad3d 1832
984263bc
MD
1833 /* tx_fifo_head is always 16 byte aligned */
1834 adapter->tx_fifo_head += fifo_pkt_len;
eac00e59
SZ
1835 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1836 adapter->tx_fifo_head -= adapter->tx_fifo_size;
984263bc
MD
1837}
1838
984263bc
MD
1839static int
1840em_82547_tx_fifo_reset(struct adapter *adapter)
7ea52455 1841{
984263bc
MD
1842 uint32_t tctl;
1843
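/*
 * Only reset when the TX unit is quiescent: the descriptor head equals
 * the tail, the internal FIFO head/tail and their saved copies match,
 * and no packets are pending in the FIFO (TDFPC == 0).
 */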
9c80d176
SZ
1844 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1845 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1846 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1847 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1848 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1849 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1850 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
984263bc 1851 /* Disable TX unit */
9c80d176
SZ
1852 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1853 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1854 tctl & ~E1000_TCTL_EN);
984263bc
MD
1855
1856 /* Reset FIFO pointers */
9c80d176
SZ
1857 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1858 adapter->tx_head_addr);
1859 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1860 adapter->tx_head_addr);
1861 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1862 adapter->tx_head_addr);
1863 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1864 adapter->tx_head_addr);
984263bc
MD
1865
1866 /* Re-enable TX unit */
9c80d176 1867 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
984263bc
MD
1868 E1000_WRITE_FLUSH(&adapter->hw);
1869
1870 adapter->tx_fifo_head = 0;
eac00e59 1871 adapter->tx_fifo_reset_cnt++;
984263bc 1872
87307ba1 1873 return (TRUE);
eac00e59 1874 } else {
87307ba1 1875 return (FALSE);
984263bc
MD
1876 }
1877}
1878
1879static void
f647ad3d 1880em_set_promisc(struct adapter *adapter)
984263bc 1881{
9c80d176 1882 struct ifnet *ifp = &adapter->arpcom.ac_if;
1eca7b82 1883 uint32_t reg_rctl;
984263bc 1884
9c80d176 1885 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
984263bc
MD
1886
1887 if (ifp->if_flags & IFF_PROMISC) {
1888 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
9c80d176
SZ
1889 /* Turn this on if you want to see bad packets */
1890 if (em_debug_sbp)
1891 reg_rctl |= E1000_RCTL_SBP;
1892 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
984263bc
MD
1893 } else if (ifp->if_flags & IFF_ALLMULTI) {
1894 reg_rctl |= E1000_RCTL_MPE;
1895 reg_rctl &= ~E1000_RCTL_UPE;
9c80d176 1896 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
984263bc 1897 }
984263bc
MD
1898}
1899
1900static void
f647ad3d 1901em_disable_promisc(struct adapter *adapter)
984263bc 1902{
f647ad3d 1903 uint32_t reg_rctl;
984263bc 1904
9c80d176 1905 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
984263bc 1906
9c80d176
SZ
1907 reg_rctl &= ~E1000_RCTL_UPE;
1908 reg_rctl &= ~E1000_RCTL_MPE;
1909 reg_rctl &= ~E1000_RCTL_SBP;
1910 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
984263bc
MD
1911}
1912
984263bc 1913static void
f647ad3d 1914em_set_multi(struct adapter *adapter)
984263bc 1915{
9c80d176 1916 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 1917 struct ifmultiaddr *ifma;
9c80d176 1918 uint32_t reg_rctl = 0;
2d0e5700 1919 uint8_t *mta;
f647ad3d 1920 int mcnt = 0;
f647ad3d 1921
2d0e5700
SZ
1922 mta = adapter->mta;
1923 bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
1924
9c80d176
SZ
1925 if (adapter->hw.mac.type == e1000_82542 &&
1926 adapter->hw.revision_id == E1000_REVISION_2) {
1927 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1928 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1929 e1000_pci_clear_mwi(&adapter->hw);
f647ad3d 1930 reg_rctl |= E1000_RCTL_RST;
9c80d176 1931 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
f647ad3d
JS
1932 msec_delay(5);
1933 }
984263bc 1934
441d34b2 1935 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
f647ad3d
JS
1936 if (ifma->ifma_addr->sa_family != AF_LINK)
1937 continue;
1938
1939 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1940 break;
984263bc 1941
f647ad3d 1942 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
9c80d176 1943 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
f647ad3d
JS
1944 mcnt++;
1945 }
1946
1947 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
9c80d176 1948 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
f647ad3d 1949 reg_rctl |= E1000_RCTL_MPE;
9c80d176 1950 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
7ea52455 1951 } else {
6a5a645e 1952 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
7ea52455 1953 }
f647ad3d 1954
9c80d176
SZ
1955 if (adapter->hw.mac.type == e1000_82542 &&
1956 adapter->hw.revision_id == E1000_REVISION_2) {
1957 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
f647ad3d 1958 reg_rctl &= ~E1000_RCTL_RST;
9c80d176 1959 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
f647ad3d 1960 msec_delay(5);
9c80d176
SZ
1961 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1962 e1000_pci_set_mwi(&adapter->hw);
f647ad3d
JS
1963 }
1964}
984263bc 1965
9c80d176
SZ
1966/*
1967 * This routine checks for link status and updates statistics.
1968 */
984263bc 1969static void
9c80d176 1970em_timer(void *xsc)
984263bc 1971{
9c80d176
SZ
1972 struct adapter *adapter = xsc;
1973 struct ifnet *ifp = &adapter->arpcom.ac_if;
984263bc 1974
78195a76 1975 lwkt_serialize_enter(ifp->if_serializer);
984263bc 1976
87307ba1 1977 em_update_link_status(adapter);
9c80d176
SZ
1978 em_update_stats(adapter);
1979
1980 /* Reset LAA into RAR[0] on 82571 */
1981 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
1982 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1983
1984 if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
984263bc 1985 em_print_hw_stats(adapter);
9c80d176 1986
984263bc
MD
1987 em_smartspeed(adapter);
1988
9c80d176 1989 callout_reset(&adapter->timer, hz, em_timer, adapter);
984263bc 1990
78195a76 1991 lwkt_serialize_exit(ifp->if_serializer);
984263bc
MD
1992}
1993
1994static void
87307ba1 1995em_update_link_status(struct adapter *adapter)
984263bc 1996{
9c80d176
SZ
1997 struct e1000_hw *hw = &adapter->hw;
1998 struct ifnet *ifp = &adapter->arpcom.ac_if;
1999 device_t dev = adapter->dev;
2000 uint32_t link_check = 0;
2001
2002 /* Get the cached link value or read phy for real */
2003 switch (hw->phy.media_type) {
2004 case e1000_media_type_copper:
2005 if (hw->mac.get_link_status) {
2006 /* Do the work to read phy */
2007 e1000_check_for_link(hw);
2008 link_check = !hw->mac.get_link_status;
2009 if (link_check) /* ESB2 fix */
2010 e1000_cfg_on_link_up(hw);
2011 } else {
2012 link_check = TRUE;
984263bc 2013 }
9c80d176
SZ
2014 break;
2015
2016 case e1000_media_type_fiber:
2017 e1000_check_for_link(hw);
2018 link_check =
2019 E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
2020 break;
2021
2022 case e1000_media_type_internal_serdes:
2023 e1000_check_for_link(hw);
2024 link_check = adapter->hw.mac.serdes_has_link;
2025 break;
2026
2027 case e1000_media_type_unknown:
2028 default:
2029 break;
2030 }
2031
2032 /* Now check for a transition */
2033 if (link_check && adapter->link_active == 0) {
2034 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2035 &adapter->link_duplex);
cb5a6be6
SZ
2036
2037 /*
2038 * Check if we should enable/disable SPEED_MODE bit on
2039 * 82571/82572
2040 */
2d0e5700
SZ
2041 if (adapter->link_speed != SPEED_1000 &&
2042 (hw->mac.type == e1000_82571 ||
2043 hw->mac.type == e1000_82572)) {
9c80d176
SZ
2044 int tarc0;
2045
2046 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2d0e5700 2047 tarc0 &= ~SPEED_MODE_BIT;
9c80d176 2048 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
984263bc 2049 }
9c80d176
SZ
2050 if (bootverbose) {
2051 device_printf(dev, "Link is up %d Mbps %s\n",
2052 adapter->link_speed,
2053 ((adapter->link_duplex == FULL_DUPLEX) ?
2054 "Full Duplex" : "Half Duplex"));
2055 }
2056 adapter->link_active = 1;
2057 adapter->smartspeed = 0;
2058 ifp->if_baudrate = adapter->link_speed * 1000000;
2059 ifp->if_link_state = LINK_STATE_UP;
2060 if_link_state_change(ifp);
2061 } else if (!link_check && adapter->link_active == 1) {
2062 ifp->if_baudrate = adapter->link_speed = 0;
2063 adapter->link_duplex = 0;
2064 if (bootverbose)
2065 device_printf(dev, "Link is Down\n");
2066 adapter->link_active = 0;
2067#if 0
2068 /* Link down, disable watchdog */
 2069 ifp->if_timer = 0;
2070#endif
2071 ifp->if_link_state = LINK_STATE_DOWN;
2072 if_link_state_change(ifp);
984263bc 2073 }
984263bc
MD
2074}
2075
984263bc 2076static void
9c80d176 2077em_stop(struct adapter *adapter)
984263bc 2078{
9c80d176
SZ
2079 struct ifnet *ifp = &adapter->arpcom.ac_if;
2080 int i;
984263bc 2081
1eca7b82
SZ
2082 ASSERT_SERIALIZED(ifp->if_serializer);
2083
984263bc 2084 em_disable_intr(adapter);
9c80d176 2085
9ccd8c1f
JS
2086 callout_stop(&adapter->timer);
2087 callout_stop(&adapter->tx_fifo_timer);
984263bc 2088
984263bc 2089 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
af82d4bb 2090 ifp->if_timer = 0;
9c80d176
SZ
2091
2092 e1000_reset_hw(&adapter->hw);
2093 if (adapter->hw.mac.type >= e1000_82544)
2094 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2095
2096 for (i = 0; i < adapter->num_tx_desc; i++) {
2097 struct em_buffer *tx_buffer = &adapter->tx_buffer_area[i];
2098
2099 if (tx_buffer->m_head != NULL) {
2100 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2101 m_freem(tx_buffer->m_head);
2102 tx_buffer->m_head = NULL;
2103 }
9c80d176
SZ
2104 }
2105
2106 for (i = 0; i < adapter->num_rx_desc; i++) {
2107 struct em_buffer *rx_buffer = &adapter->rx_buffer_area[i];
2108
2109 if (rx_buffer->m_head != NULL) {
2110 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2111 m_freem(rx_buffer->m_head);
2112 rx_buffer->m_head = NULL;
2113 }
2114 }
c9ff32cc
SZ
2115
2116 if (adapter->fmp != NULL)
2117 m_freem(adapter->fmp);
2118 adapter->fmp = NULL;
2119 adapter->lmp = NULL;
51e6819f
SZ
2120
2121 adapter->csum_flags = 0;
ed4fc0fe 2122 adapter->csum_lhlen = 0;
51e6819f 2123 adapter->csum_iphlen = 0;
9f60d74b
SZ
2124
2125 adapter->tx_dd_head = 0;
2126 adapter->tx_dd_tail = 0;
2127 adapter->tx_nsegs = 0;
984263bc
MD
2128}
2129
9c80d176
SZ
2130static int
2131em_get_hw_info(struct adapter *adapter)
984263bc
MD
2132{
2133 device_t dev = adapter->dev;
2134
984263bc
MD
2135 /* Save off the information about this board */
2136 adapter->hw.vendor_id = pci_get_vendor(dev);
2137 adapter->hw.device_id = pci_get_device(dev);
f647ad3d
JS
2138 adapter->hw.revision_id = pci_get_revid(dev);
2139 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
9c80d176 2140 adapter->hw.subsystem_device_id = pci_get_subdevice(dev);
984263bc 2141
9c80d176
SZ
2142 /* Do Shared Code Init and Setup */
2143 if (e1000_set_mac_type(&adapter->hw))
2144 return ENXIO;
2145 return 0;
984263bc
MD
2146}
2147
1eca7b82 2148static int
9c80d176 2149em_alloc_pci_res(struct adapter *adapter)
1eca7b82 2150{
9c80d176 2151 device_t dev = adapter->dev;
053f3ae6 2152 u_int intr_flags;
84e26aaa 2153 int val, rid, msi_enable;
9c80d176
SZ
2154
2155 /* Enable bus mastering */
2156 pci_enable_busmaster(dev);
1eca7b82 2157
9c80d176
SZ
2158 adapter->memory_rid = EM_BAR_MEM;
2159 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2160 &adapter->memory_rid, RF_ACTIVE);
2161 if (adapter->memory == NULL) {
1eca7b82 2162 device_printf(dev, "Unable to allocate bus resource: memory\n");
9c80d176 2163 return (ENXIO);
1eca7b82
SZ
2164 }
2165 adapter->osdep.mem_bus_space_tag =
9c80d176 2166 rman_get_bustag(adapter->memory);
1eca7b82 2167 adapter->osdep.mem_bus_space_handle =
9c80d176
SZ
2168 rman_get_bushandle(adapter->memory);
2169
2170 /* XXX This is quite goofy, it is not actually used */
1eca7b82
SZ
2171 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
2172
9c80d176
SZ
2173 /* Only older adapters use IO mapping */
2174 if (adapter->hw.mac.type > e1000_82543 &&
2175 adapter->hw.mac.type < e1000_82571) {
1eca7b82 2176 /* Figure out where our IO BAR is */
9c80d176 2177 for (rid = PCIR_BAR(0); rid < PCIR_CARDBUSCIS;) {
1eca7b82 2178 val = pci_read_config(dev, rid, 4);
87307ba1 2179 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
1eca7b82
SZ
2180 adapter->io_rid = rid;
2181 break;
2182 }
2183 rid += 4;
87307ba1
SZ
2184 /* check for 64bit BAR */
2185 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2186 rid += 4;
1eca7b82 2187 }
9c80d176 2188 if (rid >= PCIR_CARDBUSCIS) {
87307ba1
SZ
2189 device_printf(dev, "Unable to locate IO BAR\n");
2190 return (ENXIO);
9c80d176
SZ
2191 }
2192 adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
2193 &adapter->io_rid, RF_ACTIVE);
2194 if (adapter->ioport == NULL) {
1eca7b82 2195 device_printf(dev, "Unable to allocate bus resource: "
9c80d176
SZ
2196 "ioport\n");
2197 return (ENXIO);
1eca7b82 2198 }
87307ba1
SZ
2199 adapter->hw.io_base = 0;
2200 adapter->osdep.io_bus_space_tag =
9c80d176 2201 rman_get_bustag(adapter->ioport);
87307ba1 2202 adapter->osdep.io_bus_space_handle =
9c80d176 2203 rman_get_bushandle(adapter->ioport);
1eca7b82
SZ
2204 }
2205
84e26aaa 2206 /*
a835687d
SZ
2207 * Don't enable MSI-X on 82574, see:
2208 * 82574 specification update errata #15
2209 *
84e26aaa 2210 * Don't enable MSI on PCI/PCI-X chips, see:
a835687d
SZ
2211 * 82540 specification update errata #6
2212 * 82545 specification update errata #4
84e26aaa
SZ
2213 *
2214 * Don't enable MSI on 82571/82572, see:
a835687d 2215 * 82571/82572 specification update errata #63
84e26aaa
SZ
2216 */
2217 msi_enable = em_msi_enable;
2218 if (msi_enable &&
2219 (!pci_is_pcie(dev) ||
2220 adapter->hw.mac.type == e1000_82571 ||
2221 adapter->hw.mac.type == e1000_82572))
2222 msi_enable = 0;
2223
2224 adapter->intr_type = pci_alloc_1intr(dev, msi_enable,
053f3ae6
SZ
2225 &adapter->intr_rid, &intr_flags);
2226
87ab432b
SZ
2227 if (adapter->intr_type == PCI_INTR_TYPE_LEGACY) {
2228 int unshared;
2229
2230 unshared = device_getenv_int(dev, "irq.unshared", 0);
2231 if (!unshared) {
2232 adapter->flags |= EM_FLAG_SHARED_INTR;
2233 if (bootverbose)
2234 device_printf(dev, "IRQ shared\n");
2235 } else {
2236 intr_flags &= ~RF_SHAREABLE;
2237 if (bootverbose)
2238 device_printf(dev, "IRQ unshared\n");
2239 }
2240 }
2241
9c80d176 2242 adapter->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
053f3ae6 2243 &adapter->intr_rid, intr_flags);
9c80d176 2244 if (adapter->intr_res == NULL) {
1eca7b82 2245 device_printf(dev, "Unable to allocate bus resource: "
9c80d176
SZ
2246 "interrupt\n");
2247 return (ENXIO);
1eca7b82
SZ
2248 }
2249
9c80d176 2250 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1eca7b82 2251 adapter->hw.back = &adapter->osdep;
a483bd34 2252 return (0);
1eca7b82
SZ
2253}
2254
2255static void
9c80d176 2256em_free_pci_res(struct adapter *adapter)
1eca7b82 2257{
9c80d176 2258 device_t dev = adapter->dev;
1eca7b82 2259
9c80d176
SZ
2260 if (adapter->intr_res != NULL) {
2261 bus_release_resource(dev, SYS_RES_IRQ,
2262 adapter->intr_rid, adapter->intr_res);
1eca7b82 2263 }
9c80d176 2264
053f3ae6
SZ
2265 if (adapter->intr_type == PCI_INTR_TYPE_MSI)
2266 pci_release_msi(dev);
2267
9c80d176
SZ
2268 if (adapter->memory != NULL) {
2269 bus_release_resource(dev, SYS_RES_MEMORY,
2270 adapter->memory_rid, adapter->memory);
1eca7b82
SZ
2271 }
2272
9c80d176
SZ
2273 if (adapter->flash != NULL) {
2274 bus_release_resource(dev, SYS_RES_MEMORY,
2275 adapter->flash_rid, adapter->flash);
1eca7b82
SZ
2276 }
2277
9c80d176
SZ
2278 if (adapter->ioport != NULL) {
2279 bus_release_resource(dev, SYS_RES_IOPORT,
2280 adapter->io_rid, adapter->ioport);
1eca7b82
SZ
2281 }
2282}
2283
984263bc 2284static int
2d0e5700 2285em_reset(struct adapter *adapter)
984263bc 2286{
9c80d176
SZ
2287 device_t dev = adapter->dev;
2288 uint16_t rx_buffer_size;
7ea52455 2289
984263bc
MD
2290 /* When hardware is reset, fifo_head is also reset */
2291 adapter->tx_fifo_head = 0;
2292
87307ba1 2293 /* Set up smart power down as default off on newer adapters. */
1eca7b82 2294 if (!em_smart_pwr_down &&
9c80d176
SZ
2295 (adapter->hw.mac.type == e1000_82571 ||
2296 adapter->hw.mac.type == e1000_82572)) {
1eca7b82
SZ
2297 uint16_t phy_tmp = 0;
2298
87307ba1 2299 /* Speed up time to link by disabling smart power down. */
9c80d176
SZ
2300 e1000_read_phy_reg(&adapter->hw,
2301 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1eca7b82 2302 phy_tmp &= ~IGP02E1000_PM_SPD;
9c80d176
SZ
2303 e1000_write_phy_reg(&adapter->hw,
2304 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
1eca7b82
SZ
2305 }
2306
7ea52455 2307 /*
87307ba1
SZ
2308 * These parameters control the automatic generation (Tx) and
2309 * response (Rx) to Ethernet PAUSE frames.
7ea52455
SZ
2310 * - High water mark should allow for at least two frames to be
2311 * received after sending an XOFF.
2312 * - Low water mark works best when it is very near the high water mark.
2313 * This allows the receiver to restart by sending XON when it has
9c80d176
SZ
 2314 * drained a bit. Here we use an arbitrary value of 1500 which will
2315 * restart after one full frame is pulled from the buffer. There
7ea52455
SZ
2316 * could be several smaller frames in the buffer and if so they will
2317 * not trigger the XON until their total number reduces the buffer
2318 * by 1500.
2319 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2320 */
9c80d176
SZ
2321 rx_buffer_size =
2322 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) << 10;
7ea52455 2323
9c80d176
SZ
2324 adapter->hw.fc.high_water = rx_buffer_size -
2325 roundup2(adapter->max_frame_size, 1024);
2326 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
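/*
 * Illustrative example: with a 48KB RX packet buffer (the PBA's low
 * 16 bits hold the size in KB, hence the << 10 above) and a 1518-byte
 * max frame, high_water = 49152 - roundup2(1518, 1024) = 47104 bytes
 * and low_water = 47104 - 1500 = 45604 bytes.
 */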
2327
2328 if (adapter->hw.mac.type == e1000_80003es2lan)
2329 adapter->hw.fc.pause_time = 0xFFFF;
1eca7b82 2330 else
9c80d176 2331 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2d0e5700 2332
9c80d176 2333 adapter->hw.fc.send_xon = TRUE;
2d0e5700 2334
9c80d176 2335 adapter->hw.fc.requested_mode = e1000_fc_full;
7ea52455 2336
2d0e5700
SZ
2337 /* Workaround: no TX flow ctrl for PCH */
2338 if (adapter->hw.mac.type == e1000_pchlan)
2339 adapter->hw.fc.requested_mode = e1000_fc_rx_pause;
2340
 2341 /* Override settings for PCH2LAN; yes, it's magic :) */
2342 if (adapter->hw.mac.type == e1000_pch2lan) {
2343 adapter->hw.fc.high_water = 0x5C20;
2344 adapter->hw.fc.low_water = 0x5048;
2345 adapter->hw.fc.pause_time = 0x0650;
2346 adapter->hw.fc.refresh_time = 0x0400;
2347
2348 /* Jumbos need adjusted PBA */
2349 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU)
2350 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 12);
2351 else
2352 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 26);
2353 }
2354
2355 /* Issue a global reset */
2356 e1000_reset_hw(&adapter->hw);
2357 if (adapter->hw.mac.type >= e1000_82544)
2358 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
6d5e2922 2359 em_disable_aspm(adapter);
2d0e5700 2360
9c80d176
SZ
2361 if (e1000_init_hw(&adapter->hw) < 0) {
2362 device_printf(dev, "Hardware Initialization Failed\n");
87307ba1 2363 return (EIO);
984263bc
MD
2364 }
2365
2d0e5700
SZ
2366 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
2367 e1000_get_phy_info(&adapter->hw);
9c80d176 2368 e1000_check_for_link(&adapter->hw);
984263bc 2369
87307ba1 2370 return (0);
984263bc
MD
2371}
2372
984263bc 2373static void
9c80d176 2374em_setup_ifp(struct adapter *adapter)
984263bc 2375{
9c80d176 2376 struct ifnet *ifp = &adapter->arpcom.ac_if;
984263bc 2377
9c80d176
SZ
2378 if_initname(ifp, device_get_name(adapter->dev),
2379 device_get_unit(adapter->dev));
984263bc
MD
2380 ifp->if_softc = adapter;
2381 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
9c80d176 2382 ifp->if_init = em_init;
984263bc
MD
2383 ifp->if_ioctl = em_ioctl;
2384 ifp->if_start = em_start;
9c095379
MD
2385#ifdef DEVICE_POLLING
2386 ifp->if_poll = em_poll;
2387#endif
984263bc 2388 ifp->if_watchdog = em_watchdog;
e26dc3e9 2389 ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
19b1d5b8 2390 ifq_set_ready(&ifp->if_snd);
984263bc 2391
9c80d176 2392 ether_ifattach(ifp, adapter->hw.mac.addr, NULL);
984263bc 2393
9c80d176
SZ
2394 if (adapter->hw.mac.type >= e1000_82543)
2395 ifp->if_capabilities = IFCAP_HWCSUM;
e095c7aa 2396
9c80d176
SZ
2397 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
2398 ifp->if_capenable = ifp->if_capabilities;
984263bc 2399
9c80d176
SZ
2400 if (ifp->if_capenable & IFCAP_TXCSUM)
2401 ifp->if_hwassist = EM_CSUM_FEATURES;
21fa6062 2402
f647ad3d
JS
2403 /*
2404 * Tell the upper layer(s) we support long frames.
2405 */
2406 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
984263bc 2407
87307ba1 2408 /*
984263bc
MD
2409 * Specify the media types supported by this adapter and register
2410 * callbacks to update media and link information
2411 */
9c80d176
SZ
2412 ifmedia_init(&adapter->media, IFM_IMASK,
2413 em_media_change, em_media_status);
2414 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
2415 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
2416 u_char fiber_type = IFM_1000_SX; /* default type */
2417
2418 if (adapter->hw.mac.type == e1000_82545)
1eca7b82
SZ
2419 fiber_type = IFM_1000_LX;
2420 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
984263bc 2421 0, NULL);
87307ba1 2422 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
984263bc
MD
2423 } else {
2424 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
87307ba1 2425 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
984263bc 2426 0, NULL);
87307ba1 2427 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
984263bc 2428 0, NULL);
87307ba1 2429 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
984263bc 2430 0, NULL);
9c80d176
SZ
2431 if (adapter->hw.phy.type != e1000_phy_ife) {
2432 ifmedia_add(&adapter->media,
2433 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2434 ifmedia_add(&adapter->media,
2435 IFM_ETHER | IFM_1000_T, 0, NULL);
2436 }
984263bc
MD
2437 }
2438 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2439 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
984263bc
MD
2440}
2441
9c80d176
SZ
2442
2443/*
2444 * Workaround for SmartSpeed on 82541 and 82547 controllers
2445 */
984263bc
MD
2446static void
2447em_smartspeed(struct adapter *adapter)
2448{
f647ad3d
JS
2449 uint16_t phy_tmp;
2450
9c80d176
SZ
2451 if (adapter->link_active || adapter->hw.phy.type != e1000_phy_igp ||
2452 adapter->hw.mac.autoneg == 0 ||
2453 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
984263bc
MD
2454 return;
2455
f647ad3d
JS
2456 if (adapter->smartspeed == 0) {
2457 /*
2458 * If Master/Slave config fault is asserted twice,
9c80d176 2459 * we assume a back-to-back connection
f647ad3d 2460 */
9c80d176 2461 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
f647ad3d
JS
2462 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2463 return;
9c80d176 2464 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
f647ad3d 2465 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
9c80d176
SZ
2466 e1000_read_phy_reg(&adapter->hw,
2467 PHY_1000T_CTRL, &phy_tmp);
f647ad3d
JS
2468 if (phy_tmp & CR_1000T_MS_ENABLE) {
2469 phy_tmp &= ~CR_1000T_MS_ENABLE;
9c80d176
SZ
2470 e1000_write_phy_reg(&adapter->hw,
2471 PHY_1000T_CTRL, phy_tmp);
f647ad3d 2472 adapter->smartspeed++;
9c80d176
SZ
2473 if (adapter->hw.mac.autoneg &&
2474 !e1000_phy_setup_autoneg(&adapter->hw) &&
2475 !e1000_read_phy_reg(&adapter->hw,
2476 PHY_CONTROL, &phy_tmp)) {
2477 phy_tmp |= MII_CR_AUTO_NEG_EN |
2478 MII_CR_RESTART_AUTO_NEG;
2479 e1000_write_phy_reg(&adapter->hw,
2480 PHY_CONTROL, phy_tmp);
f647ad3d
JS
2481 }
2482 }
2483 }
87307ba1 2484 return;
f647ad3d
JS
2485 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2486 /* If still no link, perhaps using 2/3 pair cable */
9c80d176 2487 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
f647ad3d 2488 phy_tmp |= CR_1000T_MS_ENABLE;
9c80d176
SZ
2489 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2490 if (adapter->hw.mac.autoneg &&
2491 !e1000_phy_setup_autoneg(&adapter->hw) &&
2492 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2493 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
2494 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
f647ad3d
JS
2495 }
2496 }
9c80d176 2497
f647ad3d
JS
2498 /* Restart process after EM_SMARTSPEED_MAX iterations */
2499 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2500 adapter->smartspeed = 0;
984263bc
MD
2501}
2502
9ccd8c1f
JS
2503static int
2504em_dma_malloc(struct adapter *adapter, bus_size_t size,
87307ba1 2505 struct em_dma_alloc *dma)
9ccd8c1f 2506{
9c80d176
SZ
2507 dma->dma_vaddr = bus_dmamem_coherent_any(adapter->parent_dtag,
2508 EM_DBA_ALIGN, size, BUS_DMA_WAITOK,
2509 &dma->dma_tag, &dma->dma_map,
2510 &dma->dma_paddr);
2511 if (dma->dma_vaddr == NULL)
2512 return ENOMEM;
2513 else
2514 return 0;
9ccd8c1f
JS
2515}
2516
2517static void
2518em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2519{
9c80d176
SZ
2520 if (dma->dma_tag == NULL)
2521 return;
2522 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2523 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2524 bus_dma_tag_destroy(dma->dma_tag);
984263bc
MD
2525}
2526
984263bc 2527static int
9c80d176 2528em_create_tx_ring(struct adapter *adapter)
984263bc 2529{
9c80d176 2530 device_t dev = adapter->dev;
1eca7b82 2531 struct em_buffer *tx_buffer;
1eca7b82
SZ
2532 int error, i;
2533
87307ba1
SZ
2534 adapter->tx_buffer_area =
2535 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc,
2536 M_DEVBUF, M_WAITOK | M_ZERO);
984263bc 2537
9c80d176
SZ
2538 /*
2539 * Create DMA tags for tx buffers
2540 */
2541 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */
2542 1, 0, /* alignment, bounds */
2543 BUS_SPACE_MAXADDR, /* lowaddr */
2544 BUS_SPACE_MAXADDR, /* highaddr */
2545 NULL, NULL, /* filter, filterarg */
2546 EM_TSO_SIZE, /* maxsize */
2547 EM_MAX_SCATTER, /* nsegments */
2548 EM_MAX_SEGSIZE, /* maxsegsize */
2549 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
2550 BUS_DMA_ONEBPAGE, /* flags */
2551 &adapter->txtag);
2552 if (error) {
2553 device_printf(dev, "Unable to allocate TX DMA tag\n");
2554 kfree(adapter->tx_buffer_area, M_DEVBUF);
2555 adapter->tx_buffer_area = NULL;
2556 return error;
2557 }
2558
2559 /*
2560 * Create DMA maps for tx buffers
2561 */
1eca7b82 2562 for (i = 0; i < adapter->num_tx_desc; i++) {
9c80d176
SZ
2563 tx_buffer = &adapter->tx_buffer_area[i];
2564
2565 error = bus_dmamap_create(adapter->txtag,
2566 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2567 &tx_buffer->map);
1eca7b82 2568 if (error) {
9c80d176
SZ
2569 device_printf(dev, "Unable to create TX DMA map\n");
2570 em_destroy_tx_ring(adapter, i);
2571 return error;
1eca7b82 2572 }
1eca7b82 2573 }
9c80d176
SZ
2574 return (0);
2575}
9ccd8c1f 2576
9c80d176
SZ
2577static void
2578em_init_tx_ring(struct adapter *adapter)
2579{
2580 /* Clear the old ring contents */
2581 bzero(adapter->tx_desc_base,
2582 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2583
2584 /* Reset state */
87307ba1
SZ
2585 adapter->next_avail_tx_desc = 0;
2586 adapter->next_tx_to_clean = 0;
984263bc 2587 adapter->num_tx_desc_avail = adapter->num_tx_desc;
984263bc
MD
2588}
2589
984263bc 2590static void
9c80d176 2591em_init_tx_unit(struct adapter *adapter)
984263bc 2592{
9c80d176 2593 uint32_t tctl, tarc, tipg = 0;
9ccd8c1f
JS
2594 uint64_t bus_addr;
2595
984263bc 2596 /* Setup the Base and Length of the Tx Descriptor Ring */
9ccd8c1f 2597 bus_addr = adapter->txdma.dma_paddr;
9c80d176
SZ
2598 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2599 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2600 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2601 (uint32_t)(bus_addr >> 32));
2602 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2603 (uint32_t)bus_addr);
984263bc 2604 /* Setup the HW Tx Head and Tail descriptor pointers */
9c80d176
SZ
2605 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2606 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
984263bc 2607
984263bc 2608 /* Set the default values for the Tx Inter Packet Gap timer */
9c80d176
SZ
2609 switch (adapter->hw.mac.type) {
2610 case e1000_82542:
2611 tipg = DEFAULT_82542_TIPG_IPGT;
2612 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2613 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
984263bc 2614 break;
9c80d176
SZ
2615
2616 case e1000_80003es2lan:
2617 tipg = DEFAULT_82543_TIPG_IPGR1;
2618 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
2619 E1000_TIPG_IPGR2_SHIFT;
1eca7b82 2620 break;
9c80d176 2621
984263bc 2622 default:
9c80d176
SZ
2623 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
2624 adapter->hw.phy.media_type ==
2625 e1000_media_type_internal_serdes)
2626 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
984263bc 2627 else
9c80d176
SZ
2628 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2629 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2630 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2631 break;
2632 }
2633
2634 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
91e8debf
SZ
2635
2636 /* NOTE: 0 is not allowed for TIDV */
2637 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, 1);
2638 if(adapter->hw.mac.type >= e1000_82540)
2639 E1000_WRITE_REG(&adapter->hw, E1000_TADV, 0);
984263bc 2640
9c80d176
SZ
2641 if (adapter->hw.mac.type == e1000_82571 ||
2642 adapter->hw.mac.type == e1000_82572) {
2643 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
2644 tarc |= SPEED_MODE_BIT;
2645 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2646 } else if (adapter->hw.mac.type == e1000_80003es2lan) {
2647 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
2648 tarc |= 1;
2649 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2650 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
2651 tarc |= 1;
2652 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
1eca7b82
SZ
2653 }
2654
984263bc 2655 /* Program the Transmit Control Register */
9c80d176
SZ
2656 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2657 tctl &= ~E1000_TCTL_CT;
2658 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2659 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2660
2661 if (adapter->hw.mac.type >= e1000_82571)
2662 tctl |= E1000_TCTL_MULR;
1eca7b82 2663
87307ba1 2664 /* This write will effectively turn on the transmit unit. */
9c80d176 2665 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
984263bc
MD
2666}
2667
984263bc 2668static void
9c80d176 2669em_destroy_tx_ring(struct adapter *adapter, int ndesc)
984263bc 2670{
f647ad3d
JS
2671 struct em_buffer *tx_buffer;
2672 int i;
984263bc 2673
9c80d176
SZ
2674 if (adapter->tx_buffer_area == NULL)
2675 return;
984263bc 2676
9c80d176
SZ
2677 for (i = 0; i < ndesc; i++) {
2678 tx_buffer = &adapter->tx_buffer_area[i];
1eca7b82 2679
9c80d176
SZ
2680 KKASSERT(tx_buffer->m_head == NULL);
2681 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
9ccd8c1f 2682 }
9c80d176
SZ
2683 bus_dma_tag_destroy(adapter->txtag);
2684
2685 kfree(adapter->tx_buffer_area, M_DEVBUF);
2686 adapter->tx_buffer_area = NULL;
984263bc
MD
2687}
2688
9c80d176
SZ
2689/*
2690 * The offload context needs to be set when we transfer the first
2691 * packet of a particular protocol (TCP/UDP). This routine has been
002b3a05 2692 * enhanced to deal with inserted VLAN headers.
51e6819f
SZ
2693 *
2694 * If the new packet's ether header length, ip header length and
 2695 * csum offloading type are the same as the previous packet's, we should
2696 * avoid allocating a new csum context descriptor; mainly to take
2697 * advantage of the pipeline effect of the TX data read request.
9f60d74b
SZ
2698 *
 2699 * This function returns the number of TX descriptors allocated for the
2700 * csum context.
9c80d176 2701 */
9f60d74b 2702static int
9c80d176
SZ
2703em_txcsum(struct adapter *adapter, struct mbuf *mp,
2704 uint32_t *txd_upper, uint32_t *txd_lower)
984263bc 2705{
9c80d176 2706 struct e1000_context_desc *TXD;
51e6819f 2707 int curr_txd, ehdrlen, csum_flags;
9c80d176 2708 uint32_t cmd, hdr_len, ip_hlen;
984263bc 2709
51e6819f 2710 csum_flags = mp->m_pkthdr.csum_flags & EM_CSUM_FEATURES;
ed4fc0fe
SZ
2711 ip_hlen = mp->m_pkthdr.csum_iphlen;
2712 ehdrlen = mp->m_pkthdr.csum_lhlen;
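/*
 * Both header lengths come straight from the mbuf packet header
 * (csum_lhlen and csum_iphlen), which the network stack has already
 * filled in, so no header parsing is needed here.
 */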
51e6819f 2713
ed4fc0fe 2714 if (adapter->csum_lhlen == ehdrlen &&
51e6819f
SZ
2715 adapter->csum_iphlen == ip_hlen &&
2716 adapter->csum_flags == csum_flags) {
2717 /*
2718 * Same csum offload context as the previous packets;
2719 * just return.
2720 */
2721 *txd_upper = adapter->csum_txd_upper;
2722 *txd_lower = adapter->csum_txd_lower;
9f60d74b 2723 return 0;
984263bc
MD
2724 }
2725
51e6819f
SZ
2726 /*
2727 * Setup a new csum offload context.
2728 */
2729
2730 curr_txd = adapter->next_avail_tx_desc;
51e6819f
SZ
2731 TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd];
2732
2733 cmd = 0;
2734
2735 /* Setup of IP header checksum. */
2736 if (csum_flags & CSUM_IP) {
2737 /*
2738 * Start offset for header checksum calculation.
2739 * End offset for header checksum calculation.
2740 * Offset of place to put the checksum.
2741 */
2742 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2743 TXD->lower_setup.ip_fields.ipcse =
2744 htole16(ehdrlen + ip_hlen - 1);
2745 TXD->lower_setup.ip_fields.ipcso =
2746 ehdrlen + offsetof(struct ip, ip_sum);
2747 cmd |= E1000_TXD_CMD_IP;
2748 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2749 }
2750 hdr_len = ehdrlen + ip_hlen;
2751
2752 if (csum_flags & CSUM_TCP) {
002b3a05
SZ
2753 /*
2754 * Start offset for payload checksum calculation.
2755 * End offset for payload checksum calculation.
2756 * Offset of place to put the checksum.
2757 */
2758 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2759 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2760 TXD->upper_setup.tcp_fields.tucso =
2761 hdr_len + offsetof(struct tcphdr, th_sum);
2762 cmd |= E1000_TXD_CMD_TCP;
2763 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
51e6819f 2764 } else if (csum_flags & CSUM_UDP) {
002b3a05
SZ
2765 /*
2766 * Start offset for header checksum calculation.
2767 * End offset for header checksum calculation.
2768 * Offset of place to put the checksum.
2769 */
2770 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2771 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2772 TXD->upper_setup.tcp_fields.tucso =
2773 hdr_len + offsetof(struct udphdr, uh_sum);
2774 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
9c80d176
SZ
2775 }
2776
2777 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
2778 E1000_TXD_DTYP_D; /* Data descr */
51e6819f
SZ
2779
2780 /* Save the information for this csum offloading context */
ed4fc0fe 2781 adapter->csum_lhlen = ehdrlen;
51e6819f
SZ
2782 adapter->csum_iphlen = ip_hlen;
2783 adapter->csum_flags = csum_flags;
2784 adapter->csum_txd_upper = *txd_upper;
2785 adapter->csum_txd_lower = *txd_lower;
2786
9c80d176
SZ
2787 TXD->tcp_seg_setup.data = htole32(0);
2788 TXD->cmd_and_length =
2af74b85 2789 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);
984263bc
MD
2790
2791 if (++curr_txd == adapter->num_tx_desc)
2792 curr_txd = 0;
2793
9c80d176 2794 KKASSERT(adapter->num_tx_desc_avail > 0);
984263bc 2795 adapter->num_tx_desc_avail--;
9c80d176 2796
984263bc 2797 adapter->next_avail_tx_desc = curr_txd;
9f60d74b 2798 return 1;
984263bc
MD
2799}
2800
984263bc 2801static void
87307ba1 2802em_txeof(struct adapter *adapter)
984263bc 2803{
9c80d176 2804 struct ifnet *ifp = &adapter->arpcom.ac_if;
9f60d74b
SZ
2805 struct em_buffer *tx_buffer;
2806 int first, num_avail;
2807
2808 if (adapter->tx_dd_head == adapter->tx_dd_tail)
2809 return;
984263bc 2810
f647ad3d
JS
2811 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2812 return;
984263bc 2813
9c80d176 2814 num_avail = adapter->num_tx_desc_avail;
87307ba1 2815 first = adapter->next_tx_to_clean;
9c80d176 2816
9f60d74b 2817 while (adapter->tx_dd_head != adapter->tx_dd_tail) {
4e499730 2818 struct e1000_tx_desc *tx_desc;
9f60d74b 2819 int dd_idx = adapter->tx_dd[adapter->tx_dd_head];
984263bc 2820
9f60d74b 2821 tx_desc = &adapter->tx_desc_base[dd_idx];
9f60d74b
SZ
2822 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2823 EM_INC_TXDD_IDX(adapter->tx_dd_head);
984263bc 2824
9f60d74b
SZ
2825 if (++dd_idx == adapter->num_tx_desc)
2826 dd_idx = 0;
9c80d176 2827
9f60d74b 2828 while (first != dd_idx) {
edbfa193
SZ
2829 logif(pkt_txclean);
2830
9f60d74b
SZ
2831 num_avail++;
2832
4e499730 2833 tx_buffer = &adapter->tx_buffer_area[first];
9f60d74b
SZ
2834 if (tx_buffer->m_head) {
2835 ifp->if_opackets++;
2836 bus_dmamap_unload(adapter->txtag,
2837 tx_buffer->map);
2838 m_freem(tx_buffer->m_head);
2839 tx_buffer->m_head = NULL;
2840 }
2841
2842 if (++first == adapter->num_tx_desc)
2843 first = 0;
2844 }
87307ba1
SZ
2845 } else {
2846 break;
2847 }
f647ad3d 2848 }
9f60d74b
SZ
2849 adapter->next_tx_to_clean = first;
2850 adapter->num_tx_desc_avail = num_avail;
2851
2852 if (adapter->tx_dd_head == adapter->tx_dd_tail) {
2853 adapter->tx_dd_head = 0;
2854 adapter->tx_dd_tail = 0;
2855 }
2856
2857 if (!EM_IS_OACTIVE(adapter)) {
2858 ifp->if_flags &= ~IFF_OACTIVE;
2859
2860 /* All clean, turn off the timer */
2861 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2862 ifp->if_timer = 0;
2863 }
2864}
2865
2866static void
2867em_tx_collect(struct adapter *adapter)
2868{
2869 struct ifnet *ifp = &adapter->arpcom.ac_if;
9f60d74b
SZ
2870 struct em_buffer *tx_buffer;
2871 int tdh, first, num_avail, dd_idx = -1;
2872
2873 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2874 return;
2875
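/*
 * Reclaim based on the hardware head pointer (TDH) rather than waiting
 * for DD write-backs; em_tx_purge() relies on this when the link has
 * been lost.
 */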
2876 tdh = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
2877 if (tdh == adapter->next_tx_to_clean)
2878 return;
2879
2880 if (adapter->tx_dd_head != adapter->tx_dd_tail)
2881 dd_idx = adapter->tx_dd[adapter->tx_dd_head];
2882
2883 num_avail = adapter->num_tx_desc_avail;
2884 first = adapter->next_tx_to_clean;
2885
2886 while (first != tdh) {
edbfa193
SZ
2887 logif(pkt_txclean);
2888
9f60d74b
SZ
2889 num_avail++;
2890
4e499730 2891 tx_buffer = &adapter->tx_buffer_area[first];
9f60d74b
SZ
2892 if (tx_buffer->m_head) {
2893 ifp->if_opackets++;
2894 bus_dmamap_unload(adapter->txtag,
2895 tx_buffer->map);
2896 m_freem(tx_buffer->m_head);
2897 tx_buffer->m_head = NULL;
2898 }
2899
2900 if (first == dd_idx) {
2901 EM_INC_TXDD_IDX(adapter->tx_dd_head);
2902 if (adapter->tx_dd_head == adapter->tx_dd_tail) {
2903 adapter->tx_dd_head = 0;
2904 adapter->tx_dd_tail = 0;
2905 dd_idx = -1;
2906 } else {
2907 dd_idx = adapter->tx_dd[adapter->tx_dd_head];
2908 }
2909 }
2910
2911 if (++first == adapter->num_tx_desc)
2912 first = 0;
2913 }
2914 adapter->next_tx_to_clean = first;
9c80d176 2915 adapter->num_tx_desc_avail = num_avail;
984263bc 2916
9f60d74b 2917 if (!EM_IS_OACTIVE(adapter)) {
9c80d176 2918 ifp->if_flags &= ~IFF_OACTIVE;
afa68aa1 2919
9c80d176
SZ
2920 /* All clean, turn off the timer */
2921 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2922 ifp->if_timer = 0;
2923 }
2924}
984263bc 2925
9c80d176
SZ
2926/*
 2927 * When the link is lost there is sometimes work still left in the TX ring,
 2928 * which would result in a watchdog; rather than allow that, do an
 2929 * attempted cleanup and then reinit here. Note that this has been
 2930 * seen mostly with fiber adapters.
2931 */
2932static void
2933em_tx_purge(struct adapter *adapter)
2934{
2935 struct ifnet *ifp = &adapter->arpcom.ac_if;
2936
2937 if (!adapter->link_active && ifp->if_timer) {
9f60d74b 2938 em_tx_collect(adapter);
9c80d176
SZ
2939 if (ifp->if_timer) {
2940 if_printf(ifp, "Link lost, TX pending, reinit\n");
f647ad3d 2941 ifp->if_timer = 0;
9c80d176
SZ
2942 em_init(adapter);
2943 }
f647ad3d 2944 }
984263bc
MD
2945}
2946
984263bc 2947static int
9c80d176 2948em_newbuf(struct adapter *adapter, int i, int init)
984263bc 2949{
9c80d176
SZ
2950 struct mbuf *m;
2951 bus_dma_segment_t seg;
2952 bus_dmamap_t map;
9ccd8c1f 2953 struct em_buffer *rx_buffer;
9c80d176
SZ
2954 int error, nseg;
2955
2956 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
2957 if (m == NULL) {
2958 adapter->mbuf_cluster_failed++;
2959 if (init) {
2960 if_printf(&adapter->arpcom.ac_if,
2961 "Unable to allocate RX mbuf\n");
984263bc 2962 }
9c80d176 2963 return (ENOBUFS);
984263bc 2964 }
9c80d176 2965 m->m_len = m->m_pkthdr.len = MCLBYTES;
87307ba1 2966
9c80d176
SZ
2967 if (adapter->max_frame_size <= MCLBYTES - ETHER_ALIGN)
2968 m_adj(m, ETHER_ALIGN);
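/*
 * The 2-byte ETHER_ALIGN adjustment above shifts the Ethernet header
 * so that the IP header that follows it lands on a 4-byte boundary.
 */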
9ccd8c1f 2969
9c80d176
SZ
2970 error = bus_dmamap_load_mbuf_segment(adapter->rxtag,
2971 adapter->rx_sparemap, m,
2972 &seg, 1, &nseg, BUS_DMA_NOWAIT);
9ccd8c1f 2973 if (error) {
9c80d176
SZ
2974 m_freem(m);
2975 if (init) {
2976 if_printf(&adapter->arpcom.ac_if,
2977 "Unable to load RX mbuf\n");
2978 }
87307ba1 2979 return (error);
9ccd8c1f 2980 }
984263bc 2981
9c80d176
SZ
2982 rx_buffer = &adapter->rx_buffer_area[i];
2983 if (rx_buffer->m_head != NULL)
2984 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2985
2986 map = rx_buffer->map;
2987 rx_buffer->map = adapter->rx_sparemap;
2988 adapter->rx_sparemap = map;
2989
2990 rx_buffer->m_head = m;
2991
2992 adapter->rx_desc_base[i].buffer_addr = htole64(seg.ds_addr);
87307ba1 2993 return (0);
984263bc
MD
2994}
2995
984263bc 2996static int
9c80d176 2997em_create_rx_ring(struct adapter *adapter)
984263bc 2998{
9c80d176 2999 device_t dev = adapter->dev;
9ccd8c1f 3000 struct em_buffer *rx_buffer;
9c80d176
SZ
3001 int i, error;
3002
3003 adapter->rx_buffer_area =
3004 kmalloc(sizeof(struct em_buffer) * adapter->num_rx_desc,
3005 M_DEVBUF, M_WAITOK | M_ZERO);
9ccd8c1f 3006
9c80d176
SZ
3007 /*
3008 * Create DMA tag for rx buffers
3009 */
3010 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */
3011 1, 0, /* alignment, bounds */
3012 BUS_SPACE_MAXADDR, /* lowaddr */
3013 BUS_SPACE_MAXADDR, /* highaddr */
3014 NULL, NULL, /* filter, filterarg */
3015 MCLBYTES, /* maxsize */
3016 1, /* nsegments */
3017 MCLBYTES, /* maxsegsize */
3018 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
3019 &adapter->rxtag);
87307ba1 3020 if (error) {
9c80d176
SZ
3021 device_printf(dev, "Unable to allocate RX DMA tag\n");
3022 kfree(adapter->rx_buffer_area, M_DEVBUF);
3023 adapter->rx_buffer_area = NULL;
3024 return error;
3025 }
3026
3027 /*
3028 * Create spare DMA map for rx buffers
3029 */
3030 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK,
3031 &adapter->rx_sparemap);
3032 if (error) {
3033 device_printf(dev, "Unable to create spare RX DMA map\n");
3034 bus_dma_tag_destroy(adapter->rxtag);
3035 kfree(adapter->rx_buffer_area, M_DEVBUF);
3036 adapter->rx_buffer_area = NULL;
3037 return error;
9ccd8c1f 3038 }
9c80d176
SZ
3039
3040 /*
3041 * Create DMA maps for rx buffers
3042 */
3043 for (i = 0; i < adapter->num_rx_desc; i++) {
3044 rx_buffer = &adapter->rx_buffer_area[i];
3045
3046 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK,
9ccd8c1f 3047 &rx_buffer->map);
87307ba1 3048 if (error) {
9c80d176
SZ
3049 device_printf(dev, "Unable to create RX DMA map\n");
3050 em_destroy_rx_ring(adapter, i);
3051 return error;
9ccd8c1f 3052 }
984263bc 3053 }
87307ba1 3054 return (0);
984263bc
MD
3055}
3056
984263bc 3057static int
9c80d176 3058em_init_rx_ring(struct adapter *adapter)
984263bc 3059{
9c80d176 3060 int i, error;
984263bc 3061
9c80d176 3062 /* Reset descriptor ring */
87307ba1 3063 bzero(adapter->rx_desc_base,
9c80d176 3064 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
87307ba1 3065
9c80d176
SZ
3066 /* Allocate new ones. */
3067 for (i = 0; i < adapter->num_rx_desc; i++) {
3068 error = em_newbuf(adapter, i, 1);
3069 if (error)
3070 return (error);
3071 }
984263bc
MD
3072
3073 /* Setup our descriptor pointers */
f647ad3d 3074 adapter->next_rx_desc_to_check = 0;
87307ba1
SZ
3075
3076 return (0);
984263bc
MD
3077}
3078
984263bc 3079static void
9c80d176 3080em_init_rx_unit(struct adapter *adapter)
984263bc 3081{
9c80d176 3082 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 3083 uint64_t bus_addr;
2d0e5700 3084 uint32_t rctl;
984263bc 3085
87307ba1
SZ
3086 /*
3087 * Make sure receives are disabled while setting
3088 * up the descriptor ring
3089 */
9c80d176
SZ
3090 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3091 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
984263bc 3092
9c80d176 3093 if (adapter->hw.mac.type >= e1000_82540) {
2d0e5700
SZ
3094 uint32_t itr;
3095
9c80d176
SZ
3096 /*
3097 * Set the interrupt throttling rate. Value is calculated
3098 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
3099 */
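/*
 * For example, int_throttle_ceil = 10000 interrupts/s gives
 * 1000000000 / 256 / 10000 = 390 (integer division, in 256ns units).
 */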
2d0e5700
SZ
3100 if (adapter->int_throttle_ceil)
3101 itr = 1000000000 / 256 / adapter->int_throttle_ceil;
3102 else
3103 itr = 0;
3104 em_set_itr(adapter, itr);
f647ad3d 3105 }
984263bc 3106
9c80d176
SZ
 3107 /* Disable accelerated acknowledgement */
3108 if (adapter->hw.mac.type == e1000_82574) {
3109 E1000_WRITE_REG(&adapter->hw,
3110 E1000_RFCTL, E1000_RFCTL_ACK_DIS);
3111 }
3112
2d0e5700
SZ
3113 /* Receive Checksum Offload for TCP and UDP */
3114 if (ifp->if_capenable & IFCAP_RXCSUM) {
3115 uint32_t rxcsum;
3116
3117 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3118 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3119 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3120 }
3121
3122 /*
3123 * XXX TEMPORARY WORKAROUND: on some systems with 82573
3124 * long latencies are observed, like Lenovo X60. This
3125 * change eliminates the problem, but since having positive
3126 * values in RDTR is a known source of problems on other
3127 * platforms another solution is being sought.
3128 */
3129 if (em_82573_workaround && adapter->hw.mac.type == e1000_82573) {
3130 E1000_WRITE_REG(&adapter->hw, E1000_RADV, EM_RADV_82573);
3131 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, EM_RDTR_82573);
3132 }
3133
3134 /*
3135 * Setup the Base and Length of the Rx Descriptor Ring
3136 */
9ccd8c1f 3137 bus_addr = adapter->rxdma.dma_paddr;
9c80d176
SZ
3138 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3139 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3140 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3141 (uint32_t)(bus_addr >> 32));
3142 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3143 (uint32_t)bus_addr);
984263bc 3144
2d0e5700
SZ
3145 /*
3146 * Setup the HW Rx Head and Tail Descriptor Pointers
3147 */
3148 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
3149 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
3150
3151 /* Set early receive threshold on appropriate hw */
3152 if (((adapter->hw.mac.type == e1000_ich9lan) ||
3153 (adapter->hw.mac.type == e1000_pch2lan) ||
3154 (adapter->hw.mac.type == e1000_ich10lan)) &&
3155 (ifp->if_mtu > ETHERMTU)) {
3156 uint32_t rxdctl;
3157
3158 rxdctl = E1000_READ_REG(&adapter->hw, E1000_RXDCTL(0));
3159 E1000_WRITE_REG(&adapter->hw, E1000_RXDCTL(0), rxdctl | 3);
3160 E1000_WRITE_REG(&adapter->hw, E1000_ERT, 0x100 | (1 << 13));
3161 }
3162
3163 if (adapter->hw.mac.type == e1000_pch2lan) {
3164 if (ifp->if_mtu > ETHERMTU)
3165 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, TRUE);
3166 else
3167 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, FALSE);
3168 }
3169
984263bc 3170 /* Setup the Receive Control Register */
9c80d176
SZ
3171 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3172 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3173 E1000_RCTL_RDMTS_HALF |
3174 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
984263bc 3175
9c80d176
SZ
3176 /* Make sure VLAN Filters are off */
3177 rctl &= ~E1000_RCTL_VFE;
3178
3179 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3180 rctl |= E1000_RCTL_SBP;
3181 else
3182 rctl &= ~E1000_RCTL_SBP;
984263bc 3183
984263bc
MD
3184 switch (adapter->rx_buffer_len) {
3185 default:
9c80d176
SZ
3186 case 2048:
3187 rctl |= E1000_RCTL_SZ_2048;
3188 break;
3189
3190 case 4096:
3191 rctl |= E1000_RCTL_SZ_4096 |
3192 E1000_RCTL_BSEX | E1000_RCTL_LPE;
984263bc 3193 break;
9c80d176
SZ
3194
3195 case 8192:
3196 rctl |= E1000_RCTL_SZ_8192 |
3197 E1000_RCTL_BSEX | E1000_RCTL_LPE;
984263bc 3198 break;
9c80d176
SZ
3199
3200 case 16384:
3201 rctl |= E1000_RCTL_SZ_16384 |
3202 E1000_RCTL_BSEX | E1000_RCTL_LPE;
984263bc
MD
3203 break;
3204 }
3205
3206 if (ifp->if_mtu > ETHERMTU)
9c80d176
SZ
3207 rctl |= E1000_RCTL_LPE;
3208 else
3209 rctl &= ~E1000_RCTL_LPE;
984263bc 3210
6b96e920
SZ
3211 /* Enable Receives */
3212 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
984263bc
MD
3213}
3214
984263bc 3215static void
9c80d176 3216em_destroy_rx_ring(struct adapter *adapter, int ndesc)
984263bc 3217{
f647ad3d
JS
3218 struct em_buffer *rx_buffer;
3219 int i;
984263bc 3220
9c80d176
SZ
3221 if (adapter->rx_buffer_area == NULL)
3222 return;
984263bc 3223
9c80d176
SZ
3224 for (i = 0; i < ndesc; i++) {
3225 rx_buffer = &adapter->rx_buffer_area[i];
3226
3227 KKASSERT(rx_buffer->m_head == NULL);
3228 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
9ccd8c1f 3229 }
9c80d176
SZ
3230 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
3231 bus_dma_tag_destroy(adapter->rxtag);
3232
3233 kfree(adapter->rx_buffer_area, M_DEVBUF);
3234 adapter->rx_buffer_area = NULL;
984263bc
MD
3235}
3236
984263bc 3237static void
87307ba1 3238em_rxeof(struct adapter *adapter, int count)
984263bc 3239{
9c80d176
SZ
3240 struct ifnet *ifp = &adapter->arpcom.ac_if;
3241 uint8_t status, accept_frame = 0, eop = 0;
f647ad3d 3242 uint16_t len, desc_len, prev_len_adj;
9c80d176
SZ
3243 struct e1000_rx_desc *current_desc;
3244 struct mbuf *mp;