em: Add TSO support for certain PCIe chips
[dragonfly.git] / sys / dev / netif / em / if_em.c
1/*
2 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved.
3 *
4 * Copyright (c) 2001-2008, Intel Corporation
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 *
34 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
35 *
36 * This code is derived from software contributed to The DragonFly Project
37 * by Matthew Dillon <dillon@backplane.com>
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * 3. Neither the name of The DragonFly Project nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific, prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
56 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
57 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
59 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
60 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
61 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
62 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
63 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 */
67/*
68 * SERIALIZATION API RULES:
69 *
70 * - If the driver uses the same serializer for the interrupt as for the
71 * ifnet, most of the serialization will be done automatically for the
72 * driver.
73 *
74 * - ifmedia entry points will be serialized by the ifmedia code using the
75 * ifnet serializer.
76 *
77 * - if_* entry points except for if_input will be serialized by the IF
78 * and protocol layers.
79 *
80 * - The device driver must be sure to serialize access from timeout code
81 * installed by the device driver.
82 *
83 * - The device driver typically holds the serializer at the time it wishes
84 * to call if_input.
85 *
86 * - We must call lwkt_serialize_handler_enable() prior to enabling the
87 * hardware interrupt and lwkt_serialize_handler_disable() after disabling
88 * the hardware interrupt in order to avoid handler execution races from
89 * scheduled interrupt threads.
90 *
91 * NOTE! Since callers into the device driver hold the ifnet serializer,
92 * the device driver may be holding a serializer at the time it calls
93 * if_input even if it is not serializer-aware.
94 */
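/*
 * Illustrative sketch (added for this write-up, not part of the original
 * driver): how a hypothetical foo_timer() callout and foo_enable_intr()
 * helper would follow the serialization rules above.  Guarded by #if 0
 * since it is an example only.
 */
#if 0
static void
foo_timer(void *xsc)
{
	struct adapter *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Timeout code installed by the driver must serialize itself. */
	lwkt_serialize_enter(ifp->if_serializer);
	/* ... periodic work; if_input() may be called while serialized ... */
	callout_reset(&sc->timer, hz, foo_timer, sc);
	lwkt_serialize_exit(ifp->if_serializer);
}

static void
foo_enable_intr(struct adapter *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/* Enable the handler before unmasking the hardware interrupt. */
	lwkt_serialize_handler_enable(ifp->if_serializer);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}
#endif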
95
96#include "opt_polling.h"
97
98#include <sys/param.h>
99#include <sys/bus.h>
100#include <sys/endian.h>
101#include <sys/interrupt.h>
102#include <sys/kernel.h>
103#include <sys/ktr.h>
104#include <sys/malloc.h>
105#include <sys/mbuf.h>
106#include <sys/proc.h>
107#include <sys/rman.h>
108#include <sys/serialize.h>
109#include <sys/socket.h>
110#include <sys/sockio.h>
111#include <sys/sysctl.h>
112#include <sys/systm.h>
113
114#include <net/bpf.h>
115#include <net/ethernet.h>
116#include <net/if.h>
117#include <net/if_arp.h>
118#include <net/if_dl.h>
119#include <net/if_media.h>
120#include <net/ifq_var.h>
121#include <net/vlan/if_vlan_var.h>
122#include <net/vlan/if_vlan_ether.h>
123
124#include <netinet/ip.h>
125#include <netinet/tcp.h>
126#include <netinet/udp.h>
127
128#include <bus/pci/pcivar.h>
129#include <bus/pci/pcireg.h>
130
131#include <dev/netif/ig_hal/e1000_api.h>
132#include <dev/netif/ig_hal/e1000_82571.h>
133#include <dev/netif/em/if_em.h>
134
135#define EM_NAME "Intel(R) PRO/1000 Network Connection "
136#define EM_VER " 7.2.4"
137
138#define _EM_DEVICE(id, ret) \
139 { EM_VENDOR_ID, E1000_DEV_ID_##id, ret, EM_NAME #id EM_VER }
140#define EM_EMX_DEVICE(id) _EM_DEVICE(id, -100)
141#define EM_DEVICE(id) _EM_DEVICE(id, 0)
142#define EM_DEVICE_NULL { 0, 0, 0, NULL }
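/*
 * Added note: EM_DEVICE(82540EM) above expands to
 *   { EM_VENDOR_ID, E1000_DEV_ID_82540EM, 0, EM_NAME "82540EM" EM_VER }
 * while EM_EMX_DEVICE() fills in a probe return value of -100 instead of
 * 0, which em_probe() hands back so that a driver bidding with a higher
 * probe priority (on DragonFly, emx(4) for these chips) can claim the
 * device instead.
 */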
143
144static const struct em_vendor_info em_vendor_info_array[] = {
145 EM_DEVICE(82540EM),
146 EM_DEVICE(82540EM_LOM),
147 EM_DEVICE(82540EP),
148 EM_DEVICE(82540EP_LOM),
149 EM_DEVICE(82540EP_LP),
150
151 EM_DEVICE(82541EI),
152 EM_DEVICE(82541ER),
153 EM_DEVICE(82541ER_LOM),
154 EM_DEVICE(82541EI_MOBILE),
155 EM_DEVICE(82541GI),
156 EM_DEVICE(82541GI_LF),
157 EM_DEVICE(82541GI_MOBILE),
158
159 EM_DEVICE(82542),
160
161 EM_DEVICE(82543GC_FIBER),
162 EM_DEVICE(82543GC_COPPER),
163
164 EM_DEVICE(82544EI_COPPER),
165 EM_DEVICE(82544EI_FIBER),
166 EM_DEVICE(82544GC_COPPER),
167 EM_DEVICE(82544GC_LOM),
168
169 EM_DEVICE(82545EM_COPPER),
170 EM_DEVICE(82545EM_FIBER),
171 EM_DEVICE(82545GM_COPPER),
172 EM_DEVICE(82545GM_FIBER),
173 EM_DEVICE(82545GM_SERDES),
174
175 EM_DEVICE(82546EB_COPPER),
176 EM_DEVICE(82546EB_FIBER),
177 EM_DEVICE(82546EB_QUAD_COPPER),
178 EM_DEVICE(82546GB_COPPER),
179 EM_DEVICE(82546GB_FIBER),
180 EM_DEVICE(82546GB_SERDES),
181 EM_DEVICE(82546GB_PCIE),
182 EM_DEVICE(82546GB_QUAD_COPPER),
183 EM_DEVICE(82546GB_QUAD_COPPER_KSP3),
184
185 EM_DEVICE(82547EI),
186 EM_DEVICE(82547EI_MOBILE),
187 EM_DEVICE(82547GI),
188
189 EM_EMX_DEVICE(82571EB_COPPER),
190 EM_EMX_DEVICE(82571EB_FIBER),
191 EM_EMX_DEVICE(82571EB_SERDES),
192 EM_EMX_DEVICE(82571EB_SERDES_DUAL),
193 EM_EMX_DEVICE(82571EB_SERDES_QUAD),
194 EM_EMX_DEVICE(82571EB_QUAD_COPPER),
195 EM_EMX_DEVICE(82571EB_QUAD_COPPER_BP),
196 EM_EMX_DEVICE(82571EB_QUAD_COPPER_LP),
197 EM_EMX_DEVICE(82571EB_QUAD_FIBER),
198 EM_EMX_DEVICE(82571PT_QUAD_COPPER),
199
200 EM_EMX_DEVICE(82572EI_COPPER),
201 EM_EMX_DEVICE(82572EI_FIBER),
202 EM_EMX_DEVICE(82572EI_SERDES),
203 EM_EMX_DEVICE(82572EI),
204
205 EM_EMX_DEVICE(82573E),
206 EM_EMX_DEVICE(82573E_IAMT),
207 EM_EMX_DEVICE(82573L),
208
209 EM_DEVICE(82583V),
210
211 EM_EMX_DEVICE(80003ES2LAN_COPPER_SPT),
212 EM_EMX_DEVICE(80003ES2LAN_SERDES_SPT),
213 EM_EMX_DEVICE(80003ES2LAN_COPPER_DPT),
214 EM_EMX_DEVICE(80003ES2LAN_SERDES_DPT),
215
216 EM_DEVICE(ICH8_IGP_M_AMT),
217 EM_DEVICE(ICH8_IGP_AMT),
218 EM_DEVICE(ICH8_IGP_C),
219 EM_DEVICE(ICH8_IFE),
220 EM_DEVICE(ICH8_IFE_GT),
221 EM_DEVICE(ICH8_IFE_G),
222 EM_DEVICE(ICH8_IGP_M),
223 EM_DEVICE(ICH8_82567V_3),
224
225 EM_DEVICE(ICH9_IGP_M_AMT),
226 EM_DEVICE(ICH9_IGP_AMT),
227 EM_DEVICE(ICH9_IGP_C),
228 EM_DEVICE(ICH9_IGP_M),
229 EM_DEVICE(ICH9_IGP_M_V),
230 EM_DEVICE(ICH9_IFE),
231 EM_DEVICE(ICH9_IFE_GT),
232 EM_DEVICE(ICH9_IFE_G),
233 EM_DEVICE(ICH9_BM),
234
235 EM_EMX_DEVICE(82574L),
236 EM_EMX_DEVICE(82574LA),
237
238 EM_DEVICE(ICH10_R_BM_LM),
239 EM_DEVICE(ICH10_R_BM_LF),
240 EM_DEVICE(ICH10_R_BM_V),
241 EM_DEVICE(ICH10_D_BM_LM),
242 EM_DEVICE(ICH10_D_BM_LF),
243 EM_DEVICE(ICH10_D_BM_V),
244
245 EM_DEVICE(PCH_M_HV_LM),
246 EM_DEVICE(PCH_M_HV_LC),
247 EM_DEVICE(PCH_D_HV_DM),
248 EM_DEVICE(PCH_D_HV_DC),
249
250 EM_DEVICE(PCH2_LV_LM),
251 EM_DEVICE(PCH2_LV_V),
252
253 /* required last entry */
254 EM_DEVICE_NULL
255};
256
257static int em_probe(device_t);
258static int em_attach(device_t);
259static int em_detach(device_t);
260static int em_shutdown(device_t);
261static int em_suspend(device_t);
262static int em_resume(device_t);
263
264static void em_init(void *);
265static void em_stop(struct adapter *);
266static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
267static void em_start(struct ifnet *);
268#ifdef DEVICE_POLLING
269static void em_poll(struct ifnet *, enum poll_cmd, int);
270#endif
271static void em_watchdog(struct ifnet *);
272static void em_media_status(struct ifnet *, struct ifmediareq *);
273static int em_media_change(struct ifnet *);
274static void em_timer(void *);
275
276static void em_intr(void *);
277static void em_intr_mask(void *);
278static void em_intr_body(struct adapter *, boolean_t);
279static void em_rxeof(struct adapter *, int);
280static void em_txeof(struct adapter *);
281static void em_tx_collect(struct adapter *);
282static void em_tx_purge(struct adapter *);
283static void em_enable_intr(struct adapter *);
284static void em_disable_intr(struct adapter *);
285
286static int em_dma_malloc(struct adapter *, bus_size_t,
287 struct em_dma_alloc *);
288static void em_dma_free(struct adapter *, struct em_dma_alloc *);
289static void em_init_tx_ring(struct adapter *);
290static int em_init_rx_ring(struct adapter *);
291static int em_create_tx_ring(struct adapter *);
292static int em_create_rx_ring(struct adapter *);
293static void em_destroy_tx_ring(struct adapter *, int);
294static void em_destroy_rx_ring(struct adapter *, int);
295static int em_newbuf(struct adapter *, int, int);
296static int em_encap(struct adapter *, struct mbuf **);
297static void em_rxcsum(struct adapter *, struct e1000_rx_desc *,
298 struct mbuf *);
299static int em_txcsum(struct adapter *, struct mbuf *,
300 uint32_t *, uint32_t *);
301static int em_tso_pullup(struct adapter *, struct mbuf **);
302static int em_tso_setup(struct adapter *, struct mbuf *,
303 uint32_t *, uint32_t *);
304
305static int em_get_hw_info(struct adapter *);
306static int em_is_valid_eaddr(const uint8_t *);
307static int em_alloc_pci_res(struct adapter *);
308static void em_free_pci_res(struct adapter *);
309static int em_reset(struct adapter *);
310static void em_setup_ifp(struct adapter *);
311static void em_init_tx_unit(struct adapter *);
312static void em_init_rx_unit(struct adapter *);
313static void em_update_stats(struct adapter *);
314static void em_set_promisc(struct adapter *);
315static void em_disable_promisc(struct adapter *);
316static void em_set_multi(struct adapter *);
317static void em_update_link_status(struct adapter *);
318static void em_smartspeed(struct adapter *);
319static void em_set_itr(struct adapter *, uint32_t);
320static void em_disable_aspm(struct adapter *);
321
322/* Hardware workarounds */
323static int em_82547_fifo_workaround(struct adapter *, int);
324static void em_82547_update_fifo_head(struct adapter *, int);
325static int em_82547_tx_fifo_reset(struct adapter *);
326static void em_82547_move_tail(void *);
327static void em_82547_move_tail_serialized(struct adapter *);
328static uint32_t em_82544_fill_desc(bus_addr_t, uint32_t, PDESC_ARRAY);
329
330static void em_print_debug_info(struct adapter *);
331static void em_print_nvm_info(struct adapter *);
332static void em_print_hw_stats(struct adapter *);
333
334static int em_sysctl_stats(SYSCTL_HANDLER_ARGS);
335static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
336static int em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
337static int em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
338static void em_add_sysctl(struct adapter *adapter);
339
340/* Management and WOL Support */
341static void em_get_mgmt(struct adapter *);
342static void em_rel_mgmt(struct adapter *);
343static void em_get_hw_control(struct adapter *);
344static void em_rel_hw_control(struct adapter *);
345static void em_enable_wol(device_t);
346
347static device_method_t em_methods[] = {
348 /* Device interface */
349 DEVMETHOD(device_probe, em_probe),
350 DEVMETHOD(device_attach, em_attach),
351 DEVMETHOD(device_detach, em_detach),
352 DEVMETHOD(device_shutdown, em_shutdown),
353 DEVMETHOD(device_suspend, em_suspend),
354 DEVMETHOD(device_resume, em_resume),
355 { 0, 0 }
356};
357
358static driver_t em_driver = {
359 "em",
360 em_methods,
361 sizeof(struct adapter),
362};
363
364static devclass_t em_devclass;
365
366DECLARE_DUMMY_MODULE(if_em);
367MODULE_DEPEND(em, ig_hal, 1, 1, 1);
368DRIVER_MODULE(if_em, pci, em_driver, em_devclass, NULL, NULL);
369
370/*
371 * Tunables
372 */
373static int em_int_throttle_ceil = EM_DEFAULT_ITR;
374static int em_rxd = EM_DEFAULT_RXD;
375static int em_txd = EM_DEFAULT_TXD;
376static int em_smart_pwr_down = 0;
377
378/* Controls whether promiscuous also shows bad packets */
379static int em_debug_sbp = FALSE;
380
381static int em_82573_workaround = 1;
382static int em_msi_enable = 1;
383
384TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
385TUNABLE_INT("hw.em.rxd", &em_rxd);
386TUNABLE_INT("hw.em.txd", &em_txd);
387TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
388TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
389TUNABLE_INT("hw.em.82573_workaround", &em_82573_workaround);
390TUNABLE_INT("hw.em.msi.enable", &em_msi_enable);
391
392/* Global used in WOL setup with multiport cards */
393static int em_global_quad_port_a = 0;
394
395/* Set this to one to display debug statistics */
396static int em_display_debug_stats = 0;
397
398#if !defined(KTR_IF_EM)
399#define KTR_IF_EM KTR_ALL
400#endif
401KTR_INFO_MASTER(if_em);
402KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin");
403KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end");
404KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet");
405KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet");
406KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean");
407#define logif(name) KTR_LOG(if_em_ ## name)
408
409static int
410em_probe(device_t dev)
411{
412 const struct em_vendor_info *ent;
413 uint16_t vid, did;
414
415 vid = pci_get_vendor(dev);
416 did = pci_get_device(dev);
417
418 for (ent = em_vendor_info_array; ent->desc != NULL; ++ent) {
419 if (vid == ent->vendor_id && did == ent->device_id) {
420 device_set_desc(dev, ent->desc);
421 device_set_async_attach(dev, TRUE);
422 return (ent->ret);
423 }
424 }
425 return (ENXIO);
426}
427
428static int
429em_attach(device_t dev)
430{
431 struct adapter *adapter = device_get_softc(dev);
432 struct ifnet *ifp = &adapter->arpcom.ac_if;
433 int tsize, rsize;
434 int error = 0;
435 uint16_t eeprom_data, device_id, apme_mask;
436 driver_intr_t *intr_func;
437
438 adapter->dev = adapter->osdep.dev = dev;
439
440 callout_init_mp(&adapter->timer);
441 callout_init_mp(&adapter->tx_fifo_timer);
442
443 /* Determine hardware and mac info */
444 error = em_get_hw_info(adapter);
445 if (error) {
446 device_printf(dev, "Identify hardware failed\n");
447 goto fail;
448 }
449
450 /* Setup PCI resources */
451 error = em_alloc_pci_res(adapter);
452 if (error) {
453 device_printf(dev, "Allocation of PCI resources failed\n");
454 goto fail;
455 }
456
457 /*
458 * For ICH8 and family we need to map the flash memory,
459 * and this must happen after the MAC is identified.
460 */
461 if (adapter->hw.mac.type == e1000_ich8lan ||
462 adapter->hw.mac.type == e1000_ich9lan ||
463 adapter->hw.mac.type == e1000_ich10lan ||
464 adapter->hw.mac.type == e1000_pchlan ||
465 adapter->hw.mac.type == e1000_pch2lan) {
466 adapter->flash_rid = EM_BAR_FLASH;
467
468 adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
469 &adapter->flash_rid, RF_ACTIVE);
470 if (adapter->flash == NULL) {
471 device_printf(dev, "Mapping of Flash failed\n");
472 error = ENXIO;
473 goto fail;
474 }
475 adapter->osdep.flash_bus_space_tag =
476 rman_get_bustag(adapter->flash);
477 adapter->osdep.flash_bus_space_handle =
478 rman_get_bushandle(adapter->flash);
479
480 /*
481 * This is used in the shared code
482 * XXX this goof is actually not used.
483 */
484 adapter->hw.flash_address = (uint8_t *)adapter->flash;
485 }
486
487 switch (adapter->hw.mac.type) {
488 case e1000_82571:
489 case e1000_82572:
490 /*
491 * Pullup extra 4bytes into the first data segment, see:
492 * 82571/82572 specification update errata #7
493 *
494 * NOTE:
495 * 4 bytes instead of the 2 bytes mentioned in the
496 * errata are pulled, mainly to keep the rest of the data
497 * properly aligned.
498 */
499 adapter->flags |= EM_FLAG_TSO_PULLEX;
500 /* FALL THROUGH */
501
502 case e1000_82573:
503 case e1000_82574:
504 case e1000_80003es2lan:
505 adapter->flags |= EM_FLAG_TSO;
506 break;
507
508 default:
509 break;
510 }
511
512 /* Do Shared Code initialization */
513 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
514 device_printf(dev, "Setup of Shared code failed\n");
515 error = ENXIO;
516 goto fail;
517 }
518
519 e1000_get_bus_info(&adapter->hw);
520
521 /*
522 * Validate the number of transmit and receive descriptors. It
523 * must not exceed the hardware maximum and must be a multiple
524 * of E1000_DBA_ALIGN.
525 */
526 if ((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN != 0 ||
527 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
528 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
529 em_txd < EM_MIN_TXD) {
530 device_printf(dev, "Using %d TX descriptors instead of %d!\n",
531 EM_DEFAULT_TXD, em_txd);
532 adapter->num_tx_desc = EM_DEFAULT_TXD;
533 } else {
534 adapter->num_tx_desc = em_txd;
535 }
536 if ((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN != 0 ||
537 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
538 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
539 em_rxd < EM_MIN_RXD) {
540 device_printf(dev, "Using %d RX descriptors instead of %d!\n",
541 EM_DEFAULT_RXD, em_rxd);
542 adapter->num_rx_desc = EM_DEFAULT_RXD;
543 } else {
544 adapter->num_rx_desc = em_rxd;
545 }
546
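/*
 * Added note (worked example): a legacy descriptor is 16 bytes, so with
 * an assumed EM_DBA_ALIGN of 128 the hw.em.txd/hw.em.rxd tunables must be
 * multiples of 8; e.g. em_txd = 100 fails the check above and falls back
 * to EM_DEFAULT_TXD, while 256 or 512 pass (provided they also stay
 * within EM_MIN_TXD and the per-MAC maximum).
 */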
547 adapter->hw.mac.autoneg = DO_AUTO_NEG;
548 adapter->hw.phy.autoneg_wait_to_complete = FALSE;
549 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
550 adapter->rx_buffer_len = MCLBYTES;
551
552 /*
553 * Interrupt throttle rate
554 */
555 if (em_int_throttle_ceil == 0) {
556 adapter->int_throttle_ceil = 0;
557 } else {
558 int throttle = em_int_throttle_ceil;
559
560 if (throttle < 0)
561 throttle = EM_DEFAULT_ITR;
562
563 /* Recalculate the tunable value to get the exact frequency. */
564 throttle = 1000000000 / 256 / throttle;
565
566 /* Upper 16bits of ITR is reserved and should be zero */
567 if (throttle & 0xffff0000)
568 throttle = 1000000000 / 256 / EM_DEFAULT_ITR;
569
570 adapter->int_throttle_ceil = 1000000000 / 256 / throttle;
571 }
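/*
 * Added note (worked arithmetic): with hw.em.int_throttle_ceil = 10000,
 * throttle = 1000000000 / 256 / 10000 = 390 (the ITR unit is 256ns), and
 * the ceiling stored above becomes 1000000000 / 256 / 390 = 10016, i.e.
 * the closest interrupt rate the hardware granularity can provide.
 */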
572
573 e1000_init_script_state_82541(&adapter->hw, TRUE);
574 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
575
576 /* Copper options */
577 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
578 adapter->hw.phy.mdix = AUTO_ALL_MODES;
579 adapter->hw.phy.disable_polarity_correction = FALSE;
580 adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
581 }
582
583 /* Set the frame limits assuming standard ethernet sized frames. */
584 adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
585 adapter->min_frame_size = ETH_ZLEN + ETHER_CRC_LEN;
984263bc 586
9c80d176
SZ
587 /* This controls when hardware reports transmit completion status. */
588 adapter->hw.mac.report_tx_early = 1;
984263bc 589
87307ba1 590 /*
9c80d176 591 * Create top level busdma tag
984263bc 592 */
9c80d176
SZ
593 error = bus_dma_tag_create(NULL, 1, 0,
594 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
595 NULL, NULL,
596 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
597 0, &adapter->parent_dtag);
598 if (error) {
599 device_printf(dev, "could not create top level DMA tag\n");
af82d4bb 600 goto fail;
9c80d176 601 }
af82d4bb 602
9c80d176
SZ
603 /*
604 * Allocate Transmit Descriptor ring
605 */
606 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
1eca7b82 607 EM_DBA_ALIGN);
87307ba1
SZ
608 error = em_dma_malloc(adapter, tsize, &adapter->txdma);
609 if (error) {
9c80d176 610 device_printf(dev, "Unable to allocate tx_desc memory\n");
af82d4bb 611 goto fail;
984263bc 612 }
9c80d176 613 adapter->tx_desc_base = adapter->txdma.dma_vaddr;
984263bc 614
9c80d176
SZ
615 /*
616 * Allocate Receive Descriptor ring
617 */
618 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
1eca7b82 619 EM_DBA_ALIGN);
87307ba1
SZ
620 error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
621 if (error) {
9ccd8c1f 622 device_printf(dev, "Unable to allocate rx_desc memory\n");
af82d4bb 623 goto fail;
984263bc 624 }
9c80d176
SZ
625 adapter->rx_desc_base = adapter->rxdma.dma_vaddr;
626
2d0e5700
SZ
627 /* Allocate multicast array memory. */
628 adapter->mta = kmalloc(ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
629 M_DEVBUF, M_WAITOK);
630
631 /* Indicate SOL/IDER usage */
632 if (e1000_check_reset_block(&adapter->hw)) {
633 device_printf(dev,
634 "PHY reset is blocked due to SOL/IDER session.\n");
635 }
636
637 /*
638 * Start from a known state; this is important for reading the
639 * NVM and MAC address afterwards.
640 */
641 e1000_reset_hw(&adapter->hw);
642
9c80d176
SZ
643 /* Make sure we have a good EEPROM before we read from it */
644 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
645 /*
646 * Some PCI-E parts fail the first check due to
647 * the link being in sleep state, call it again,
648 * if it fails a second time it's a real issue.
649 */
650 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
651 device_printf(dev,
652 "The EEPROM Checksum Is Not Valid\n");
653 error = EIO;
654 goto fail;
655 }
656 }
984263bc 657
984263bc 658 /* Copy the permanent MAC address out of the EEPROM */
9c80d176
SZ
659 if (e1000_read_mac_addr(&adapter->hw) < 0) {
660 device_printf(dev, "EEPROM read error while reading MAC"
661 " address\n");
984263bc 662 error = EIO;
af82d4bb 663 goto fail;
984263bc 664 }
9c80d176 665 if (!em_is_valid_eaddr(adapter->hw.mac.addr)) {
87307ba1 666 device_printf(dev, "Invalid MAC address\n");
984263bc 667 error = EIO;
af82d4bb 668 goto fail;
984263bc
MD
669 }
670
9c80d176
SZ
671 /* Allocate transmit descriptors and buffers */
672 error = em_create_tx_ring(adapter);
673 if (error) {
674 device_printf(dev, "Could not setup transmit structures\n");
675 goto fail;
676 }
677
678 /* Allocate receive descriptors and buffers */
679 error = em_create_rx_ring(adapter);
680 if (error) {
681 device_printf(dev, "Could not setup receive structures\n");
682 goto fail;
683 }
684
685 /* Manually turn off all interrupts */
686 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
687
9c80d176 688 /* Determine if we have to control management hardware */
79878f87
SZ
689 if (e1000_enable_mng_pass_thru(&adapter->hw))
690 adapter->flags |= EM_FLAG_HAS_MGMT;
9c80d176
SZ
691
692 /*
693 * Setup Wake-on-Lan
694 */
2d0e5700
SZ
695 apme_mask = EM_EEPROM_APME;
696 eeprom_data = 0;
9c80d176
SZ
697 switch (adapter->hw.mac.type) {
698 case e1000_82542:
699 case e1000_82543:
700 break;
701
2d0e5700
SZ
702 case e1000_82573:
703 case e1000_82583:
79878f87 704 adapter->flags |= EM_FLAG_HAS_AMT;
2d0e5700
SZ
705 /* FALL THROUGH */
706
9c80d176
SZ
707 case e1000_82546:
708 case e1000_82546_rev_3:
709 case e1000_82571:
2d0e5700 710 case e1000_82572:
9c80d176
SZ
711 case e1000_80003es2lan:
712 if (adapter->hw.bus.func == 1) {
713 e1000_read_nvm(&adapter->hw,
714 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
715 } else {
716 e1000_read_nvm(&adapter->hw,
717 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
718 }
2d0e5700
SZ
719 break;
720
721 case e1000_ich8lan:
722 case e1000_ich9lan:
723 case e1000_ich10lan:
724 case e1000_pchlan:
725 case e1000_pch2lan:
726 apme_mask = E1000_WUC_APME;
79878f87 727 adapter->flags |= EM_FLAG_HAS_AMT;
2d0e5700 728 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
9c80d176
SZ
729 break;
730
731 default:
2d0e5700
SZ
732 e1000_read_nvm(&adapter->hw,
733 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
9c80d176
SZ
734 break;
735 }
2d0e5700
SZ
736 if (eeprom_data & apme_mask)
737 adapter->wol = E1000_WUFC_MAG | E1000_WUFC_MC;
738
9c80d176
SZ
739 /*
740 * We have the eeprom settings, now apply the special cases
741 * where the eeprom may be wrong or the board won't support
742 * wake on lan on a particular port
743 */
744 device_id = pci_get_device(dev);
745 switch (device_id) {
746 case E1000_DEV_ID_82546GB_PCIE:
747 adapter->wol = 0;
748 break;
749
750 case E1000_DEV_ID_82546EB_FIBER:
751 case E1000_DEV_ID_82546GB_FIBER:
752 case E1000_DEV_ID_82571EB_FIBER:
753 /*
754 * Wake events only supported on port A for dual fiber
755 * regardless of eeprom setting
756 */
757 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
758 E1000_STATUS_FUNC_1)
759 adapter->wol = 0;
760 break;
761
762 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
763 case E1000_DEV_ID_82571EB_QUAD_COPPER:
764 case E1000_DEV_ID_82571EB_QUAD_FIBER:
765 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
766 /* if quad port adapter, disable WoL on all but port A */
767 if (em_global_quad_port_a != 0)
768 adapter->wol = 0;
769 /* Reset for multiple quad port adapters */
770 if (++em_global_quad_port_a == 4)
771 em_global_quad_port_a = 0;
772 break;
773 }
774
775 /* XXX disable wol */
776 adapter->wol = 0;
777
2d0e5700
SZ
778 /* Setup OS specific network interface */
779 em_setup_ifp(adapter);
780
781 /* Add sysctl tree, must after em_setup_ifp() */
782 em_add_sysctl(adapter);
783
784 /* Reset the hardware */
785 error = em_reset(adapter);
786 if (error) {
787 device_printf(dev, "Unable to reset the hardware\n");
788 goto fail;
789 }
790
791 /* Initialize statistics */
792 em_update_stats(adapter);
793
794 adapter->hw.mac.get_link_status = 1;
795 em_update_link_status(adapter);
796
9c80d176
SZ
797 /* Do we need workaround for 82544 PCI-X adapter? */
798 if (adapter->hw.bus.type == e1000_bus_type_pcix &&
799 adapter->hw.mac.type == e1000_82544)
f647ad3d 800 adapter->pcix_82544 = TRUE;
87307ba1 801 else
f647ad3d 802 adapter->pcix_82544 = FALSE;
af82d4bb 803
9c80d176
SZ
804 if (adapter->pcix_82544) {
805 /*
806 * 82544 on PCI-X may split one TX segment
807 * into two TX descs, so we double its number
808 * of spare TX desc here.
809 */
810 adapter->spare_tx_desc = 2 * EM_TX_SPARE;
811 } else {
812 adapter->spare_tx_desc = EM_TX_SPARE;
813 }
0bbb59f3
SZ
814 if (adapter->flags & EM_FLAG_TSO)
815 adapter->spare_tx_desc = EM_TX_SPARE_TSO;
9c80d176 816
9f60d74b
SZ
817 /*
818 * Keep following relationship between spare_tx_desc, oact_tx_desc
819 * and tx_int_nsegs:
820 * (spare_tx_desc + EM_TX_RESERVED) <=
821 * oact_tx_desc <= EM_TX_OACTIVE_MAX <= tx_int_nsegs
822 */
823 adapter->oact_tx_desc = adapter->num_tx_desc / 8;
824 if (adapter->oact_tx_desc > EM_TX_OACTIVE_MAX)
825 adapter->oact_tx_desc = EM_TX_OACTIVE_MAX;
826 if (adapter->oact_tx_desc < adapter->spare_tx_desc + EM_TX_RESERVED)
827 adapter->oact_tx_desc = adapter->spare_tx_desc + EM_TX_RESERVED;
828
829 adapter->tx_int_nsegs = adapter->num_tx_desc / 16;
830 if (adapter->tx_int_nsegs < adapter->oact_tx_desc)
831 adapter->tx_int_nsegs = adapter->oact_tx_desc;
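/*
 * Added note (worked example): with 256 TX descriptors and neither clamp
 * above applying, oact_tx_desc starts at 256 / 8 = 32 and tx_int_nsegs at
 * 256 / 16 = 16; the check above then raises tx_int_nsegs to 32 so the
 * documented ordering oact_tx_desc <= tx_int_nsegs still holds.
 */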
832
2d0e5700 833 /* Non-AMT based hardware can now take control from firmware */
79878f87
SZ
834 if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) ==
835 EM_FLAG_HAS_MGMT && adapter->hw.mac.type >= e1000_82571)
2d0e5700
SZ
836 em_get_hw_control(adapter);
837
87ab432b
SZ
838 /*
839 * Missing Interrupt Following ICR read:
840 *
a835687d
SZ
841 * 82571/82572 specification update errata #76
842 * 82573 specification update errata #31
843 * 82574 specification update errata #12
844 * 82583 specification update errata #4
87ab432b
SZ
845 */
846 intr_func = em_intr;
847 if ((adapter->flags & EM_FLAG_SHARED_INTR) &&
848 (adapter->hw.mac.type == e1000_82571 ||
849 adapter->hw.mac.type == e1000_82572 ||
850 adapter->hw.mac.type == e1000_82573 ||
851 adapter->hw.mac.type == e1000_82574 ||
852 adapter->hw.mac.type == e1000_82583))
853 intr_func = em_intr_mask;
854
9c80d176 855 error = bus_setup_intr(dev, adapter->intr_res, INTR_MPSAFE,
87ab432b 856 intr_func, adapter, &adapter->intr_tag,
9c80d176 857 ifp->if_serializer);
af82d4bb 858 if (error) {
9c80d176
SZ
859 device_printf(dev, "Failed to register interrupt handler");
860 ether_ifdetach(&adapter->arpcom.ac_if);
af82d4bb
JS
861 goto fail;
862 }
863
a749d1d2 864 ifp->if_cpuid = rman_get_cpuid(adapter->intr_res);
9db4b353 865 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
9c80d176 866 return (0);
af82d4bb
JS
867fail:
868 em_detach(dev);
9c80d176 869 return (error);
984263bc
MD
870}
871
984263bc
MD
872static int
873em_detach(device_t dev)
874{
78195a76 875 struct adapter *adapter = device_get_softc(dev);
984263bc 876
af82d4bb 877 if (device_is_attached(dev)) {
9c80d176 878 struct ifnet *ifp = &adapter->arpcom.ac_if;
cdf89432
SZ
879
880 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 881
af82d4bb 882 em_stop(adapter);
9c80d176
SZ
883
884 e1000_phy_hw_reset(&adapter->hw);
885
886 em_rel_mgmt(adapter);
2d0e5700 887 em_rel_hw_control(adapter);
9c80d176
SZ
888
889 if (adapter->wol) {
890 E1000_WRITE_REG(&adapter->hw, E1000_WUC,
891 E1000_WUC_PME_EN);
892 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
893 em_enable_wol(dev);
894 }
895
896 bus_teardown_intr(dev, adapter->intr_res, adapter->intr_tag);
897
cdf89432
SZ
898 lwkt_serialize_exit(ifp->if_serializer);
899
900 ether_ifdetach(ifp);
a19a8754 901 } else if (adapter->memory != NULL) {
2d0e5700 902 em_rel_hw_control(adapter);
7ea52455 903 }
cdf89432
SZ
904 bus_generic_detach(dev);
905
9c80d176
SZ
906 em_free_pci_res(adapter);
907
908 em_destroy_tx_ring(adapter, adapter->num_tx_desc);
909 em_destroy_rx_ring(adapter, adapter->num_rx_desc);
af82d4bb 910
984263bc 911 /* Free Transmit Descriptor ring */
9c80d176 912 if (adapter->tx_desc_base)
9ccd8c1f 913 em_dma_free(adapter, &adapter->txdma);
984263bc 914
984263bc 915 /* Free Receive Descriptor ring */
9c80d176 916 if (adapter->rx_desc_base)
9ccd8c1f 917 em_dma_free(adapter, &adapter->rxdma);
9c80d176
SZ
918
919 /* Free top level busdma tag */
920 if (adapter->parent_dtag != NULL)
921 bus_dma_tag_destroy(adapter->parent_dtag);
984263bc 922
1eca7b82 923 /* Free sysctl tree */
9c80d176 924 if (adapter->sysctl_tree != NULL)
1eca7b82 925 sysctl_ctx_free(&adapter->sysctl_ctx);
984263bc 926
a19a8754
SZ
927 if (adapter->mta != NULL)
928 kfree(adapter->mta, M_DEVBUF);
929
87307ba1 930 return (0);
984263bc
MD
931}
932
984263bc
MD
933static int
934em_shutdown(device_t dev)
935{
9c80d176 936 return em_suspend(dev);
87307ba1
SZ
937}
938
87307ba1
SZ
939static int
940em_suspend(device_t dev)
941{
942 struct adapter *adapter = device_get_softc(dev);
9c80d176 943 struct ifnet *ifp = &adapter->arpcom.ac_if;
87307ba1
SZ
944
945 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 946
87307ba1 947 em_stop(adapter);
9c80d176
SZ
948
949 em_rel_mgmt(adapter);
2d0e5700 950 em_rel_hw_control(adapter);
9c80d176 951
2d0e5700 952 if (adapter->wol) {
9c80d176
SZ
953 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
954 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
955 em_enable_wol(dev);
2d0e5700 956 }
9c80d176 957
87307ba1 958 lwkt_serialize_exit(ifp->if_serializer);
9c80d176
SZ
959
960 return bus_generic_suspend(dev);
87307ba1
SZ
961}
962
963static int
964em_resume(device_t dev)
965{
966 struct adapter *adapter = device_get_softc(dev);
9c80d176 967 struct ifnet *ifp = &adapter->arpcom.ac_if;
87307ba1
SZ
968
969 lwkt_serialize_enter(ifp->if_serializer);
9c80d176 970
87307ba1 971 em_init(adapter);
9c80d176 972 em_get_mgmt(adapter);
9db4b353 973 if_devstart(ifp);
9c80d176 974
87307ba1
SZ
975 lwkt_serialize_exit(ifp->if_serializer);
976
977 return bus_generic_resume(dev);
984263bc
MD
978}
979
984263bc
MD
980static void
981em_start(struct ifnet *ifp)
982{
f647ad3d 983 struct adapter *adapter = ifp->if_softc;
9c80d176 984 struct mbuf *m_head;
984263bc 985
1eca7b82 986 ASSERT_SERIALIZED(ifp->if_serializer);
78195a76 987
87307ba1
SZ
988 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
989 return;
9c80d176 990
9db4b353
SZ
991 if (!adapter->link_active) {
992 ifq_purge(&ifp->if_snd);
f647ad3d 993 return;
9db4b353 994 }
9c80d176 995
e26dc3e9 996 while (!ifq_is_empty(&ifp->if_snd)) {
9f60d74b
SZ
997 /* Now do we at least have a minimal? */
998 if (EM_IS_OACTIVE(adapter)) {
999 em_tx_collect(adapter);
9c80d176 1000 if (EM_IS_OACTIVE(adapter)) {
9c80d176 1001 ifp->if_flags |= IFF_OACTIVE;
9f60d74b 1002 adapter->no_tx_desc_avail1++;
9c80d176
SZ
1003 break;
1004 }
1005 }
1006
1007 logif(pkt_txqueue);
9db4b353 1008 m_head = ifq_dequeue(&ifp->if_snd, NULL);
f647ad3d
JS
1009 if (m_head == NULL)
1010 break;
984263bc 1011
9c80d176 1012 if (em_encap(adapter, &m_head)) {
002b3a05 1013 ifp->if_oerrors++;
9f60d74b
SZ
1014 em_tx_collect(adapter);
1015 continue;
f647ad3d 1016 }
984263bc
MD
1017
1018 /* Send a copy of the frame to the BPF listener */
b637f170 1019 ETHER_BPF_MTAP(ifp, m_head);
87307ba1
SZ
1020
1021 /* Set timeout in case hardware has problems transmitting. */
1022 ifp->if_timer = EM_TX_TIMEOUT;
f647ad3d 1023 }
984263bc
MD
1024}
1025
984263bc 1026static int
bd4539cc 1027em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
984263bc 1028{
f647ad3d 1029 struct adapter *adapter = ifp->if_softc;
9c80d176 1030 struct ifreq *ifr = (struct ifreq *)data;
1eca7b82 1031 uint16_t eeprom_data = 0;
9c80d176
SZ
1032 int max_frame_size, mask, reinit;
1033 int error = 0;
0d366ee7 1034
9c80d176 1035 ASSERT_SERIALIZED(ifp->if_serializer);
0d366ee7 1036
984263bc 1037 switch (command) {
984263bc 1038 case SIOCSIFMTU:
9c80d176
SZ
1039 switch (adapter->hw.mac.type) {
1040 case e1000_82573:
1eca7b82
SZ
1041 /*
1042 * 82573 only supports jumbo frames
1043 * if ASPM is disabled.
1044 */
9c80d176
SZ
1045 e1000_read_nvm(&adapter->hw,
1046 NVM_INIT_3GIO_3, 1, &eeprom_data);
1047 if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
1eca7b82
SZ
1048 max_frame_size = ETHER_MAX_LEN;
1049 break;
1050 }
9c80d176
SZ
1051 /* FALL THROUGH */
1052
1053 /* Limit Jumbo Frame size */
1054 case e1000_82571:
1055 case e1000_82572:
1056 case e1000_ich9lan:
1057 case e1000_ich10lan:
2d0e5700 1058 case e1000_pch2lan:
9c80d176 1059 case e1000_82574:
6d5e2922 1060 case e1000_82583:
9c80d176 1061 case e1000_80003es2lan:
1eca7b82 1062 max_frame_size = 9234;
7ea52455 1063 break;
9c80d176 1064
2d0e5700
SZ
1065 case e1000_pchlan:
1066 max_frame_size = 4096;
1067 break;
1068
9c80d176
SZ
1069 /* Adapters that do not support jumbo frames */
1070 case e1000_82542:
1071 case e1000_ich8lan:
7ea52455
SZ
1072 max_frame_size = ETHER_MAX_LEN;
1073 break;
9c80d176 1074
7ea52455
SZ
1075 default:
1076 max_frame_size = MAX_JUMBO_FRAME_SIZE;
1077 break;
1078 }
9c80d176
SZ
1079 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
1080 ETHER_CRC_LEN) {
984263bc 1081 error = EINVAL;
9c80d176 1082 break;
984263bc 1083 }
9c80d176
SZ
1084
1085 ifp->if_mtu = ifr->ifr_mtu;
1086 adapter->max_frame_size =
1087 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1088
1089 if (ifp->if_flags & IFF_RUNNING)
1090 em_init(adapter);
984263bc 1091 break;
9c80d176 1092
984263bc 1093 case SIOCSIFFLAGS:
984263bc 1094 if (ifp->if_flags & IFF_UP) {
9c80d176
SZ
1095 if ((ifp->if_flags & IFF_RUNNING)) {
1096 if ((ifp->if_flags ^ adapter->if_flags) &
1097 (IFF_PROMISC | IFF_ALLMULTI)) {
1098 em_disable_promisc(adapter);
1099 em_set_promisc(adapter);
1100 }
1101 } else {
78195a76 1102 em_init(adapter);
87307ba1 1103 }
9c80d176
SZ
1104 } else if (ifp->if_flags & IFF_RUNNING) {
1105 em_stop(adapter);
984263bc 1106 }
87307ba1 1107 adapter->if_flags = ifp->if_flags;
984263bc 1108 break;
9c80d176 1109
984263bc
MD
1110 case SIOCADDMULTI:
1111 case SIOCDELMULTI:
984263bc
MD
1112 if (ifp->if_flags & IFF_RUNNING) {
1113 em_disable_intr(adapter);
1114 em_set_multi(adapter);
9c80d176
SZ
1115 if (adapter->hw.mac.type == e1000_82542 &&
1116 adapter->hw.revision_id == E1000_REVISION_2)
1117 em_init_rx_unit(adapter);
1eca7b82 1118#ifdef DEVICE_POLLING
9c80d176 1119 if (!(ifp->if_flags & IFF_POLLING))
1eca7b82 1120#endif
9c80d176 1121 em_enable_intr(adapter);
984263bc
MD
1122 }
1123 break;
9c80d176 1124
984263bc 1125 case SIOCSIFMEDIA:
87307ba1 1126 /* Check SOL/IDER usage */
9c80d176
SZ
1127 if (e1000_check_reset_block(&adapter->hw)) {
1128 device_printf(adapter->dev, "Media change is"
1129 " blocked due to SOL/IDER session.\n");
87307ba1
SZ
1130 break;
1131 }
9c80d176
SZ
1132 /* FALL THROUGH */
1133
984263bc 1134 case SIOCGIFMEDIA:
984263bc
MD
1135 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
1136 break;
9c80d176 1137
984263bc 1138 case SIOCSIFCAP:
9c80d176 1139 reinit = 0;
984263bc 1140 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
f54de229
SZ
1141 if (mask & IFCAP_RXCSUM) {
1142 ifp->if_capenable ^= IFCAP_RXCSUM;
1eca7b82 1143 reinit = 1;
984263bc 1144 }
f54de229
SZ
1145 if (mask & IFCAP_TXCSUM) {
1146 ifp->if_capenable ^= IFCAP_TXCSUM;
1147 if (ifp->if_capenable & IFCAP_TXCSUM)
1148 ifp->if_hwassist |= EM_CSUM_FEATURES;
1149 else
1150 ifp->if_hwassist &= ~EM_CSUM_FEATURES;
1151 }
0bbb59f3
SZ
1152 if (mask & IFCAP_TSO) {
1153 ifp->if_capenable ^= IFCAP_TSO;
1154 if (ifp->if_capenable & IFCAP_TSO)
1155 ifp->if_hwassist |= CSUM_TSO;
1156 else
1157 ifp->if_hwassist &= ~CSUM_TSO;
1158 }
1eca7b82
SZ
1159 if (mask & IFCAP_VLAN_HWTAGGING) {
1160 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
1161 reinit = 1;
1162 }
9c80d176 1163 if (reinit && (ifp->if_flags & IFF_RUNNING))
1eca7b82 1164 em_init(adapter);
984263bc 1165 break;
9c80d176 1166
984263bc 1167 default:
1eca7b82
SZ
1168 error = ether_ioctl(ifp, command, data);
1169 break;
984263bc 1170 }
87307ba1 1171 return (error);
984263bc
MD
1172}
1173
984263bc
MD
1174static void
1175em_watchdog(struct ifnet *ifp)
1176{
1eca7b82 1177 struct adapter *adapter = ifp->if_softc;
984263bc 1178
9c80d176
SZ
1179 ASSERT_SERIALIZED(ifp->if_serializer);
1180
1181 /*
1182 * The timer is set to 5 every time start queues a packet.
1183 * Then txeof keeps resetting it as long as it cleans at
1184 * least one descriptor.
1185 * Finally, anytime all descriptors are clean the timer is
1186 * set to 0.
1187 */
1188
9f60d74b
SZ
1189 if (E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1190 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) {
1191 /*
1192 * If we reach here, all TX jobs are completed and
1193 * the TX engine should have been idled for some time.
1194 * We don't need to call if_devstart() here.
1195 */
1196 ifp->if_flags &= ~IFF_OACTIVE;
1197 ifp->if_timer = 0;
1198 return;
1199 }
1200
1eca7b82
SZ
1201 /*
1202 * If we are in this routine because of pause frames, then
984263bc
MD
1203 * don't reset the hardware.
1204 */
9c80d176
SZ
1205 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
1206 E1000_STATUS_TXOFF) {
984263bc
MD
1207 ifp->if_timer = EM_TX_TIMEOUT;
1208 return;
1209 }
1210
9c80d176 1211 if (e1000_check_for_link(&adapter->hw) == 0)
f647ad3d 1212 if_printf(ifp, "watchdog timeout -- resetting\n");
984263bc 1213
9c80d176
SZ
1214 ifp->if_oerrors++;
1215 adapter->watchdog_events++;
1216
984263bc
MD
1217 em_init(adapter);
1218
9c80d176
SZ
1219 if (!ifq_is_empty(&ifp->if_snd))
1220 if_devstart(ifp);
984263bc
MD
1221}
1222
984263bc 1223static void
9c80d176 1224em_init(void *xsc)
984263bc 1225{
9c80d176
SZ
1226 struct adapter *adapter = xsc;
1227 struct ifnet *ifp = &adapter->arpcom.ac_if;
1228 device_t dev = adapter->dev;
eac00e59 1229 uint32_t pba;
984263bc 1230
87307ba1
SZ
1231 ASSERT_SERIALIZED(ifp->if_serializer);
1232
984263bc
MD
1233 em_stop(adapter);
1234
eac00e59
SZ
1235 /*
1236 * Packet Buffer Allocation (PBA)
1237 * Writing PBA sets the receive portion of the buffer
1238 * the remainder is used for the transmit buffer.
1eca7b82
SZ
1239 *
1240 * Devices before the 82547 had a Packet Buffer of 64K.
1241 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
1242 * After the 82547 the buffer was reduced to 40K.
1243 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
1244 * Note: default does not leave enough room for Jumbo Frame >10k.
eac00e59 1245 */
9c80d176
SZ
1246 switch (adapter->hw.mac.type) {
1247 case e1000_82547:
1248 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
1249 if (adapter->max_frame_size > 8192)
eac00e59 1250 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
7ea52455
SZ
1251 else
1252 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
eac00e59
SZ
1253 adapter->tx_fifo_head = 0;
1254 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
1255 adapter->tx_fifo_size =
9c80d176 1256 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
7ea52455 1257 break;
9c80d176 1258
87307ba1 1259 /* Total Packet Buffer on these is 48K */
9c80d176
SZ
1260 case e1000_82571:
1261 case e1000_82572:
1262 case e1000_80003es2lan:
7ea52455
SZ
1263 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
1264 break;
9c80d176
SZ
1265
1266 case e1000_82573: /* 82573: Total Packet Buffer is 32K */
7ea52455
SZ
1267 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
1268 break;
9c80d176
SZ
1269
1270 case e1000_82574:
2d0e5700 1271 case e1000_82583:
9c80d176 1272 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
1eca7b82 1273 break;
9c80d176 1274
2d0e5700
SZ
1275 case e1000_ich8lan:
1276 pba = E1000_PBA_8K;
1277 break;
1278
9c80d176
SZ
1279 case e1000_ich9lan:
1280 case e1000_ich10lan:
1281#define E1000_PBA_10K 0x000A
b0ff1d56
MS
1282 pba = E1000_PBA_10K;
1283 break;
9c80d176 1284
2d0e5700
SZ
1285 case e1000_pchlan:
1286 case e1000_pch2lan:
1287 pba = E1000_PBA_26K;
9c80d176
SZ
1288 break;
1289
7ea52455
SZ
1290 default:
1291 /* Devices before 82547 had a Packet Buffer of 64K. */
9c80d176 1292 if (adapter->max_frame_size > 8192)
7ea52455
SZ
1293 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1294 else
1295 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
eac00e59 1296 }
9c80d176 1297 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
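/*
 * Added note (worked example, assuming the usual 1KB PBA units): on an
 * 82547 with standard frames, pba = E1000_PBA_30K gives the receiver
 * 30KB of the 40KB packet buffer, and the tx_fifo_size computed above is
 * (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT = 10KB for transmit.
 */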
a4a205fa 1298
0d366ee7 1299 /* Get the latest mac address, User can use a LAA */
9c80d176
SZ
1300 bcopy(IF_LLADDR(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN);
1301
1302 /* Put the address into the Receive Address Array */
1303 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
1304
1305 /*
1306 * With the 82571 adapter, RAR[0] may be overwritten
1307 * when the other port is reset, we make a duplicate
1308 * in RAR[14] for that eventuality, this assures
1309 * the interface continues to function.
1310 */
1311 if (adapter->hw.mac.type == e1000_82571) {
1312 e1000_set_laa_state_82571(&adapter->hw, TRUE);
1313 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
1314 E1000_RAR_ENTRIES - 1);
1315 }
0d366ee7 1316
2d0e5700
SZ
1317 /* Reset the hardware */
1318 if (em_reset(adapter)) {
1319 device_printf(dev, "Unable to reset the hardware\n");
9c80d176 1320 /* XXX em_stop()? */
984263bc
MD
1321 return;
1322 }
87307ba1 1323 em_update_link_status(adapter);
984263bc 1324
9c80d176
SZ
1325 /* Setup VLAN support, basic and offload if available */
1326 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
984263bc 1327
9c80d176
SZ
1328 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1329 uint32_t ctrl;
1330
1331 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
1332 ctrl |= E1000_CTRL_VME;
1333 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
87307ba1
SZ
1334 }
1335
9c80d176
SZ
1336 /* Configure for OS presence */
1337 em_get_mgmt(adapter);
1338
984263bc 1339 /* Prepare transmit descriptors and buffers */
9c80d176
SZ
1340 em_init_tx_ring(adapter);
1341 em_init_tx_unit(adapter);
984263bc
MD
1342
1343 /* Setup Multicast table */
1344 em_set_multi(adapter);
1345
1346 /* Prepare receive descriptors and buffers */
9c80d176
SZ
1347 if (em_init_rx_ring(adapter)) {
1348 device_printf(dev, "Could not setup receive structures\n");
984263bc 1349 em_stop(adapter);
984263bc
MD
1350 return;
1351 }
9c80d176 1352 em_init_rx_unit(adapter);
7ea52455 1353
87307ba1 1354 /* Don't lose promiscuous settings */
0d366ee7 1355 em_set_promisc(adapter);
984263bc 1356
984263bc
MD
1357 ifp->if_flags |= IFF_RUNNING;
1358 ifp->if_flags &= ~IFF_OACTIVE;
1359
9c80d176
SZ
1360 callout_reset(&adapter->timer, hz, em_timer, adapter);
1361 e1000_clear_hw_cntrs_base_generic(&adapter->hw);
1362
1363 /* MSI/X configuration for 82574 */
1364 if (adapter->hw.mac.type == e1000_82574) {
1365 int tmp;
1366
1367 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
1368 tmp |= E1000_CTRL_EXT_PBA_CLR;
1369 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
1370 /*
2d0e5700 1371 * XXX MSIX
9c80d176
SZ
1372 * Set the IVAR - interrupt vector routing.
1373 * Each nibble represents a vector, high bit
1374 * is enable, other 3 bits are the MSIX table
1375 * entry, we map RXQ0 to 0, TXQ0 to 1, and
1376 * Link (other) to 2, hence the magic number.
1377 */
1378 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
1379 }
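/*
 * Added note (decoding the magic number under the nibble scheme described
 * above): in 0x800A0908, nibble 0 = 0x8 maps RXQ0 to vector 0 with the
 * enable bit set, nibble 2 = 0x9 maps TXQ0 to vector 1, and nibble 4 = 0xA
 * maps Link (other) to vector 2; the remaining set bit is left as-is.
 */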
1eca7b82
SZ
1380
1381#ifdef DEVICE_POLLING
9c80d176
SZ
1382 /*
1383 * Only enable interrupts if we are not polling, make sure
1384 * they are off otherwise.
1385 */
1eca7b82
SZ
1386 if (ifp->if_flags & IFF_POLLING)
1387 em_disable_intr(adapter);
1388 else
9c80d176
SZ
1389#endif /* DEVICE_POLLING */
1390 em_enable_intr(adapter);
0d366ee7 1391
2d0e5700 1392 /* AMT based hardware can now take control from firmware */
79878f87
SZ
1393 if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) ==
1394 (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT) &&
2d0e5700
SZ
1395 adapter->hw.mac.type >= e1000_82571)
1396 em_get_hw_control(adapter);
1397
0d366ee7 1398 /* Don't reset the phy next time init gets called */
9c80d176 1399 adapter->hw.phy.reset_disable = TRUE;
984263bc
MD
1400}
1401
984263bc 1402#ifdef DEVICE_POLLING
f647ad3d
JS
1403
1404static void
984263bc
MD
1405em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1406{
f647ad3d
JS
1407 struct adapter *adapter = ifp->if_softc;
1408 uint32_t reg_icr;
984263bc 1409
78195a76
MD
1410 ASSERT_SERIALIZED(ifp->if_serializer);
1411
9c80d176 1412 switch (cmd) {
9c095379
MD
1413 case POLL_REGISTER:
1414 em_disable_intr(adapter);
1415 break;
9c80d176 1416
9c095379 1417 case POLL_DEREGISTER:
f647ad3d 1418 em_enable_intr(adapter);
9c095379 1419 break;
9c80d176 1420
9c095379 1421 case POLL_AND_CHECK_STATUS:
9c80d176 1422 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
f647ad3d 1423 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
9ccd8c1f 1424 callout_stop(&adapter->timer);
9c80d176 1425 adapter->hw.mac.get_link_status = 1;
87307ba1 1426 em_update_link_status(adapter);
9c80d176 1427 callout_reset(&adapter->timer, hz, em_timer, adapter);
f647ad3d 1428 }
9c80d176 1429 /* FALL THROUGH */
9c095379
MD
1430 case POLL_ONLY:
1431 if (ifp->if_flags & IFF_RUNNING) {
87307ba1
SZ
1432 em_rxeof(adapter, count);
1433 em_txeof(adapter);
1eca7b82 1434
9c095379 1435 if (!ifq_is_empty(&ifp->if_snd))
9db4b353 1436 if_devstart(ifp);
9c095379
MD
1437 }
1438 break;
f647ad3d 1439 }
984263bc 1440}
9c095379 1441
984263bc
MD
1442#endif /* DEVICE_POLLING */
1443
984263bc 1444static void
9c80d176 1445em_intr(void *xsc)
984263bc 1446{
87ab432b
SZ
1447 em_intr_body(xsc, TRUE);
1448}
1449
1450static void
1451em_intr_body(struct adapter *adapter, boolean_t chk_asserted)
1452{
9c80d176 1453 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 1454 uint32_t reg_icr;
984263bc 1455
07855a48 1456 logif(intr_beg);
78195a76
MD
1457 ASSERT_SERIALIZED(ifp->if_serializer);
1458
9c80d176
SZ
1459 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
1460
87ab432b
SZ
1461 if (chk_asserted &&
1462 ((adapter->hw.mac.type >= e1000_82571 &&
1463 (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
1464 reg_icr == 0)) {
07855a48 1465 logif(intr_end);
984263bc 1466 return;
07855a48 1467 }
984263bc 1468
87307ba1 1469 /*
9c80d176
SZ
1470 * XXX: some laptops trigger several spurious interrupts
1471 * on em(4) when in the resume cycle. The ICR register
1472 * reports all-ones value in this case. Processing such
1473 * interrupts would lead to a freeze. I don't know why.
87307ba1
SZ
1474 */
1475 if (reg_icr == 0xffffffff) {
1476 logif(intr_end);
1477 return;
984263bc
MD
1478 }
1479
79938e61 1480 if (ifp->if_flags & IFF_RUNNING) {
9f60d74b 1481 if (reg_icr &
6643d744 1482 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO))
9f60d74b 1483 em_rxeof(adapter, -1);
6643d744 1484 if (reg_icr & E1000_ICR_TXDW) {
9f60d74b
SZ
1485 em_txeof(adapter);
1486 if (!ifq_is_empty(&ifp->if_snd))
1487 if_devstart(ifp);
1488 }
f647ad3d 1489 }
984263bc 1490
87307ba1
SZ
1491 /* Link status change */
1492 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
1493 callout_stop(&adapter->timer);
9c80d176 1494 adapter->hw.mac.get_link_status = 1;
87307ba1 1495 em_update_link_status(adapter);
9c80d176
SZ
1496
1497 /* Deal with TX cruft when link lost */
1498 em_tx_purge(adapter);
1499
1500 callout_reset(&adapter->timer, hz, em_timer, adapter);
87307ba1
SZ
1501 }
1502
1503 if (reg_icr & E1000_ICR_RXO)
1504 adapter->rx_overruns++;
1505
07855a48 1506 logif(intr_end);
984263bc
MD
1507}
1508
984263bc 1509static void
87ab432b
SZ
1510em_intr_mask(void *xsc)
1511{
1512 struct adapter *adapter = xsc;
1513
1514 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
1515 /*
1516 * NOTE:
1517 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
1518 * so don't check it.
1519 */
1520 em_intr_body(adapter, FALSE);
1521 E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
1522}
1523
1524static void
984263bc
MD
1525em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1526{
87307ba1 1527 struct adapter *adapter = ifp->if_softc;
1eca7b82 1528 u_char fiber_type = IFM_1000_SX;
984263bc 1529
78195a76
MD
1530 ASSERT_SERIALIZED(ifp->if_serializer);
1531
87307ba1 1532 em_update_link_status(adapter);
984263bc
MD
1533
1534 ifmr->ifm_status = IFM_AVALID;
1535 ifmr->ifm_active = IFM_ETHER;
1536
1537 if (!adapter->link_active)
1538 return;
1539
1540 ifmr->ifm_status |= IFM_ACTIVE;
1541
9c80d176
SZ
1542 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
1543 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
1544 if (adapter->hw.mac.type == e1000_82545)
1eca7b82
SZ
1545 fiber_type = IFM_1000_LX;
1546 ifmr->ifm_active |= fiber_type | IFM_FDX;
984263bc
MD
1547 } else {
1548 switch (adapter->link_speed) {
1549 case 10:
1550 ifmr->ifm_active |= IFM_10_T;
1551 break;
1552 case 100:
1553 ifmr->ifm_active |= IFM_100_TX;
1554 break;
9c80d176 1555
984263bc 1556 case 1000:
7f259627 1557 ifmr->ifm_active |= IFM_1000_T;
984263bc
MD
1558 break;
1559 }
1560 if (adapter->link_duplex == FULL_DUPLEX)
1561 ifmr->ifm_active |= IFM_FDX;
1562 else
1563 ifmr->ifm_active |= IFM_HDX;
1564 }
984263bc
MD
1565}
1566
984263bc
MD
1567static int
1568em_media_change(struct ifnet *ifp)
1569{
87307ba1
SZ
1570 struct adapter *adapter = ifp->if_softc;
1571 struct ifmedia *ifm = &adapter->media;
984263bc 1572
78195a76 1573 ASSERT_SERIALIZED(ifp->if_serializer);
9c095379 1574
87307ba1
SZ
1575 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1576 return (EINVAL);
1577
984263bc
MD
1578 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1579 case IFM_AUTO:
9c80d176
SZ
1580 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1581 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
984263bc 1582 break;
9c80d176 1583
1eca7b82 1584 case IFM_1000_LX:
984263bc 1585 case IFM_1000_SX:
7f259627 1586 case IFM_1000_T:
9c80d176
SZ
1587 adapter->hw.mac.autoneg = DO_AUTO_NEG;
1588 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
984263bc 1589 break;
9c80d176 1590
984263bc 1591 case IFM_100_TX:
9c80d176
SZ
1592 adapter->hw.mac.autoneg = FALSE;
1593 adapter->hw.phy.autoneg_advertised = 0;
984263bc 1594 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
9c80d176 1595 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
984263bc 1596 else
9c80d176 1597 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
984263bc 1598 break;
9c80d176 1599
984263bc 1600 case IFM_10_T:
9c80d176
SZ
1601 adapter->hw.mac.autoneg = FALSE;
1602 adapter->hw.phy.autoneg_advertised = 0;
984263bc 1603 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
9c80d176 1604 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
984263bc 1605 else
9c80d176 1606 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
984263bc 1607 break;
9c80d176 1608
984263bc 1609 default:
f647ad3d 1610 if_printf(ifp, "Unsupported media type\n");
9c80d176 1611 break;
984263bc 1612 }
9c80d176 1613
f647ad3d 1614 /*
1615 * As the speed/duplex settings may have changed we need to
f647ad3d
JS
1616 * reset the PHY.
1617 */
9c80d176 1618 adapter->hw.phy.reset_disable = FALSE;
984263bc 1619
78195a76 1620 em_init(adapter);
984263bc 1621
9c80d176 1622 return (0);
9ccd8c1f
JS
1623}
1624
984263bc 1625static int
9c80d176 1626em_encap(struct adapter *adapter, struct mbuf **m_headp)
9ccd8c1f 1627{
9c80d176 1628 bus_dma_segment_t segs[EM_MAX_SCATTER];
1eca7b82 1629 bus_dmamap_t map;
9c80d176
SZ
1630 struct em_buffer *tx_buffer, *tx_buffer_mapped;
1631 struct e1000_tx_desc *ctxd = NULL;
002b3a05 1632 struct mbuf *m_head = *m_headp;
9f60d74b 1633 uint32_t txd_upper, txd_lower, txd_used, cmd = 0;
9c80d176 1634 int maxsegs, nsegs, i, j, first, last = 0, error;
984263bc 1635
0bbb59f3
SZ
1636 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1637 error = em_tso_pullup(adapter, m_headp);
1638 if (error)
1639 return error;
1640 m_head = *m_headp;
1641 }
1642
9c80d176
SZ
1643 txd_upper = txd_lower = 0;
1644 txd_used = 0;
87307ba1
SZ
1645
1646 /*
9c80d176
SZ
1647 * Capture the first descriptor index, this descriptor
1648 * will have the index of the EOP which is the only one
1649 * that now gets a DONE bit writeback.
87307ba1 1650 */
9c80d176
SZ
1651 first = adapter->next_avail_tx_desc;
1652 tx_buffer = &adapter->tx_buffer_area[first];
1653 tx_buffer_mapped = tx_buffer;
1654 map = tx_buffer->map;
87307ba1 1655
9c80d176
SZ
1656 maxsegs = adapter->num_tx_desc_avail - EM_TX_RESERVED;
1657 KASSERT(maxsegs >= adapter->spare_tx_desc,
ed20d0e3 1658 ("not enough spare TX desc"));
9c80d176
SZ
1659 if (adapter->pcix_82544) {
1660 /* Half it; see the comment in em_attach() */
1661 maxsegs >>= 1;
9ccd8c1f 1662 }
9c80d176
SZ
1663 if (maxsegs > EM_MAX_SCATTER)
1664 maxsegs = EM_MAX_SCATTER;
984263bc 1665
1666 error = bus_dmamap_load_mbuf_defrag(adapter->txtag, map, m_headp,
1667 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
1668 if (error) {
1669 if (error == ENOBUFS)
1670 adapter->mbuf_alloc_failed++;
1671 else
1672 adapter->no_tx_dma_setup++;
984263bc 1673
1674 m_freem(*m_headp);
1675 *m_headp = NULL;
1676 return error;
7ea52455 1677 }
9c80d176 1678 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);
984263bc 1679
9c80d176 1680 m_head = *m_headp;
9f60d74b 1681 adapter->tx_nsegs += nsegs;
9c80d176 1682
1683 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1684 /* TSO will consume one TX desc */
1685 adapter->tx_nsegs += em_tso_setup(adapter, m_head,
1686 &txd_upper, &txd_lower);
1687 } else if (m_head->m_pkthdr.csum_flags & EM_CSUM_FEATURES) {
9c80d176 1688 /* TX csum offloading will consume one TX desc */
1689 adapter->tx_nsegs += em_txcsum(adapter, m_head,
1690 &txd_upper, &txd_lower);
9c80d176 1691 }
984263bc 1692 i = adapter->next_avail_tx_desc;
1693
1694 /* Set up our transmit descriptors */
9c80d176 1695 for (j = 0; j < nsegs; j++) {
1696 /* If adapter is 82544 and on PCIX bus */
1697		if (adapter->pcix_82544) {
1698 DESC_ARRAY desc_array;
1699 uint32_t array_elements, counter;
1700
9c80d176 1701 /*
1702 * Check the Address and Length combination and
1703 * split the data accordingly
9ccd8c1f 1704 */
1705 array_elements = em_82544_fill_desc(segs[j].ds_addr,
1706 segs[j].ds_len, &desc_array);
9ccd8c1f 1707 for (counter = 0; counter < array_elements; counter++) {
1708 KKASSERT(txd_used < adapter->num_tx_desc_avail);
1709
9ccd8c1f 1710 tx_buffer = &adapter->tx_buffer_area[i];
1711 ctxd = &adapter->tx_desc_base[i];
1712
1713 ctxd->buffer_addr = htole64(
1714 desc_array.descriptor[counter].address);
1715 ctxd->lower.data = htole32(
2af74b85 1716 E1000_TXD_CMD_IFCS | txd_lower |
1717 desc_array.descriptor[counter].length);
1718 ctxd->upper.data = htole32(txd_upper);
1719
1720 last = i;
1721 if (++i == adapter->num_tx_desc)
1722 i = 0;
1723
9ccd8c1f 1724 txd_used++;
9c80d176 1725 }
9ccd8c1f 1726 } else {
0d366ee7 1727 tx_buffer = &adapter->tx_buffer_area[i];
9c80d176 1728 ctxd = &adapter->tx_desc_base[i];
9ccd8c1f 1729
9c80d176 1730 ctxd->buffer_addr = htole64(segs[j].ds_addr);
2af74b85 1731 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
1732 txd_lower | segs[j].ds_len);
1733 ctxd->upper.data = htole32(txd_upper);
984263bc 1734
87307ba1 1735 last = i;
1736 if (++i == adapter->num_tx_desc)
1737 i = 0;
0d366ee7 1738 }
984263bc 1739 }
9ccd8c1f 1740
984263bc 1741 adapter->next_avail_tx_desc = i;
1742 if (adapter->pcix_82544) {
1743 KKASSERT(adapter->num_tx_desc_avail > txd_used);
9ccd8c1f 1744 adapter->num_tx_desc_avail -= txd_used;
1745 } else {
1746 KKASSERT(adapter->num_tx_desc_avail > nsegs);
1747 adapter->num_tx_desc_avail -= nsegs;
1748 }
984263bc 1749
9c80d176 1750 /* Handle VLAN tag */
83790f85 1751 if (m_head->m_flags & M_VLANTAG) {
1752 /* Set the vlan id. */
1753 ctxd->upper.fields.special =
1754 htole16(m_head->m_pkthdr.ether_vlantag);
9ccd8c1f 1755
f647ad3d 1756 /* Tell hardware to add tag */
9c80d176 1757 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
f647ad3d 1758 }
1759
1760 tx_buffer->m_head = m_head;
9c80d176 1761 tx_buffer_mapped->map = tx_buffer->map;
1eca7b82 1762 tx_buffer->map = map;
9ccd8c1f 1763
1764 if (adapter->tx_nsegs >= adapter->tx_int_nsegs) {
1765 adapter->tx_nsegs = 0;
1766
1767 /*
1768 * Report Status (RS) is turned on
1769 * every tx_int_nsegs descriptors.
1770 */
1771 cmd = E1000_TXD_CMD_RS;
1772
1773 /*
1774 * Keep track of the descriptor, which will
1775 * be written back by hardware.
1776 */
1777 adapter->tx_dd[adapter->tx_dd_tail] = last;
1778 EM_INC_TXDD_IDX(adapter->tx_dd_tail);
1779 KKASSERT(adapter->tx_dd_tail != adapter->tx_dd_head);
1780 }
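	/*
	 * Illustrative note (not in the original source): if, say,
	 * tx_int_nsegs is 64, only roughly every 64th used descriptor is
	 * tagged with RS here, so the hardware writes a DD status back for
	 * just those descriptors and em_txeof() only has to inspect the
	 * indices recorded in tx_dd[] rather than every descriptor.
	 */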
1781
9ccd8c1f 1782 /*
984263bc 1783 * Last Descriptor of Packet needs End Of Packet (EOP)
87307ba1 1784 */
9f60d74b 1785 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);
1786
1787 /*
9c80d176 1788	 * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
1789 * that this frame is available to transmit.
1790 */
9c80d176 1791 if (adapter->hw.mac.type == e1000_82547 &&
984263bc 1792 adapter->link_duplex == HALF_DUPLEX) {
cfefda96 1793 em_82547_move_tail_serialized(adapter);
9ccd8c1f 1794 } else {
1795 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
1796 if (adapter->hw.mac.type == e1000_82547) {
cfefda96 1797 em_82547_update_fifo_head(adapter,
9c80d176 1798 m_head->m_pkthdr.len);
1799 }
1800 }
87307ba1 1801 return (0);
1802}
1803
9c80d176 1804/*
984263bc 1805 * 82547 workaround to avoid controller hang in half-duplex environment.
87307ba1 1806 * The workaround is to avoid queuing a large packet that would span
1807 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
1808 * in this case. We do that only when FIFO is quiescent.
1809 */
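/*
 * Hedged summary of the flow below: for each EOP frame between the
 * hardware TDT and the software tail, em_82547_fifo_workaround()
 * decides whether the frame would overflow the space left in the
 * internal TX FIFO.  If it would and the FIFO cannot yet be reset,
 * TDT is left alone and the tx_fifo_timer callout retries through
 * em_82547_move_tail() once the FIFO has drained.
 */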
9c095379 1810static void
1eca7b82 1811em_82547_move_tail_serialized(struct adapter *adapter)
9c095379 1812{
1813 struct e1000_tx_desc *tx_desc;
1814 uint16_t hw_tdt, sw_tdt, length = 0;
1815 bool eop = 0;
984263bc 1816
1817 ASSERT_SERIALIZED(adapter->arpcom.ac_if.if_serializer);
1818
1819 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
984263bc 1820 sw_tdt = adapter->next_avail_tx_desc;
f647ad3d 1821
1822 while (hw_tdt != sw_tdt) {
1823 tx_desc = &adapter->tx_desc_base[hw_tdt];
1824 length += tx_desc->lower.flags.length;
1825 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
87307ba1 1826 if (++hw_tdt == adapter->num_tx_desc)
1827 hw_tdt = 0;
1828
87307ba1 1829 if (eop) {
984263bc 1830 if (em_82547_fifo_workaround(adapter, length)) {
eac00e59 1831 adapter->tx_fifo_wrk_cnt++;
1832 callout_reset(&adapter->tx_fifo_timer, 1,
1833 em_82547_move_tail, adapter);
1834 break;
984263bc 1835 }
9c80d176 1836 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
1837 em_82547_update_fifo_head(adapter, length);
1838 length = 0;
984263bc 1839 }
1840 }
1841}
1842
1843static void
1844em_82547_move_tail(void *xsc)
1845{
1846 struct adapter *adapter = xsc;
1847 struct ifnet *ifp = &adapter->arpcom.ac_if;
1848
1849 lwkt_serialize_enter(ifp->if_serializer);
1850 em_82547_move_tail_serialized(adapter);
1851 lwkt_serialize_exit(ifp->if_serializer);
1852}
1853
1854static int
1855em_82547_fifo_workaround(struct adapter *adapter, int len)
1856{
1857 int fifo_space, fifo_pkt_len;
1858
1eca7b82 1859 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
1860
1861 if (adapter->link_duplex == HALF_DUPLEX) {
eac00e59 1862 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1863
1864 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
f647ad3d 1865 if (em_82547_tx_fifo_reset(adapter))
87307ba1 1866 return (0);
f647ad3d 1867 else
87307ba1 1868 return (1);
1869 }
1870 }
87307ba1 1871 return (0);
1872}
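/*
 * Worked example (illustrative only; assumes EM_FIFO_HDR is 16 bytes):
 * a 1514-byte frame yields fifo_pkt_len = roundup2(1514 + 16, 16) = 1536.
 * In half duplex it is queued directly only if
 * 1536 < EM_82547_PKT_THRESH + (tx_fifo_size - tx_fifo_head); otherwise
 * the FIFO must first be reset by em_82547_tx_fifo_reset() or drained.
 */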
1873
1874static void
1875em_82547_update_fifo_head(struct adapter *adapter, int len)
1876{
1eca7b82 1877 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);
f647ad3d 1878
1879 /* tx_fifo_head is always 16 byte aligned */
1880 adapter->tx_fifo_head += fifo_pkt_len;
1881 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
1882 adapter->tx_fifo_head -= adapter->tx_fifo_size;
1883}
1884
1885static int
1886em_82547_tx_fifo_reset(struct adapter *adapter)
7ea52455 1887{
1888 uint32_t tctl;
1889
1890 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
1891 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
1892 (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
1893 E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
1894 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
1895 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
1896 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
984263bc 1897 /* Disable TX unit */
1898 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
1899 E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
1900 tctl & ~E1000_TCTL_EN);
1901
1902 /* Reset FIFO pointers */
1903 E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
1904 adapter->tx_head_addr);
1905 E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
1906 adapter->tx_head_addr);
1907 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
1908 adapter->tx_head_addr);
1909 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
1910 adapter->tx_head_addr);
1911
1912 /* Re-enable TX unit */
9c80d176 1913 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
1914 E1000_WRITE_FLUSH(&adapter->hw);
1915
1916 adapter->tx_fifo_head = 0;
eac00e59 1917 adapter->tx_fifo_reset_cnt++;
984263bc 1918
87307ba1 1919 return (TRUE);
eac00e59 1920 } else {
87307ba1 1921 return (FALSE);
1922 }
1923}
1924
1925static void
f647ad3d 1926em_set_promisc(struct adapter *adapter)
984263bc 1927{
9c80d176 1928 struct ifnet *ifp = &adapter->arpcom.ac_if;
1eca7b82 1929 uint32_t reg_rctl;
984263bc 1930
9c80d176 1931 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1932
1933 if (ifp->if_flags & IFF_PROMISC) {
1934 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1935 /* Turn this on if you want to see bad packets */
1936 if (em_debug_sbp)
1937 reg_rctl |= E1000_RCTL_SBP;
1938 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1939 } else if (ifp->if_flags & IFF_ALLMULTI) {
1940 reg_rctl |= E1000_RCTL_MPE;
1941 reg_rctl &= ~E1000_RCTL_UPE;
9c80d176 1942 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
984263bc 1943 }
1944}
1945
1946static void
f647ad3d 1947em_disable_promisc(struct adapter *adapter)
984263bc 1948{
f647ad3d 1949 uint32_t reg_rctl;
984263bc 1950
9c80d176 1951 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
984263bc 1952
1953 reg_rctl &= ~E1000_RCTL_UPE;
1954 reg_rctl &= ~E1000_RCTL_MPE;
1955 reg_rctl &= ~E1000_RCTL_SBP;
1956 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1957}
1958
984263bc 1959static void
f647ad3d 1960em_set_multi(struct adapter *adapter)
984263bc 1961{
9c80d176 1962 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 1963 struct ifmultiaddr *ifma;
9c80d176 1964 uint32_t reg_rctl = 0;
2d0e5700 1965 uint8_t *mta;
f647ad3d 1966 int mcnt = 0;
f647ad3d 1967
1968 mta = adapter->mta;
1969 bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
1970
1971 if (adapter->hw.mac.type == e1000_82542 &&
1972 adapter->hw.revision_id == E1000_REVISION_2) {
1973 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
1974 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
1975 e1000_pci_clear_mwi(&adapter->hw);
f647ad3d 1976 reg_rctl |= E1000_RCTL_RST;
9c80d176 1977 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
1978 msec_delay(5);
1979 }
984263bc 1980
441d34b2 1981 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1982 if (ifma->ifma_addr->sa_family != AF_LINK)
1983 continue;
1984
1985 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1986 break;
984263bc 1987
f647ad3d 1988 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
9c80d176 1989 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
1990 mcnt++;
1991 }
1992
1993 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
9c80d176 1994 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
f647ad3d 1995 reg_rctl |= E1000_RCTL_MPE;
9c80d176 1996 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
7ea52455 1997 } else {
6a5a645e 1998 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
7ea52455 1999 }
f647ad3d 2000
2001 if (adapter->hw.mac.type == e1000_82542 &&
2002 adapter->hw.revision_id == E1000_REVISION_2) {
2003 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
f647ad3d 2004 reg_rctl &= ~E1000_RCTL_RST;
9c80d176 2005 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
f647ad3d 2006 msec_delay(5);
2007 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2008 e1000_pci_set_mwi(&adapter->hw);
2009 }
2010}
984263bc 2011
2012/*
2013 * This routine checks for link status and updates statistics.
2014 */
984263bc 2015static void
9c80d176 2016em_timer(void *xsc)
984263bc 2017{
2018 struct adapter *adapter = xsc;
2019 struct ifnet *ifp = &adapter->arpcom.ac_if;
984263bc 2020
78195a76 2021 lwkt_serialize_enter(ifp->if_serializer);
984263bc 2022
87307ba1 2023 em_update_link_status(adapter);
2024 em_update_stats(adapter);
2025
2026 /* Reset LAA into RAR[0] on 82571 */
2027 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
2028 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2029
2030 if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
984263bc 2031 em_print_hw_stats(adapter);
9c80d176 2032
2033 em_smartspeed(adapter);
2034
9c80d176 2035 callout_reset(&adapter->timer, hz, em_timer, adapter);
984263bc 2036
78195a76 2037 lwkt_serialize_exit(ifp->if_serializer);
2038}
2039
2040static void
87307ba1 2041em_update_link_status(struct adapter *adapter)
984263bc 2042{
2043 struct e1000_hw *hw = &adapter->hw;
2044 struct ifnet *ifp = &adapter->arpcom.ac_if;
2045 device_t dev = adapter->dev;
2046 uint32_t link_check = 0;
2047
2048 /* Get the cached link value or read phy for real */
2049 switch (hw->phy.media_type) {
2050 case e1000_media_type_copper:
2051 if (hw->mac.get_link_status) {
2052 /* Do the work to read phy */
2053 e1000_check_for_link(hw);
2054 link_check = !hw->mac.get_link_status;
2055 if (link_check) /* ESB2 fix */
2056 e1000_cfg_on_link_up(hw);
2057 } else {
2058 link_check = TRUE;
984263bc 2059 }
2060 break;
2061
2062 case e1000_media_type_fiber:
2063 e1000_check_for_link(hw);
2064 link_check =
2065 E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
2066 break;
2067
2068 case e1000_media_type_internal_serdes:
2069 e1000_check_for_link(hw);
2070 link_check = adapter->hw.mac.serdes_has_link;
2071 break;
2072
2073 case e1000_media_type_unknown:
2074 default:
2075 break;
2076 }
2077
2078 /* Now check for a transition */
2079 if (link_check && adapter->link_active == 0) {
2080 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
2081 &adapter->link_duplex);
2082
2083 /*
2084 * Check if we should enable/disable SPEED_MODE bit on
2085 * 82571/82572
2086 */
2087 if (adapter->link_speed != SPEED_1000 &&
2088 (hw->mac.type == e1000_82571 ||
2089 hw->mac.type == e1000_82572)) {
2090 int tarc0;
2091
2092 tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
2d0e5700 2093 tarc0 &= ~SPEED_MODE_BIT;
9c80d176 2094 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
984263bc 2095 }
2096 if (bootverbose) {
2097 device_printf(dev, "Link is up %d Mbps %s\n",
2098 adapter->link_speed,
2099 ((adapter->link_duplex == FULL_DUPLEX) ?
2100 "Full Duplex" : "Half Duplex"));
2101 }
2102 adapter->link_active = 1;
2103 adapter->smartspeed = 0;
2104 ifp->if_baudrate = adapter->link_speed * 1000000;
2105 ifp->if_link_state = LINK_STATE_UP;
2106 if_link_state_change(ifp);
2107 } else if (!link_check && adapter->link_active == 1) {
2108 ifp->if_baudrate = adapter->link_speed = 0;
2109 adapter->link_duplex = 0;
2110 if (bootverbose)
2111 device_printf(dev, "Link is Down\n");
2112 adapter->link_active = 0;
2113#if 0
2114 /* Link down, disable watchdog */
2115	ifp->if_timer = 0;
2116#endif
2117 ifp->if_link_state = LINK_STATE_DOWN;
2118 if_link_state_change(ifp);
984263bc 2119 }
2120}
2121
984263bc 2122static void
9c80d176 2123em_stop(struct adapter *adapter)
984263bc 2124{
2125 struct ifnet *ifp = &adapter->arpcom.ac_if;
2126 int i;
984263bc 2127
2128 ASSERT_SERIALIZED(ifp->if_serializer);
2129
984263bc 2130 em_disable_intr(adapter);
9c80d176 2131
2132 callout_stop(&adapter->timer);
2133 callout_stop(&adapter->tx_fifo_timer);
984263bc 2134
984263bc 2135 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
af82d4bb 2136 ifp->if_timer = 0;
2137
2138 e1000_reset_hw(&adapter->hw);
2139 if (adapter->hw.mac.type >= e1000_82544)
2140 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2141
2142 for (i = 0; i < adapter->num_tx_desc; i++) {
2143 struct em_buffer *tx_buffer = &adapter->tx_buffer_area[i];
2144
2145 if (tx_buffer->m_head != NULL) {
2146 bus_dmamap_unload(adapter->txtag, tx_buffer->map);
2147 m_freem(tx_buffer->m_head);
2148 tx_buffer->m_head = NULL;
2149 }
2150 }
2151
2152 for (i = 0; i < adapter->num_rx_desc; i++) {
2153 struct em_buffer *rx_buffer = &adapter->rx_buffer_area[i];
2154
2155 if (rx_buffer->m_head != NULL) {
2156 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
2157 m_freem(rx_buffer->m_head);
2158 rx_buffer->m_head = NULL;
2159 }
2160 }
2161
2162 if (adapter->fmp != NULL)
2163 m_freem(adapter->fmp);
2164 adapter->fmp = NULL;
2165 adapter->lmp = NULL;
2166
2167 adapter->csum_flags = 0;
ed4fc0fe 2168 adapter->csum_lhlen = 0;
51e6819f 2169 adapter->csum_iphlen = 0;
2170 adapter->csum_thlen = 0;
2171 adapter->csum_mss = 0;
2172 adapter->csum_pktlen = 0;
2173
2174 adapter->tx_dd_head = 0;
2175 adapter->tx_dd_tail = 0;
2176 adapter->tx_nsegs = 0;
2177}
2178
2179static int
2180em_get_hw_info(struct adapter *adapter)
2181{
2182 device_t dev = adapter->dev;
2183
2184 /* Save off the information about this board */
2185 adapter->hw.vendor_id = pci_get_vendor(dev);
2186 adapter->hw.device_id = pci_get_device(dev);
2187 adapter->hw.revision_id = pci_get_revid(dev);
2188 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev);
9c80d176 2189 adapter->hw.subsystem_device_id = pci_get_subdevice(dev);
984263bc 2190
2191 /* Do Shared Code Init and Setup */
2192 if (e1000_set_mac_type(&adapter->hw))
2193 return ENXIO;
2194 return 0;
2195}
2196
1eca7b82 2197static int
9c80d176 2198em_alloc_pci_res(struct adapter *adapter)
1eca7b82 2199{
9c80d176 2200 device_t dev = adapter->dev;
053f3ae6 2201 u_int intr_flags;
84e26aaa 2202 int val, rid, msi_enable;
2203
2204 /* Enable bus mastering */
2205 pci_enable_busmaster(dev);
1eca7b82 2206
2207 adapter->memory_rid = EM_BAR_MEM;
2208 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
2209 &adapter->memory_rid, RF_ACTIVE);
2210 if (adapter->memory == NULL) {
1eca7b82 2211 device_printf(dev, "Unable to allocate bus resource: memory\n");
9c80d176 2212 return (ENXIO);
2213 }
2214 adapter->osdep.mem_bus_space_tag =
9c80d176 2215 rman_get_bustag(adapter->memory);
1eca7b82 2216 adapter->osdep.mem_bus_space_handle =
2217 rman_get_bushandle(adapter->memory);
2218
2219 /* XXX This is quite goofy, it is not actually used */
2220 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;
2221
2222 /* Only older adapters use IO mapping */
2223 if (adapter->hw.mac.type > e1000_82543 &&
2224 adapter->hw.mac.type < e1000_82571) {
1eca7b82 2225 /* Figure our where our IO BAR is ? */
9c80d176 2226 for (rid = PCIR_BAR(0); rid < PCIR_CARDBUSCIS;) {
1eca7b82 2227 val = pci_read_config(dev, rid, 4);
87307ba1 2228 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2229 adapter->io_rid = rid;
2230 break;
2231 }
2232 rid += 4;
2233 /* check for 64bit BAR */
2234 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2235 rid += 4;
1eca7b82 2236 }
9c80d176 2237 if (rid >= PCIR_CARDBUSCIS) {
2238 device_printf(dev, "Unable to locate IO BAR\n");
2239 return (ENXIO);
2240 }
2241 adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
2242 &adapter->io_rid, RF_ACTIVE);
2243 if (adapter->ioport == NULL) {
1eca7b82 2244 device_printf(dev, "Unable to allocate bus resource: "
2245 "ioport\n");
2246 return (ENXIO);
1eca7b82 2247 }
2248 adapter->hw.io_base = 0;
2249 adapter->osdep.io_bus_space_tag =
9c80d176 2250 rman_get_bustag(adapter->ioport);
87307ba1 2251 adapter->osdep.io_bus_space_handle =
9c80d176 2252 rman_get_bushandle(adapter->ioport);
2253 }
2254
84e26aaa 2255 /*
2256 * Don't enable MSI-X on 82574, see:
2257 * 82574 specification update errata #15
2258 *
84e26aaa 2259 * Don't enable MSI on PCI/PCI-X chips, see:
2260 * 82540 specification update errata #6
2261 * 82545 specification update errata #4
2262 *
2263 * Don't enable MSI on 82571/82572, see:
a835687d 2264 * 82571/82572 specification update errata #63
2265 */
2266 msi_enable = em_msi_enable;
2267 if (msi_enable &&
2268 (!pci_is_pcie(dev) ||
2269 adapter->hw.mac.type == e1000_82571 ||
2270 adapter->hw.mac.type == e1000_82572))
2271 msi_enable = 0;
2272
2273 adapter->intr_type = pci_alloc_1intr(dev, msi_enable,
2274 &adapter->intr_rid, &intr_flags);
2275
2276 if (adapter->intr_type == PCI_INTR_TYPE_LEGACY) {
2277 int unshared;
2278
2279 unshared = device_getenv_int(dev, "irq.unshared", 0);
2280 if (!unshared) {
2281 adapter->flags |= EM_FLAG_SHARED_INTR;
2282 if (bootverbose)
2283 device_printf(dev, "IRQ shared\n");
2284 } else {
2285 intr_flags &= ~RF_SHAREABLE;
2286 if (bootverbose)
2287 device_printf(dev, "IRQ unshared\n");
2288 }
2289 }
2290
9c80d176 2291 adapter->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
053f3ae6 2292 &adapter->intr_rid, intr_flags);
9c80d176 2293 if (adapter->intr_res == NULL) {
1eca7b82 2294 device_printf(dev, "Unable to allocate bus resource: "
2295 "interrupt\n");
2296 return (ENXIO);
2297 }
2298
9c80d176 2299 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
1eca7b82 2300 adapter->hw.back = &adapter->osdep;
a483bd34 2301 return (0);
2302}
2303
2304static void
9c80d176 2305em_free_pci_res(struct adapter *adapter)
1eca7b82 2306{
9c80d176 2307 device_t dev = adapter->dev;
1eca7b82 2308
2309 if (adapter->intr_res != NULL) {
2310 bus_release_resource(dev, SYS_RES_IRQ,
2311 adapter->intr_rid, adapter->intr_res);
1eca7b82 2312 }
9c80d176 2313
2314 if (adapter->intr_type == PCI_INTR_TYPE_MSI)
2315 pci_release_msi(dev);
2316
2317 if (adapter->memory != NULL) {
2318 bus_release_resource(dev, SYS_RES_MEMORY,
2319 adapter->memory_rid, adapter->memory);
2320 }
2321
2322 if (adapter->flash != NULL) {
2323 bus_release_resource(dev, SYS_RES_MEMORY,
2324 adapter->flash_rid, adapter->flash);
2325 }
2326
2327 if (adapter->ioport != NULL) {
2328 bus_release_resource(dev, SYS_RES_IOPORT,
2329 adapter->io_rid, adapter->ioport);
2330 }
2331}
2332
984263bc 2333static int
2d0e5700 2334em_reset(struct adapter *adapter)
984263bc 2335{
2336 device_t dev = adapter->dev;
2337 uint16_t rx_buffer_size;
7ea52455 2338
2339 /* When hardware is reset, fifo_head is also reset */
2340 adapter->tx_fifo_head = 0;
2341
87307ba1 2342 /* Set up smart power down as default off on newer adapters. */
1eca7b82 2343 if (!em_smart_pwr_down &&
2344 (adapter->hw.mac.type == e1000_82571 ||
2345 adapter->hw.mac.type == e1000_82572)) {
2346 uint16_t phy_tmp = 0;
2347
87307ba1 2348 /* Speed up time to link by disabling smart power down. */
2349 e1000_read_phy_reg(&adapter->hw,
2350 IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
1eca7b82 2351 phy_tmp &= ~IGP02E1000_PM_SPD;
2352 e1000_write_phy_reg(&adapter->hw,
2353 IGP02E1000_PHY_POWER_MGMT, phy_tmp);
2354 }
2355
7ea52455 2356 /*
2357 * These parameters control the automatic generation (Tx) and
2358 * response (Rx) to Ethernet PAUSE frames.
2359 * - High water mark should allow for at least two frames to be
2360 * received after sending an XOFF.
2361 * - Low water mark works best when it is very near the high water mark.
2362 * This allows the receiver to restart by sending XON when it has
2363	 * drained a bit. Here we use an arbitrary value of 1500 which will
2364 * restart after one full frame is pulled from the buffer. There
2365 * could be several smaller frames in the buffer and if so they will
2366 * not trigger the XON until their total number reduces the buffer
2367 * by 1500.
2368 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2369 */
2370 rx_buffer_size =
2371 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) << 10;
7ea52455 2372
2373 adapter->hw.fc.high_water = rx_buffer_size -
2374 roundup2(adapter->max_frame_size, 1024);
2375 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
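	/*
	 * Worked example (illustrative): if the PBA register reports 48KB
	 * of RX packet buffer and max_frame_size is 1518, then
	 * rx_buffer_size = 48 * 1024 = 49152,
	 * high_water = 49152 - roundup2(1518, 1024) = 47104 and
	 * low_water = 47104 - 1500 = 45604 bytes.
	 */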
2376
2377 if (adapter->hw.mac.type == e1000_80003es2lan)
2378 adapter->hw.fc.pause_time = 0xFFFF;
1eca7b82 2379 else
9c80d176 2380 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2d0e5700 2381
9c80d176 2382 adapter->hw.fc.send_xon = TRUE;
2d0e5700 2383
9c80d176 2384 adapter->hw.fc.requested_mode = e1000_fc_full;
7ea52455 2385
2386 /* Workaround: no TX flow ctrl for PCH */
2387 if (adapter->hw.mac.type == e1000_pchlan)
2388 adapter->hw.fc.requested_mode = e1000_fc_rx_pause;
2389
2390 /* Override - settings for PCH2LAN, ya its magic :) */
2391 if (adapter->hw.mac.type == e1000_pch2lan) {
2392 adapter->hw.fc.high_water = 0x5C20;
2393 adapter->hw.fc.low_water = 0x5048;
2394 adapter->hw.fc.pause_time = 0x0650;
2395 adapter->hw.fc.refresh_time = 0x0400;
2396
2397 /* Jumbos need adjusted PBA */
2398 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU)
2399 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 12);
2400 else
2401 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 26);
2402 }
2403
2404 /* Issue a global reset */
2405 e1000_reset_hw(&adapter->hw);
2406 if (adapter->hw.mac.type >= e1000_82544)
2407 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
6d5e2922 2408 em_disable_aspm(adapter);
2d0e5700 2409
2410 if (e1000_init_hw(&adapter->hw) < 0) {
2411 device_printf(dev, "Hardware Initialization Failed\n");
87307ba1 2412 return (EIO);
2413 }
2414
2415 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
2416 e1000_get_phy_info(&adapter->hw);
9c80d176 2417 e1000_check_for_link(&adapter->hw);
984263bc 2418
87307ba1 2419 return (0);
2420}
2421
984263bc 2422static void
9c80d176 2423em_setup_ifp(struct adapter *adapter)
984263bc 2424{
9c80d176 2425 struct ifnet *ifp = &adapter->arpcom.ac_if;
984263bc 2426
2427 if_initname(ifp, device_get_name(adapter->dev),
2428 device_get_unit(adapter->dev));
2429 ifp->if_softc = adapter;
2430 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
9c80d176 2431 ifp->if_init = em_init;
2432 ifp->if_ioctl = em_ioctl;
2433 ifp->if_start = em_start;
2434#ifdef DEVICE_POLLING
2435 ifp->if_poll = em_poll;
2436#endif
984263bc 2437 ifp->if_watchdog = em_watchdog;
e26dc3e9 2438 ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
19b1d5b8 2439 ifq_set_ready(&ifp->if_snd);
984263bc 2440
9c80d176 2441 ether_ifattach(ifp, adapter->hw.mac.addr, NULL);
984263bc 2442
0bbb59f3 2443 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
9c80d176 2444 if (adapter->hw.mac.type >= e1000_82543)
2445 ifp->if_capabilities |= IFCAP_HWCSUM;
2446 if (adapter->flags & EM_FLAG_TSO)
2447 ifp->if_capabilities |= IFCAP_TSO;
9c80d176 2448 ifp->if_capenable = ifp->if_capabilities;
984263bc 2449
9c80d176 2450 if (ifp->if_capenable & IFCAP_TXCSUM)
2451 ifp->if_hwassist |= EM_CSUM_FEATURES;
2452 if (ifp->if_capenable & IFCAP_TSO)
2453 ifp->if_hwassist |= CSUM_TSO;
21fa6062 2454
2455 /*
2456 * Tell the upper layer(s) we support long frames.
2457 */
2458 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
984263bc 2459
87307ba1 2460 /*
2461 * Specify the media types supported by this adapter and register
2462 * callbacks to update media and link information
2463 */
2464 ifmedia_init(&adapter->media, IFM_IMASK,
2465 em_media_change, em_media_status);
2466 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
2467 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
2468 u_char fiber_type = IFM_1000_SX; /* default type */
2469
2470 if (adapter->hw.mac.type == e1000_82545)
2471 fiber_type = IFM_1000_LX;
2472 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX,
984263bc 2473 0, NULL);
87307ba1 2474 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL);
2475 } else {
2476 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
87307ba1 2477 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
984263bc 2478 0, NULL);
87307ba1 2479 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
984263bc 2480 0, NULL);
87307ba1 2481 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
984263bc 2482 0, NULL);
2483 if (adapter->hw.phy.type != e1000_phy_ife) {
2484 ifmedia_add(&adapter->media,
2485 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
2486 ifmedia_add(&adapter->media,
2487 IFM_ETHER | IFM_1000_T, 0, NULL);
2488 }
2489 }
2490 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
2491 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
2492}
2493
2494
2495/*
2496 * Workaround for SmartSpeed on 82541 and 82547 controllers
2497 */
2498static void
2499em_smartspeed(struct adapter *adapter)
2500{
2501 uint16_t phy_tmp;
2502
2503 if (adapter->link_active || adapter->hw.phy.type != e1000_phy_igp ||
2504 adapter->hw.mac.autoneg == 0 ||
2505 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
2506 return;
2507
2508 if (adapter->smartspeed == 0) {
2509 /*
2510 * If Master/Slave config fault is asserted twice,
9c80d176 2511 * we assume back-to-back
f647ad3d 2512 */
9c80d176 2513 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
2514 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
2515 return;
9c80d176 2516 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
f647ad3d 2517 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
2518 e1000_read_phy_reg(&adapter->hw,
2519 PHY_1000T_CTRL, &phy_tmp);
2520 if (phy_tmp & CR_1000T_MS_ENABLE) {
2521 phy_tmp &= ~CR_1000T_MS_ENABLE;
2522 e1000_write_phy_reg(&adapter->hw,
2523 PHY_1000T_CTRL, phy_tmp);
f647ad3d 2524 adapter->smartspeed++;
2525 if (adapter->hw.mac.autoneg &&
2526 !e1000_phy_setup_autoneg(&adapter->hw) &&
2527 !e1000_read_phy_reg(&adapter->hw,
2528 PHY_CONTROL, &phy_tmp)) {
2529 phy_tmp |= MII_CR_AUTO_NEG_EN |
2530 MII_CR_RESTART_AUTO_NEG;
2531 e1000_write_phy_reg(&adapter->hw,
2532 PHY_CONTROL, phy_tmp);
2533 }
2534 }
2535 }
87307ba1 2536 return;
2537 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
2538 /* If still no link, perhaps using 2/3 pair cable */
9c80d176 2539 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
f647ad3d 2540 phy_tmp |= CR_1000T_MS_ENABLE;
2541 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
2542 if (adapter->hw.mac.autoneg &&
2543 !e1000_phy_setup_autoneg(&adapter->hw) &&
2544 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) {
2545 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
2546 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp);
2547 }
2548 }
9c80d176 2549
2550 /* Restart process after EM_SMARTSPEED_MAX iterations */
2551 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX)
2552 adapter->smartspeed = 0;
2553}
2554
2555static int
2556em_dma_malloc(struct adapter *adapter, bus_size_t size,
87307ba1 2557 struct em_dma_alloc *dma)
9ccd8c1f 2558{
2559 dma->dma_vaddr = bus_dmamem_coherent_any(adapter->parent_dtag,
2560 EM_DBA_ALIGN, size, BUS_DMA_WAITOK,
2561 &dma->dma_tag, &dma->dma_map,
2562 &dma->dma_paddr);
2563 if (dma->dma_vaddr == NULL)
2564 return ENOMEM;
2565 else
2566 return 0;
2567}
2568
2569static void
2570em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma)
2571{
2572 if (dma->dma_tag == NULL)
2573 return;
2574 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2575 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
2576 bus_dma_tag_destroy(dma->dma_tag);
2577}
2578
984263bc 2579static int
9c80d176 2580em_create_tx_ring(struct adapter *adapter)
984263bc 2581{
9c80d176 2582 device_t dev = adapter->dev;
1eca7b82 2583 struct em_buffer *tx_buffer;
2584 int error, i;
2585
2586 adapter->tx_buffer_area =
2587 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc,
2588 M_DEVBUF, M_WAITOK | M_ZERO);
984263bc 2589
2590 /*
2591 * Create DMA tags for tx buffers
2592 */
2593 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */
2594 1, 0, /* alignment, bounds */
2595 BUS_SPACE_MAXADDR, /* lowaddr */
2596 BUS_SPACE_MAXADDR, /* highaddr */
2597 NULL, NULL, /* filter, filterarg */
2598 EM_TSO_SIZE, /* maxsize */
2599 EM_MAX_SCATTER, /* nsegments */
0bbb59f3 2600 PAGE_SIZE, /* maxsegsize */
2601 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
2602 BUS_DMA_ONEBPAGE, /* flags */
2603 &adapter->txtag);
2604 if (error) {
2605 device_printf(dev, "Unable to allocate TX DMA tag\n");
2606 kfree(adapter->tx_buffer_area, M_DEVBUF);
2607 adapter->tx_buffer_area = NULL;
2608 return error;
2609 }
2610
2611 /*
2612 * Create DMA maps for tx buffers
2613 */
1eca7b82 2614 for (i = 0; i < adapter->num_tx_desc; i++) {
2615 tx_buffer = &adapter->tx_buffer_area[i];
2616
2617 error = bus_dmamap_create(adapter->txtag,
2618 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2619 &tx_buffer->map);
1eca7b82 2620 if (error) {
2621 device_printf(dev, "Unable to create TX DMA map\n");
2622 em_destroy_tx_ring(adapter, i);
2623 return error;
1eca7b82 2624 }
1eca7b82 2625 }
2626 return (0);
2627}
9ccd8c1f 2628
2629static void
2630em_init_tx_ring(struct adapter *adapter)
2631{
2632 /* Clear the old ring contents */
2633 bzero(adapter->tx_desc_base,
2634 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc);
2635
2636 /* Reset state */
2637 adapter->next_avail_tx_desc = 0;
2638 adapter->next_tx_to_clean = 0;
984263bc 2639 adapter->num_tx_desc_avail = adapter->num_tx_desc;
2640}
2641
984263bc 2642static void
9c80d176 2643em_init_tx_unit(struct adapter *adapter)
984263bc 2644{
9c80d176 2645 uint32_t tctl, tarc, tipg = 0;
2646 uint64_t bus_addr;
2647
984263bc 2648 /* Setup the Base and Length of the Tx Descriptor Ring */
9ccd8c1f 2649 bus_addr = adapter->txdma.dma_paddr;
2650 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
2651 adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
2652 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0),
2653 (uint32_t)(bus_addr >> 32));
2654 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
2655 (uint32_t)bus_addr);
984263bc 2656 /* Setup the HW Tx Head and Tail descriptor pointers */
2657 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
2658 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
984263bc 2659
984263bc 2660 /* Set the default values for the Tx Inter Packet Gap timer */
2661 switch (adapter->hw.mac.type) {
2662 case e1000_82542:
2663 tipg = DEFAULT_82542_TIPG_IPGT;
2664 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2665 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
984263bc 2666 break;
2667
2668 case e1000_80003es2lan:
2669 tipg = DEFAULT_82543_TIPG_IPGR1;
2670 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
2671 E1000_TIPG_IPGR2_SHIFT;
1eca7b82 2672 break;
9c80d176 2673
984263bc 2674 default:
2675 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
2676 adapter->hw.phy.media_type ==
2677 e1000_media_type_internal_serdes)
2678 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
984263bc 2679 else
2680 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2681 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2682 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2683 break;
2684 }
2685
2686 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2687
2688 /* NOTE: 0 is not allowed for TIDV */
2689 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, 1);
2690	if (adapter->hw.mac.type >= e1000_82540)
2691 E1000_WRITE_REG(&adapter->hw, E1000_TADV, 0);
984263bc 2692
2693 if (adapter->hw.mac.type == e1000_82571 ||
2694 adapter->hw.mac.type == e1000_82572) {
2695 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
2696 tarc |= SPEED_MODE_BIT;
2697 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2698 } else if (adapter->hw.mac.type == e1000_80003es2lan) {
2699 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
2700 tarc |= 1;
2701 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2702 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
2703 tarc |= 1;
2704 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
2705 }
2706
984263bc 2707 /* Program the Transmit Control Register */
2708 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2709 tctl &= ~E1000_TCTL_CT;
2710 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2711 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2712
2713 if (adapter->hw.mac.type >= e1000_82571)
2714 tctl |= E1000_TCTL_MULR;
1eca7b82 2715
87307ba1 2716 /* This write will effectively turn on the transmit unit. */
9c80d176 2717 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2718}
2719
984263bc 2720static void
9c80d176 2721em_destroy_tx_ring(struct adapter *adapter, int ndesc)
984263bc 2722{
2723 struct em_buffer *tx_buffer;
2724 int i;
984263bc 2725
2726 if (adapter->tx_buffer_area == NULL)
2727 return;
984263bc 2728
2729 for (i = 0; i < ndesc; i++) {
2730 tx_buffer = &adapter->tx_buffer_area[i];
1eca7b82 2731
2732 KKASSERT(tx_buffer->m_head == NULL);
2733 bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
9ccd8c1f 2734 }
2735 bus_dma_tag_destroy(adapter->txtag);
2736
2737 kfree(adapter->tx_buffer_area, M_DEVBUF);
2738 adapter->tx_buffer_area = NULL;
2739}
2740
2741/*
2742 * The offload context needs to be set when we transfer the first
2743 * packet of a particular protocol (TCP/UDP). This routine has been
002b3a05 2744 * enhanced to deal with inserted VLAN headers.
2745 *
2746 * If the new packet's ether header length, ip header length and
2747 * csum offloading type are same as the previous packet, we should
2748 * avoid allocating a new csum context descriptor; mainly to take
2749 * advantage of the pipeline effect of the TX data read request.
2750 *
2751 * This function returns the number of TX descriptors allocated for
2752 * csum context.
9c80d176 2753 */
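/*
 * Example of the reuse rule above (illustrative): a burst of TCP segments
 * sharing the same 14-byte Ethernet and 20-byte IP headers matches the
 * cached csum_lhlen/csum_iphlen/csum_flags, so em_txcsum() returns 0 for
 * every segment after the first and only the first one pays for a
 * context descriptor.
 */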
9f60d74b 2754static int
2755em_txcsum(struct adapter *adapter, struct mbuf *mp,
2756 uint32_t *txd_upper, uint32_t *txd_lower)
984263bc 2757{
9c80d176 2758 struct e1000_context_desc *TXD;
51e6819f 2759 int curr_txd, ehdrlen, csum_flags;
9c80d176 2760 uint32_t cmd, hdr_len, ip_hlen;
984263bc 2761
51e6819f 2762 csum_flags = mp->m_pkthdr.csum_flags & EM_CSUM_FEATURES;
2763 ip_hlen = mp->m_pkthdr.csum_iphlen;
2764 ehdrlen = mp->m_pkthdr.csum_lhlen;
51e6819f 2765
ed4fc0fe 2766 if (adapter->csum_lhlen == ehdrlen &&
2767 adapter->csum_iphlen == ip_hlen &&
2768 adapter->csum_flags == csum_flags) {
2769 /*
2770 * Same csum offload context as the previous packets;
2771 * just return.
2772 */
2773 *txd_upper = adapter->csum_txd_upper;
2774 *txd_lower = adapter->csum_txd_lower;
9f60d74b 2775 return 0;
2776 }
2777
2778 /*
2779 * Setup a new csum offload context.
2780 */
2781
2782 curr_txd = adapter->next_avail_tx_desc;
2783 TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd];
2784
2785 cmd = 0;
2786
2787 /* Setup of IP header checksum. */
2788 if (csum_flags & CSUM_IP) {
2789 /*
2790 * Start offset for header checksum calculation.
2791 * End offset for header checksum calculation.
2792 * Offset of place to put the checksum.
2793 */
2794 TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2795 TXD->lower_setup.ip_fields.ipcse =
2796 htole16(ehdrlen + ip_hlen - 1);
2797 TXD->lower_setup.ip_fields.ipcso =
2798 ehdrlen + offsetof(struct ip, ip_sum);
2799 cmd |= E1000_TXD_CMD_IP;
2800 *txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2801 }
2802 hdr_len = ehdrlen + ip_hlen;
2803
2804 if (csum_flags & CSUM_TCP) {
2805 /*
2806 * Start offset for payload checksum calculation.
2807 * End offset for payload checksum calculation.
2808 * Offset of place to put the checksum.
2809 */
2810 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2811 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2812 TXD->upper_setup.tcp_fields.tucso =
2813 hdr_len + offsetof(struct tcphdr, th_sum);
2814 cmd |= E1000_TXD_CMD_TCP;
2815 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
51e6819f 2816 } else if (csum_flags & CSUM_UDP) {
2817 /*
2818		 * Start offset for payload checksum calculation.
2819		 * End offset for payload checksum calculation.
2820 * Offset of place to put the checksum.
2821 */
2822 TXD->upper_setup.tcp_fields.tucss = hdr_len;
2823 TXD->upper_setup.tcp_fields.tucse = htole16(0);
2824 TXD->upper_setup.tcp_fields.tucso =
2825 hdr_len + offsetof(struct udphdr, uh_sum);
2826 *txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2827 }
2828
2829 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
2830 E1000_TXD_DTYP_D; /* Data descr */
2831
2832 /* Save the information for this csum offloading context */
ed4fc0fe 2833 adapter->csum_lhlen = ehdrlen;
2834 adapter->csum_iphlen = ip_hlen;
2835 adapter->csum_flags = csum_flags;
2836 adapter->csum_txd_upper = *txd_upper;
2837 adapter->csum_txd_lower = *txd_lower;
2838
2839 TXD->tcp_seg_setup.data = htole32(0);
2840 TXD->cmd_and_length =
2af74b85 2841 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd);
2842
2843 if (++curr_txd == adapter->num_tx_desc)
2844 curr_txd = 0;
2845
9c80d176 2846 KKASSERT(adapter->num_tx_desc_avail > 0);
984263bc 2847 adapter->num_tx_desc_avail--;
9c80d176 2848
984263bc 2849 adapter->next_avail_tx_desc = curr_txd;
9f60d74b 2850 return 1;
2851}
2852
984263bc 2853static void
87307ba1 2854em_txeof(struct adapter *adapter)
984263bc 2855{
9c80d176 2856 struct ifnet *ifp = &adapter->arpcom.ac_if;
2857 struct em_buffer *tx_buffer;
2858 int first, num_avail;
2859
2860 if (adapter->tx_dd_head == adapter->tx_dd_tail)
2861 return;
984263bc 2862
2863 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2864 return;
984263bc 2865
9c80d176 2866 num_avail = adapter->num_tx_desc_avail;
87307ba1 2867 first = adapter->next_tx_to_clean;
9c80d176 2868
9f60d74b 2869 while (adapter->tx_dd_head != adapter->tx_dd_tail) {
4e499730 2870 struct e1000_tx_desc *tx_desc;
9f60d74b 2871 int dd_idx = adapter->tx_dd[adapter->tx_dd_head];
984263bc 2872
9f60d74b 2873 tx_desc = &adapter->tx_desc_base[dd_idx];
2874 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) {
2875 EM_INC_TXDD_IDX(adapter->tx_dd_head);
984263bc 2876
9f60d74b
SZ
2877 if (++dd_idx == adapter->num_tx_desc)
2878 dd_idx = 0;
9c80d176 2879
9f60d74b 2880 while (first != dd_idx) {
2881 logif(pkt_txclean);
2882
2883 num_avail++;
2884
4e499730 2885 tx_buffer = &adapter->tx_buffer_area[first];
2886 if (tx_buffer->m_head) {
2887 ifp->if_opackets++;
2888 bus_dmamap_unload(adapter->txtag,
2889 tx_buffer->map);
2890 m_freem(tx_buffer->m_head);
2891 tx_buffer->m_head = NULL;
2892 }
2893
2894 if (++first == adapter->num_tx_desc)
2895 first = 0;
2896 }
2897 } else {
2898 break;
2899 }
f647ad3d 2900 }
2901 adapter->next_tx_to_clean = first;
2902 adapter->num_tx_desc_avail = num_avail;
2903
2904 if (adapter->tx_dd_head == adapter->tx_dd_tail) {
2905 adapter->tx_dd_head = 0;
2906 adapter->tx_dd_tail = 0;
2907 }
2908
2909 if (!EM_IS_OACTIVE(adapter)) {
2910 ifp->if_flags &= ~IFF_OACTIVE;
2911
2912 /* All clean, turn off the timer */
2913 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2914 ifp->if_timer = 0;
2915 }
2916}
2917
2918static void
2919em_tx_collect(struct adapter *adapter)
2920{
2921 struct ifnet *ifp = &adapter->arpcom.ac_if;
2922 struct em_buffer *tx_buffer;
2923 int tdh, first, num_avail, dd_idx = -1;
2924
2925 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2926 return;
2927
2928 tdh = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
2929 if (tdh == adapter->next_tx_to_clean)
2930 return;
2931
2932 if (adapter->tx_dd_head != adapter->tx_dd_tail)
2933 dd_idx = adapter->tx_dd[adapter->tx_dd_head];
2934
2935 num_avail = adapter->num_tx_desc_avail;
2936 first = adapter->next_tx_to_clean;
2937
2938 while (first != tdh) {
2939 logif(pkt_txclean);
2940
2941 num_avail++;
2942
4e499730 2943 tx_buffer = &adapter->tx_buffer_area[first];
2944 if (tx_buffer->m_head) {
2945 ifp->if_opackets++;
2946 bus_dmamap_unload(adapter->txtag,
2947 tx_buffer->map);
2948 m_freem(tx_buffer->m_head);
2949 tx_buffer->m_head = NULL;
2950 }
2951
2952 if (first == dd_idx) {
2953 EM_INC_TXDD_IDX(adapter->tx_dd_head);
2954 if (adapter->tx_dd_head == adapter->tx_dd_tail) {
2955 adapter->tx_dd_head = 0;
2956 adapter->tx_dd_tail = 0;
2957 dd_idx = -1;
2958 } else {
2959 dd_idx = adapter->tx_dd[adapter->tx_dd_head];
2960 }
2961 }
2962
2963 if (++first == adapter->num_tx_desc)
2964 first = 0;
2965 }
2966 adapter->next_tx_to_clean = first;
9c80d176 2967 adapter->num_tx_desc_avail = num_avail;
984263bc 2968
9f60d74b 2969 if (!EM_IS_OACTIVE(adapter)) {
9c80d176 2970 ifp->if_flags &= ~IFF_OACTIVE;
afa68aa1 2971
2972 /* All clean, turn off the timer */
2973 if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2974 ifp->if_timer = 0;
2975 }
2976}
984263bc 2977
2978/*
2979 * When link is lost there is sometimes work still pending in the TX ring
2980 * which would result in a watchdog; rather than allow that, do an
2981 * attempted cleanup and then reinit here. Note that this has been
2982 * seen mostly with fiber adapters.
2983 */
2984static void
2985em_tx_purge(struct adapter *adapter)
2986{
2987 struct ifnet *ifp = &adapter->arpcom.ac_if;
2988
2989 if (!adapter->link_active && ifp->if_timer) {
9f60d74b 2990 em_tx_collect(adapter);
2991 if (ifp->if_timer) {
2992 if_printf(ifp, "Link lost, TX pending, reinit\n");
f647ad3d 2993 ifp->if_timer = 0;
2994 em_init(adapter);
2995 }
f647ad3d 2996 }
2997}
2998
984263bc 2999static int
9c80d176 3000em_newbuf(struct adapter *adapter, int i, int init)
984263bc 3001{
3002 struct mbuf *m;
3003 bus_dma_segment_t seg;
3004 bus_dmamap_t map;
9ccd8c1f 3005 struct em_buffer *rx_buffer;
3006 int error, nseg;
3007
3008 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3009 if (m == NULL) {
3010 adapter->mbuf_cluster_failed++;
3011 if (init) {
3012 if_printf(&adapter->arpcom.ac_if,
3013 "Unable to allocate RX mbuf\n");
984263bc 3014 }
9c80d176 3015 return (ENOBUFS);
984263bc 3016 }
9c80d176 3017 m->m_len = m->m_pkthdr.len = MCLBYTES;
87307ba1 3018
3019 if (adapter->max_frame_size <= MCLBYTES - ETHER_ALIGN)
3020 m_adj(m, ETHER_ALIGN);
9ccd8c1f 3021
3022 error = bus_dmamap_load_mbuf_segment(adapter->rxtag,
3023 adapter->rx_sparemap, m,
3024 &seg, 1, &nseg, BUS_DMA_NOWAIT);
9ccd8c1f 3025 if (error) {
3026 m_freem(m);
3027 if (init) {
3028 if_printf(&adapter->arpcom.ac_if,
3029 "Unable to load RX mbuf\n");
3030 }
87307ba1 3031 return (error);
9ccd8c1f 3032 }
984263bc 3033
3034 rx_buffer = &adapter->rx_buffer_area[i];
3035 if (rx_buffer->m_head != NULL)
3036 bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
3037
3038 map = rx_buffer->map;
3039 rx_buffer->map = adapter->rx_sparemap;
3040 adapter->rx_sparemap = map;
3041
3042 rx_buffer->m_head = m;
3043
3044 adapter->rx_desc_base[i].buffer_addr = htole64(seg.ds_addr);
87307ba1 3045 return (0);
3046}
3047
984263bc 3048static int
9c80d176 3049em_create_rx_ring(struct adapter *adapter)
984263bc 3050{
9c80d176 3051 device_t dev = adapter->dev;
9ccd8c1f 3052 struct em_buffer *rx_buffer;
3053 int i, error;
3054
3055 adapter->rx_buffer_area =
3056 kmalloc(sizeof(struct em_buffer) * adapter->num_rx_desc,
3057 M_DEVBUF, M_WAITOK | M_ZERO);
9ccd8c1f 3058
3059 /*
3060 * Create DMA tag for rx buffers
3061 */
3062 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */
3063 1, 0, /* alignment, bounds */
3064 BUS_SPACE_MAXADDR, /* lowaddr */
3065 BUS_SPACE_MAXADDR, /* highaddr */
3066 NULL, NULL, /* filter, filterarg */
3067 MCLBYTES, /* maxsize */
3068 1, /* nsegments */
3069 MCLBYTES, /* maxsegsize */
3070 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
3071 &adapter->rxtag);
87307ba1 3072 if (error) {
3073 device_printf(dev, "Unable to allocate RX DMA tag\n");
3074 kfree(adapter->rx_buffer_area, M_DEVBUF);
3075 adapter->rx_buffer_area = NULL;
3076 return error;
3077 }
3078
3079 /*
3080 * Create spare DMA map for rx buffers
3081 */
3082 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK,
3083 &adapter->rx_sparemap);
3084 if (error) {
3085 device_printf(dev, "Unable to create spare RX DMA map\n");
3086 bus_dma_tag_destroy(adapter->rxtag);
3087 kfree(adapter->rx_buffer_area, M_DEVBUF);
3088 adapter->rx_buffer_area = NULL;
3089 return error;
9ccd8c1f 3090 }
3091
3092 /*
3093 * Create DMA maps for rx buffers
3094 */
3095 for (i = 0; i < adapter->num_rx_desc; i++) {
3096 rx_buffer = &adapter->rx_buffer_area[i];
3097
3098 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK,
9ccd8c1f 3099 &rx_buffer->map);
87307ba1 3100 if (error) {
3101 device_printf(dev, "Unable to create RX DMA map\n");
3102 em_destroy_rx_ring(adapter, i);
3103 return error;
9ccd8c1f 3104 }
984263bc 3105 }
87307ba1 3106 return (0);
3107}
3108
984263bc 3109static int
9c80d176 3110em_init_rx_ring(struct adapter *adapter)
984263bc 3111{
9c80d176 3112 int i, error;
984263bc 3113
9c80d176 3114 /* Reset descriptor ring */
87307ba1 3115 bzero(adapter->rx_desc_base,
9c80d176 3116 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);
87307ba1 3117
3118 /* Allocate new ones. */
3119 for (i = 0; i < adapter->num_rx_desc; i++) {
3120 error = em_newbuf(adapter, i, 1);
3121 if (error)
3122 return (error);
3123 }
3124
3125 /* Setup our descriptor pointers */
f647ad3d 3126 adapter->next_rx_desc_to_check = 0;
3127
3128 return (0);
3129}
3130
984263bc 3131static void
9c80d176 3132em_init_rx_unit(struct adapter *adapter)
984263bc 3133{
9c80d176 3134 struct ifnet *ifp = &adapter->arpcom.ac_if;
f647ad3d 3135 uint64_t bus_addr;
2d0e5700 3136 uint32_t rctl;
984263bc 3137
3138 /*
3139 * Make sure receives are disabled while setting
3140 * up the descriptor ring
3141 */
3142 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3143 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
984263bc 3144
9c80d176 3145 if (adapter->hw.mac.type >= e1000_82540) {
3146 uint32_t itr;
3147
3148 /*
3149 * Set the interrupt throttling rate. Value is calculated
3150 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
3151 */
3152 if (adapter->int_throttle_ceil)
3153 itr = 1000000000 / 256 / adapter->int_throttle_ceil;
3154 else
3155 itr = 0;
3156 em_set_itr(adapter, itr);
f647ad3d 3157 }
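	/*
	 * Worked example (illustrative): with int_throttle_ceil set to
	 * 10000 interrupts/s, itr = 1000000000 / 256 / 10000 = 390, i.e.
	 * an inter-interrupt interval of about 390 * 256ns, roughly 100us.
	 * A ceiling of 0 leaves throttling disabled.
	 */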
984263bc 3158
3159	/* Disable accelerated acknowledgement */
3160 if (adapter->hw.mac.type == e1000_82574) {
3161 E1000_WRITE_REG(&adapter->hw,
3162 E1000_RFCTL, E1000_RFCTL_ACK_DIS);
3163 }
3164
3165 /* Receive Checksum Offload for TCP and UDP */
3166 if (ifp->if_capenable & IFCAP_RXCSUM) {
3167 uint32_t rxcsum;
3168
3169 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3170 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3171 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3172 }
3173
3174 /*
3175 * XXX TEMPORARY WORKAROUND: on some systems with 82573
3176 * long latencies are observed, like Lenovo X60. This
3177 * change eliminates the problem, but since having positive
3178 * values in RDTR is a known source of problems on other
3179 * platforms another solution is being sought.
3180 */
3181 if (em_82573_workaround && adapter->hw.mac.type == e1000_82573) {
3182 E1000_WRITE_REG(&adapter->hw, E1000_RADV, EM_RADV_82573);
3183 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, EM_RDTR_82573);
3184 }
3185
3186 /*
3187 * Setup the Base and Length of the Rx Descriptor Ring
3188 */
9ccd8c1f 3189 bus_addr = adapter->rxdma.dma_paddr;
3190 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
3191 adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
3192 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
3193 (uint32_t)(bus_addr >> 32));
3194 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
3195 (uint32_t)bus_addr);
984263bc 3196
3197 /*
3198 * Setup the HW Rx Head and Tail Descriptor Pointers
3199 */
3200 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
3201 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);
3202
3203 /* Set early receive threshold on appropriate hw */
3204 if (((adapter->hw.mac.type == e1000_ich9lan) ||
3205 (adapter->hw.mac.type == e1000_pch2lan) ||
3206 (adapter->hw.mac.type == e1000_ich10lan)) &&
3207 (ifp->if_mtu > ETHERMTU)) {
3208 uint32_t rxdctl;
3209
3210 rxdctl = E1000_READ_REG(&adapter->hw, E1000_RXDCTL(0));
3211 E1000_WRITE_REG(&adapter->hw, E1000_RXDCTL(0), rxdctl | 3);
3212 E1000_WRITE_REG(&adapter->hw, E1000_ERT, 0x100 | (1 << 13));
3213 }
3214
3215 if (adapter->hw.mac.type == e1000_pch2lan) {
3216 if (ifp->if_mtu > ETHERMTU)
3217 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, TRUE);
3218 else
3219 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, FALSE);
3220 }
3221
984263bc 3222 /* Setup the Receive Control Register */
3223 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3224 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
3225 E1000_RCTL_RDMTS_HALF |
3226 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
984263bc 3227
3228 /* Make sure VLAN Filters are off */
3229 rctl &= ~E1000_RCTL_VFE;
3230
3231 if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
3232 rctl |= E1000_RCTL_SBP;
3233 else
3234 rctl &= ~E1000_RCTL_SBP;
984263bc 3235
3236 switch (adapter->rx_buffer_len) {
3237 default:
3238 case 2048:
3239 rctl |= E1000_RCTL_SZ_2048;
3240 break;
3241
3242 case 4096:
3243 rctl |= E1000_RCTL_SZ_4096 |
3244 E1000_RCTL_BSEX | E1000_RCTL_LPE;
984263bc 3245 break;
3246
3247 case 8192:
3248 rctl |= E1000_RCTL_SZ_8192 |
3249 E1000_RCTL_BSEX | E1000_RCTL_LPE;
984263bc 3250 break;
3251
3252 case 16384:
3253 rctl |= E1000_RCTL_SZ_16384 |
3254 E1000_RCTL_BSEX | E1000_RCTL_LPE;