 * Copyright (c) 2006-2007 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
 * The following controllers are supported by this driver:
 *
 * The following controllers are not supported by this driver:
 *   BCM5709S A0, A1, B0, B1, B2, C0
 * Note about MSI-X on 5709/5716:
 * - 9 MSI-X vectors are supported.
 * - The association between MSI-X vectors, RX/TX rings and status
 *   blocks is fixed:
 *   o  The first RX ring and the first TX ring use the first
 *      status block.
 *   o  The first MSI-X vector is associated with the first
 *      status block.
 *   o  The second RX ring and the second TX ring use the second
 *      status block.
 *   o  The second MSI-X vector is associated with the second
 *      status block.
 * - Status blocks must reside in physically contiguous memory
 *   and each status block consumes 128 bytes.  In addition to
 *   this, the memory for the status blocks is aligned on a
 *   128-byte boundary in this driver.  (see bce_dma_alloc() and
 *   HC_CONFIG)
 * - Each status block has its own coalescing parameters, which also
 *   serve as the related MSI-X vector's interrupt moderation
 *   parameters.  (see bce_coal_change())
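 *
 * As a minimal illustration of the layout described above (not part
 * of the driver; BCE_SBLK_SIZE and bce_sblk() are hypothetical
 * names), the status block serving a given MSI-X vector is simply a
 * fixed 128-byte stride from the aligned base allocation:
 *
 *	#define BCE_SBLK_SIZE	128	// size == alignment == 128 bytes
 *
 *	static inline void *
 *	bce_sblk(void *base, int vector)	// base: 128-byte aligned
 *	{
 *		return (void *)((char *)base + vector * BCE_SBLK_SIZE);
 *	}
 */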
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "miibus_if.h"

#include <dev/netif/bce/if_bcereg.h>
#include <dev/netif/bce/if_bcefw.h>
#define BCE_MSI_CKINTVL		((10 * hz) / 1000)	/* 10ms */

#ifdef BCE_RSS_DEBUG
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !BCE_RSS_DEBUG */
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* BCE_RSS_DEBUG */
/****************************************************************************/
/* PCI Device ID Table */
/* Used by bce_probe() to identify the devices supported by this driver. */
/****************************************************************************/
#define BCE_DEVDESC_MAX		64

static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
		"HP NC371i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
		"HP NC373T PCIe Multifunction Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
		"HP NC374m PCIe Multifunction Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
		"HP NC373m Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
		"HP NC373F PCIe Multifunc Giga Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708S 1000Base-SX" },

	/* BCM5709C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-T" },

	/* BCM5709S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-SX" },

	/* BCM5716 controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5716 1000Base-T" },

	{ 0, 0, 0, 0, NULL }
};
/****************************************************************************/
/* Supported Flash NVRAM device data. */
/****************************************************************************/
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BCE_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BCE_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709/5716 buffered flash (256kB)",
};
/****************************************************************************/
/* DragonFly device entry points. */
/****************************************************************************/
static int	bce_probe(device_t);
static int	bce_attach(device_t);
static int	bce_detach(device_t);
static void	bce_shutdown(device_t);
static int	bce_miibus_read_reg(device_t, int, int);
static int	bce_miibus_write_reg(device_t, int, int, int);
static void	bce_miibus_statchg(device_t);
/****************************************************************************/
/* BCE Register/Memory Access Routines */
/****************************************************************************/
static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
static void	bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
static uint32_t	bce_shmem_rd(struct bce_softc *, uint32_t);
static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
/****************************************************************************/
/* BCE NVRAM Access Routines */
/****************************************************************************/
static int	bce_acquire_nvram_lock(struct bce_softc *);
static int	bce_release_nvram_lock(struct bce_softc *);
static void	bce_enable_nvram_access(struct bce_softc *);
static void	bce_disable_nvram_access(struct bce_softc *);
static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
		    uint32_t);
static int	bce_init_nvram(struct bce_softc *);
static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
static int	bce_nvram_test(struct bce_softc *);
/****************************************************************************/
/* BCE DMA Allocate/Free Routines */
/****************************************************************************/
static int	bce_dma_alloc(struct bce_softc *);
static void	bce_dma_free(struct bce_softc *);
static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);

/****************************************************************************/
/* BCE Firmware Synchronization and Load */
/****************************************************************************/
static int	bce_fw_sync(struct bce_softc *, uint32_t);
static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
		    uint32_t, uint32_t);
static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
		    struct fw_info *);
static void	bce_start_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_start_rxp_cpu(struct bce_softc *);
static void	bce_init_rxp_cpu(struct bce_softc *);
static void	bce_init_txp_cpu(struct bce_softc *);
static void	bce_init_tpat_cpu(struct bce_softc *);
static void	bce_init_cp_cpu(struct bce_softc *);
static void	bce_init_com_cpu(struct bce_softc *);
static void	bce_init_cpus(struct bce_softc *);
static void	bce_setup_msix_table(struct bce_softc *);
static void	bce_init_rss(struct bce_softc *);

static void	bce_stop(struct bce_softc *);
static int	bce_reset(struct bce_softc *, uint32_t);
static int	bce_chipinit(struct bce_softc *);
static int	bce_blockinit(struct bce_softc *);
static void	bce_probe_pci_caps(struct bce_softc *);
static void	bce_print_adapter_info(struct bce_softc *);
static void	bce_get_media(struct bce_softc *);
static void	bce_mgmt_init(struct bce_softc *);
static int	bce_init_ctx(struct bce_softc *);
static void	bce_get_mac_addr(struct bce_softc *);
static void	bce_set_mac_addr(struct bce_softc *);
static void	bce_set_rx_mode(struct bce_softc *);
static void	bce_coal_change(struct bce_softc *);
static void	bce_npoll_coal_change(struct bce_softc *);
static void	bce_setup_serialize(struct bce_softc *);
static void	bce_serialize_skipmain(struct bce_softc *);
static void	bce_deserialize_skipmain(struct bce_softc *);
static void	bce_set_timer_cpuid(struct bce_softc *, boolean_t);
static int	bce_alloc_intr(struct bce_softc *);
static void	bce_free_intr(struct bce_softc *);
static void	bce_try_alloc_msix(struct bce_softc *);
static void	bce_free_msix(struct bce_softc *, boolean_t);
static void	bce_setup_ring_cnt(struct bce_softc *);
static int	bce_setup_intr(struct bce_softc *);
static void	bce_teardown_intr(struct bce_softc *);
static int	bce_setup_msix(struct bce_softc *);
static void	bce_teardown_msix(struct bce_softc *, int);
static int	bce_create_tx_ring(struct bce_tx_ring *);
static void	bce_destroy_tx_ring(struct bce_tx_ring *);
static void	bce_init_tx_context(struct bce_tx_ring *);
static int	bce_init_tx_chain(struct bce_tx_ring *);
static void	bce_free_tx_chain(struct bce_tx_ring *);
static void	bce_xmit(struct bce_tx_ring *);
static int	bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
static int	bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
		    uint16_t *, uint16_t *);

static int	bce_create_rx_ring(struct bce_rx_ring *);
static void	bce_destroy_rx_ring(struct bce_rx_ring *);
static void	bce_init_rx_context(struct bce_rx_ring *);
static int	bce_init_rx_chain(struct bce_rx_ring *);
static void	bce_free_rx_chain(struct bce_rx_ring *);
static int	bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t,
		    uint32_t *, int);
static void	bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
		    uint32_t *);
static struct pktinfo *bce_rss_pktinfo(struct pktinfo *, uint32_t,
		    const struct l2_fhdr *);
static void	bce_start(struct ifnet *, struct ifaltq_subque *);
static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bce_watchdog(struct ifaltq_subque *);
static int	bce_ifmedia_upd(struct ifnet *);
static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bce_init(void *);

static void	bce_npoll(struct ifnet *, struct ifpoll_info *);
static void	bce_npoll_rx(struct ifnet *, void *, int);
static void	bce_npoll_tx(struct ifnet *, void *, int);
static void	bce_npoll_status(struct ifnet *);
static void	bce_npoll_rx_pack(struct ifnet *, void *, int);

static void	bce_serialize(struct ifnet *, enum ifnet_serialize);
static void	bce_deserialize(struct ifnet *, enum ifnet_serialize);
static int	bce_tryserialize(struct ifnet *, enum ifnet_serialize);
static void	bce_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
static void	bce_intr(struct bce_softc *);
static void	bce_intr_legacy(void *);
static void	bce_intr_msi(void *);
static void	bce_intr_msi_oneshot(void *);
static void	bce_intr_msix_rxtx(void *);
static void	bce_intr_msix_rx(void *);
static void	bce_tx_intr(struct bce_tx_ring *, uint16_t);
static void	bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
static void	bce_phy_intr(struct bce_softc *);
static void	bce_disable_intr(struct bce_softc *);
static void	bce_enable_intr(struct bce_softc *);
static void	bce_reenable_intr(struct bce_rx_ring *);
static void	bce_check_msi(void *);

static void	bce_stats_update(struct bce_softc *);
static void	bce_tick(void *);
static void	bce_tick_serialized(struct bce_softc *);
static void	bce_pulse(void *);

static void	bce_add_sysctls(struct bce_softc *);
static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);

static int	bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);

static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
		    uint32_t *, uint32_t);
/*
 * NOTE:
 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
 * takes 1023 as the TX ticks limit.  However, using 1023 will
 * cause the 5708(B2) to generate extra interrupts (~2000/s) even
 * when there is _no_ network activity on the NIC.
 */
static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
static uint32_t	bce_rx_bds = 0;			/* bcm: 6 */
static uint32_t	bce_rx_ticks_int = 150;		/* bcm: 18 */
static uint32_t	bce_rx_ticks = 150;		/* bcm: 18 */
static int	bce_tx_wreg = 8;

static int	bce_msi_enable = 1;
static int	bce_msix_enable = 1;

static int	bce_rx_pages = RX_PAGES_DEFAULT;
static int	bce_tx_pages = TX_PAGES_DEFAULT;

static int	bce_rx_rings = 0;	/* auto */
static int	bce_tx_rings = 0;	/* auto */

TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
TUNABLE_INT("hw.bce.msix.enable", &bce_msix_enable);
TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
TUNABLE_INT("hw.bce.tx_rings", &bce_tx_rings);
TUNABLE_INT("hw.bce.rx_rings", &bce_rx_rings);
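/*
 * Because the knobs above are registered with TUNABLE_INT(), they can
 * be overridden from /boot/loader.conf before the driver attaches.
 * An illustrative sketch (example values only, not tuning advice):
 *
 *	hw.bce.tx_ticks="1022"		# never 1023, see NOTE above
 *	hw.bce.rx_rings="2"		# 0 means auto
 *	hw.bce.msix.enable="1"
 */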
/****************************************************************************/
/* DragonFly device dispatch table. */
/****************************************************************************/
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	DEVMETHOD_END
};

static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;

DECLARE_DUMMY_MODULE(if_bce);
MODULE_DEPEND(bce, miibus, 1, 1, 1);
DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
/****************************************************************************/
/* Device probe function. */
/* */
/* Compares the device to the driver's list of supported devices and */
/* reports back to the OS whether this is the right driver for the device. */
/* */
/* Returns: */
/* BUS_PROBE_DEFAULT on success, positive value on failure. */
/****************************************************************************/
static int
bce_probe(device_t dev)
{
	struct bce_type *t;
	uint16_t vid, did, svid, sdid;

	/* Get the data for the device to be probed. */
	vid  = pci_get_vendor(dev);
	did  = pci_get_device(dev);
	svid = pci_get_subvendor(dev);
	sdid = pci_get_subdevice(dev);

	/* Look through the list of known devices for a match. */
	for (t = bce_devs; t->bce_name != NULL; ++t) {
		if (vid == t->bce_vid && did == t->bce_did &&
		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
			uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
			char *descbuf;

			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);

			/* Print out the device identity. */
			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
			    t->bce_name,
			    ((revid & 0xf0) >> 4) + 'A', revid & 0xf);

			device_set_desc_copy(dev, descbuf);
			kfree(descbuf, M_TEMP);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
/****************************************************************************/
/* Adapter Information Print Function. */
/* */
/* Prints out a summary of the adapter's features and configuration. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_print_adapter_info(struct bce_softc *sc)
{
	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);

	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
	    ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));

	/* Bus info. */
	if (sc->bce_flags & BCE_PCIE_FLAG) {
		kprintf("Bus (PCIe x%d, ", sc->link_width);
		switch (sc->link_speed) {
		case 1:
			kprintf("2.5Gbps); ");
			break;
		default:
			kprintf("Unknown link speed); ");
			break;
		}
	} else {
		kprintf("Bus (PCI%s, %s, %dMHz); ",
		    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
		    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		    sc->bus_speed_mhz);
	}

	/* Firmware version and device features. */
	kprintf("B/C (%s)", sc->bce_bc_ver);

	if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
	    (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
			kprintf("MFW[%s]", sc->bce_mfw_ver);
		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
			kprintf(" 2.5G");
	}
	kprintf("\n");
}
/****************************************************************************/
/* PCI Capabilities Probe Function. */
/* */
/* Walks the PCI capabilities list for the device to find what features */
/* are supported. */
/* */
/* Returns: */
/* None. */
/****************************************************************************/
static void
bce_probe_pci_caps(struct bce_softc *sc)
{
	device_t dev = sc->bce_dev;
	uint8_t ptr;

	if (pci_is_pcix(dev))
		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;

	ptr = pci_get_pciecap_ptr(dev);
	if (ptr != 0) {
		uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);

		sc->link_speed = link_status & 0xf;
		sc->link_width = (link_status >> 4) & 0x3f;
		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
		sc->bce_flags |= BCE_PCIE_FLAG;
	}
}
/****************************************************************************/
/* Device attach function. */
/* */
/* Allocates device resources, performs secondary chip identification, */
/* resets and initializes the hardware, and initializes driver instance */
/* variables. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_attach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	struct mii_probe_args mii_args;
	uintptr_t mii_priv = 0;
	int offset, offset_def;
	int i, j, rid, rc = 0;

	sc->bce_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < BCE_MSIX_MAX; ++i) {
		struct bce_msix_data *msix = &sc->bce_msix[i];

		msix->msix_cpuid = -1;
	}

	pci_enable_busmaster(dev);

	bce_probe_pci_caps(sc);

	/* Allocate PCI memory resources. */
	rid = PCIR_BAR(0);
	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | PCI_RF_DENSE);
	if (sc->bce_res_mem == NULL) {
		device_printf(dev, "PCI memory allocation failed\n");
		return (ENXIO);
	}
	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space is not
	 * valid until this is done.
	 */
	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
	    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
	/* Save ASIC revision info. */
	sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch (BCE_CHIP_ID(sc)) {
	case BCE_CHIP_ID_5706_A0:
	case BCE_CHIP_ID_5706_A1:
	case BCE_CHIP_ID_5708_A0:
	case BCE_CHIP_ID_5708_B0:
	case BCE_CHIP_ID_5709_A0:
	case BCE_CHIP_ID_5709_B0:
	case BCE_CHIP_ID_5709_B1:
#if 0
	/* 5709C B2 seems to work fine */
	case BCE_CHIP_ID_5709_B2:
#endif
		device_printf(dev, "Unsupported chip id 0x%08x!\n",
		    BCE_CHIP_ID(sc));
		rc = ENODEV;
		goto fail;
	}

	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
		    BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
			mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
	} else {
		mii_priv |= BRGPHY_FLAG_BER_BUG;
	}
	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BCE_SHM_HDR_SIGNATURE_SIG) {
		/* Multi-port devices use different offsets in shared memory. */
		sc->bce_shmem_base = REG_RD_IND(sc,
		    BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
	} else {
		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
	}

	/* Fetch the bootcode revision. */
	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
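	/*
	 * The loop below renders the three high bytes of the packed
	 * revision word as a dotted decimal string, skipping leading
	 * zeros within each byte.  For example (illustrative value
	 * only), val == 0x040a0100 yields the bytes 4, 10 and 1,
	 * which become "4.10.1" in sc->bce_bc_ver.
	 */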
	for (i = 0, j = 0; i < 3; i++) {
		uint8_t num;
		int k, skip0;

		num = (uint8_t)(val >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				sc->bce_bc_ver[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			sc->bce_bc_ver[j++] = '.';
	}
	/* Check if any management firmware is running. */
	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;

		/* Allow time for firmware to enter the running state. */
		for (i = 0; i < 30; i++) {
			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
			if (val & BCE_CONDITION_MFW_RUN_MASK)
				break;
			DELAY(10000);
		}
	}

	/* Check the current bootcode state. */
	val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
	    BCE_CONDITION_MFW_RUN_MASK;
	if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
	    val != BCE_CONDITION_MFW_RUN_NONE) {
		uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);

		for (i = 0, j = 0; j < 3; j++) {
			val = bce_reg_rd_ind(sc, addr + j * 4);
			val = bswap32(val);
			memcpy(&sc->bce_mfw_ver[i], &val, 4);
			i += 4;
		}
	}
	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bce_flags |= BCE_PCIX_FLAG;

		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
		    BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;
	}

	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
	/* Reset the controller. */
	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (rc != 0)
		goto fail;

	/* Initialize the controller. */
	rc = bce_chipinit(sc);
	if (rc != 0) {
		device_printf(dev, "Controller initialization failed!\n");
		goto fail;
	}

	/* Perform NVRAM test. */
	rc = bce_nvram_test(sc);
	if (rc != 0) {
		device_printf(dev, "NVRAM test failed!\n");
		goto fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bce_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs should be ready before
	 * generating an interrupt, while ticks control how long a BD
	 * can sit in the chain before generating an interrupt.
	 * Set the default values for the RX and TX rings.
	 */
#ifdef BCE_DEBUG
	/* Force more frequent interrupts. */
	sc->bce_tx_quick_cons_trip_int = 1;
	sc->bce_tx_quick_cons_trip = 1;
	sc->bce_tx_ticks_int = 0;
	sc->bce_tx_ticks = 0;

	sc->bce_rx_quick_cons_trip_int = 1;
	sc->bce_rx_quick_cons_trip = 1;
	sc->bce_rx_ticks_int = 0;
	sc->bce_rx_ticks = 0;
#else
	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
	sc->bce_tx_quick_cons_trip = bce_tx_bds;
	sc->bce_tx_ticks_int = bce_tx_ticks_int;
	sc->bce_tx_ticks = bce_tx_ticks;

	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
	sc->bce_rx_quick_cons_trip = bce_rx_bds;
	sc->bce_rx_ticks_int = bce_rx_ticks_int;
	sc->bce_rx_ticks = bce_rx_ticks;
#endif

	/* Update statistics once every second. */
	sc->bce_stats_ticks = 1000000 & 0xffff00;
	/* Find the media type for the adapter. */
	bce_get_media(sc);

	/* Find out RX/TX ring count */
	bce_setup_ring_cnt(sc);

	/* Allocate DMA memory resources. */
	rc = bce_dma_alloc(sc);
	if (rc != 0) {
		device_printf(dev, "DMA resource allocation failed!\n");
		goto fail;
	}

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX/TX CPU offset
	 */
	if (sc->rx_ring_cnt2 == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt2 * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.offset", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt2 != 0) {
			device_printf(dev, "invalid npoll.offset %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->npoll_ofs = offset;
#endif
	/* Allocate PCI IRQ resources. */
	rc = bce_alloc_intr(sc);
	if (rc != 0)
		goto fail;

	/* Setup serializer */
	bce_setup_serialize(sc);

	/* Initialize the ifnet interface. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_init = bce_init;
	ifp->if_serialize = bce_serialize;
	ifp->if_deserialize = bce_deserialize;
	ifp->if_tryserialize = bce_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = bce_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = bce_npoll;
#endif

	ifp->if_mtu = ETHERMTU;
	ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capabilities = BCE_IF_CAPABILITIES;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;

	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
		ifp->if_baudrate = IF_Gbps(2.5);
	else
		ifp->if_baudrate = IF_Gbps(1);

	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	if (sc->tx_ring_cnt > 1) {
		ifp->if_mapsubq = ifq_mapsubq_mask;
		ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_cnt - 1);
	}
	/*
	 * Look for our PHY.
	 */
	mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
	mii_args.mii_probemask = 1 << sc->bce_phy_addr;
	mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
	mii_args.mii_priv = mii_priv;

	rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
	if (rc != 0) {
		device_printf(dev, "PHY probe failed!\n");
		goto fail;
	}

	/* Attach to the Ethernet interface list. */
	ether_ifattach(ifp, sc->eaddr, NULL);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct bce_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, sc->bce_msix[i].msix_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog);
	}

	callout_init_mp(&sc->bce_tick_callout);
	callout_init_mp(&sc->bce_pulse_callout);
	callout_init_mp(&sc->bce_ckmsi_callout);

	rc = bce_setup_intr(sc);
	if (rc != 0) {
		device_printf(dev, "Failed to setup IRQ!\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Set timer CPUID */
	bce_set_timer_cpuid(sc, FALSE);

	/* Add the supported sysctls to the kernel. */
	bce_add_sysctls(sc);

	/*
	 * The chip reset earlier notified the bootcode that
	 * a driver is present.  We now need to start our pulse
	 * routine so that the bootcode is reminded that we're
	 * still running.
	 */

	/* Get the firmware running so IPMI still works */
	bce_mgmt_init(sc);

	if (bootverbose)
		bce_print_adapter_info(sc);

	return 0;
fail:
	bce_detach(dev);
	return rc;
}
/****************************************************************************/
/* Device detach function. */
/* */
/* Stops the controller, resets the controller, and releases resources. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		uint32_t msg;

		ifnet_serialize_all(ifp);

		/* Stop and reset the controller. */
		callout_stop(&sc->bce_pulse_callout);
		bce_stop(sc);
		if (sc->bce_flags & BCE_NO_WOL_FLAG)
			msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
		else
			msg = BCE_DRV_MSG_CODE_UNLOAD;
		bce_reset(sc, msg);

		bce_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_miibus != NULL)
		device_delete_child(dev, sc->bce_miibus);
	bus_generic_detach(dev);

	bce_free_intr(sc);

	if (sc->bce_res_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->bce_res_mem);
	}

	bce_dma_free(sc);

	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);

	return 0;
}
/****************************************************************************/
/* Device shutdown function. */
/* */
/* Stops and resets the controller. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_shutdown(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t msg;

	ifnet_serialize_all(ifp);

	bce_stop(sc);
	if (sc->bce_flags & BCE_NO_WOL_FLAG)
		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else
		msg = BCE_DRV_MSG_CODE_UNLOAD;
	bce_reset(sc, msg);

	ifnet_deserialize_all(ifp);
}
/****************************************************************************/
/* Indirect register read. */
/* */
/* Reads NetXtreme II registers using an index/data register pair in PCI */
/* configuration space.  Using this mechanism avoids issues with posted */
/* reads but is much slower than memory-mapped I/O. */
/* */
/* Returns: */
/* The value of the register. */
/****************************************************************************/
static uint32_t
bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
{
	device_t dev = sc->bce_dev;

	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
}

/****************************************************************************/
/* Indirect register write. */
/* */
/* Writes NetXtreme II registers using an index/data register pair in PCI */
/* configuration space.  Using this mechanism avoids issues with posted */
/* writes but is much slower than memory-mapped I/O. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	device_t dev = sc->bce_dev;

	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}
/****************************************************************************/
/* Shared memory write. */
/* */
/* Writes NetXtreme II shared memory region. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
}

/****************************************************************************/
/* Shared memory read. */
/* */
/* Reads NetXtreme II shared memory region. */
/* */
/* Returns: */
/* The 32 bit value read. */
/****************************************************************************/
static uint32_t
bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
{
	return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
}

/****************************************************************************/
/* Context memory write. */
/* */
/* The NetXtreme II controller uses context memory to track connection */
/* information for L2 and higher network protocols. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
			device_printf(sc->bce_dev,
			    "Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    cid_addr, ctx_offset);
		}
	} else {
		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
		REG_WR(sc, BCE_CTX_DATA, ctx_val);
	}
}
/****************************************************************************/
/* PHY register read. */
/* */
/* Implements register reads on the MII bus. */
/* */
/* Returns: */
/* The value of the register. */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
	    BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
	    BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		if_printf(&sc->arpcom.ac_if,
		    "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
		    phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);
}
/****************************************************************************/
/* PHY register write. */
/* */
/* Implements register writes on the MII bus. */
/* */
/* Returns: */
/* 0 on success. */
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
	    BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return 0;
}
/****************************************************************************/
/* MII bus status change. */
/* */
/* Called by the MII bus driver when the PHY establishes link to set the */
/* MAC interface registers. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/*
	 * Set MII or GMII interface based on the speed negotiated
	 * by the PHY.
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/*
	 * Set half or full duplex based on the duplex mode negotiated
	 * by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}
/****************************************************************************/
/* Acquire NVRAM lock. */
/* */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use */
/* by the driver. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_acquire_nvram_lock(struct bce_softc *sc)
{
	uint32_t val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
			break;
		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		return EBUSY;
	}
	return 0;
}
/****************************************************************************/
/* Release NVRAM lock. */
/* */
/* When the caller is finished accessing NVRAM the lock must be released. */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is for use */
/* by the driver. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_release_nvram_lock(struct bce_softc *sc)
{
	uint32_t val;
	int j;

	/*
	 * Relinquish nvram interface.
	 */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
			break;
		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		return EBUSY;
	}
	return 0;
}
/****************************************************************************/
/* Enable NVRAM access. */
/* */
/* Before accessing NVRAM for read or write operations the caller must */
/* enable NVRAM access. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
}

/****************************************************************************/
/* Disable NVRAM access. */
/* */
/* When the caller is finished accessing NVRAM access must be disabled. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
}
/****************************************************************************/
/* Read a dword (32 bits) from NVRAM. */
/* */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already */
/* obtained the NVRAM lock and enabled the controller for NVRAM access. */
/* */
/* Returns: */
/* 0 on success and the 32 bit value read, positive value on failure. */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
    uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}
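	/*
	 * A worked example of the translation above (illustrative
	 * values only): for an Atmel buffered part with 264-byte
	 * pages and 9 page bits, linear offset 1000 is page 3
	 * (1000 / 264), byte 208 (1000 % 264), so the physical
	 * address becomes (3 << 9) + 208 = 1744 (0x6d0).
	 */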
	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);
			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
		    "Timeout error reading NVRAM at offset 0x%08X!\n",
		    offset);
		rc = EBUSY;
	}

	return rc;
}
/****************************************************************************/
/* Initialize NVRAM access. */
/* */
/* Identify the NVRAM device in use and prepare the NVRAM interface to */
/* access that device. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_init_nvram(struct bce_softc *sc)
{
	uint32_t val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		sc->bce_flash_info = &flash_5709;
		goto bce_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BCE_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */
	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */
		for (j = 0, flash = flash_table; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bce_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		uint32_t mask;

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = flash_table; j < entry_count;
		     j++, flash++) {
			/* Check if the device matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bce_flash_info = flash;

				/* Request access to the flash interface. */
				rc = bce_acquire_nvram_lock(sc);
				if (rc != 0)
					return rc;

				/* Reconfigure the flash interface. */
				bce_enable_nvram_access(sc);
				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
				bce_disable_nvram_access(sc);
				bce_release_nvram_lock(sc);
				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bce_flash_info = NULL;
		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
		return ENODEV;
	}

bce_init_nvram_get_flash_size:
	/* Write the flash config data to the shared memory interface. */
	val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
	    BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bce_flash_size = val;
	else
		sc->bce_flash_size = sc->bce_flash_info->total_size;

	return rc;
}
/****************************************************************************/
/* Read an arbitrary range of data from NVRAM. */
/* */
/* Prepares the NVRAM interface for access and reads the requested data */
/* into the supplied buffer. */
/* */
/* Returns: */
/* 0 on success and the data read, positive value on failure. */
/****************************************************************************/
static int
bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
    int buf_size)
{
	uint32_t cmd_flags, offset32, len32, extra;
	int rc = 0;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	rc = bce_acquire_nvram_lock(sc);
	if (rc != 0)
		return rc;

	/* Enable access to flash interface */
	bce_enable_nvram_access(sc);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	/* XXX should we release nvram lock if read_dword() fails? */
	if ((offset32 & 3) || (len32 & 3)) {
		uint8_t buf[4];
		uint32_t pre_len;

		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
		} else {
			cmd_flags = BCE_NVM_COMMAND_FIRST;
		}

		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}

	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
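		/*
		 * Worked example (illustrative): reading 5 bytes at
		 * offset 6 gives pre_len = 2 (bytes 6-7 come from the
		 * dword at 4), leaving len32 = 3; then extra = 1 and
		 * len32 is rounded up to 4, so one final dword is
		 * read and only 4 - extra = 3 bytes of it are copied.
		 */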
	}

	if (len32 == 4) {
		uint8_t buf[4];

		if (cmd_flags)
			cmd_flags = BCE_NVM_COMMAND_LAST;
		else
			cmd_flags = BCE_NVM_COMMAND_FIRST |
			    BCE_NVM_COMMAND_LAST;

		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
		uint8_t buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BCE_NVM_COMMAND_FIRST;

		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			goto bce_nvram_read_locked_exit;

		cmd_flags = BCE_NVM_COMMAND_LAST;
		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

bce_nvram_read_locked_exit:
	/* Disable access to flash interface and release the lock. */
	bce_disable_nvram_access(sc);
	bce_release_nvram_lock(sc);

	return rc;
}
/****************************************************************************/
/* Verifies that NVRAM is accessible and contains valid data. */
/* */
/* Reads the configuration data from NVRAM and verifies that the CRC is */
/* correct. */
/* */
/* Returns: */
/* 0 on success, positive value on failure. */
/****************************************************************************/
static int
bce_nvram_test(struct bce_softc *sc)
{
	uint32_t buf[BCE_NVRAM_SIZE / 4];
	uint32_t magic, csum;
	uint8_t *data = (uint8_t *)buf;
	int rc = 0;

	/*
	 * Check that the device NVRAM is valid by reading
	 * the magic value at offset 0.
	 */
	rc = bce_nvram_read(sc, 0, data, 4);
	if (rc != 0)
		return rc;

	magic = be32toh(buf[0]);
	if (magic != BCE_NVRAM_MAGIC) {
		if_printf(&sc->arpcom.ac_if,
		    "Invalid NVRAM magic value! Expected: 0x%08X, "
		    "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
		return ENODEV;
	}

	/*
	 * Verify that the device NVRAM includes valid
	 * configuration data.
	 */
	rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
	if (rc != 0)
		return rc;

	csum = ether_crc32_le(data, 0x100);
	if (csum != BCE_CRC32_RESIDUAL) {
		if_printf(&sc->arpcom.ac_if,
		    "Invalid Manufacturing Information NVRAM CRC! "
		    "Expected: 0x%08X, Found: 0x%08X\n",
		    BCE_CRC32_RESIDUAL, csum);
		return ENODEV;
	}

	csum = ether_crc32_le(data + 0x100, 0x100);
	if (csum != BCE_CRC32_RESIDUAL) {
		if_printf(&sc->arpcom.ac_if,
		    "Invalid Feature Configuration Information "
		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
		    BCE_CRC32_RESIDUAL, csum);
		return ENODEV;
	}

	return rc;
}
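/*
 * Implementation note: the two CRC checks in bce_nvram_test() rely on
 * the CRC-32 residue property.  Each 0x100-byte region stores its own
 * CRC-32 at the end, so computing the CRC over the whole region
 * (payload plus stored CRC) yields the fixed constant
 * BCE_CRC32_RESIDUAL whenever the data is intact.
 */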
/****************************************************************************/
/* Identifies the current media type of the controller and sets the PHY */
/* address. */
/* */
/* Returns: */
/* Nothing. */
/****************************************************************************/
static void
bce_get_media(struct bce_softc *sc)
{
	uint32_t val;

	sc->bce_phy_addr = 1;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
		uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
		uint32_t strap;

		/*
		 * The BCM5709S is software configurable
		 * for Copper or SerDes operation.
		 */
		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
			return;
		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
			return;
		}

		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
		} else {
			strap =
			    (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
		}

		if (pci_get_function(sc->bce_dev) == 0) {
			switch (strap) {
			case 0x4:
			case 0x5:
			case 0x6:
				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				return;
			}
		} else {
			switch (strap) {
			case 0x1:
			case 0x2:
			case 0x4:
				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				return;
			}
		}
	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
	}

	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
		sc->bce_flags |= BCE_NO_WOL_FLAG;
		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
			sc->bce_phy_addr = 2;
			val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
		}
	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
	}
}
1948 bce_destroy_tx_ring(struct bce_tx_ring *txr)
1952 /* Destroy the TX buffer descriptor DMA resources. */
1953 if (txr->tx_bd_chain_tag != NULL) {
1954 for (i = 0; i < txr->tx_pages; i++) {
1955 if (txr->tx_bd_chain[i] != NULL) {
1956 bus_dmamap_unload(txr->tx_bd_chain_tag,
1957 txr->tx_bd_chain_map[i]);
1958 bus_dmamem_free(txr->tx_bd_chain_tag,
1959 txr->tx_bd_chain[i],
1960 txr->tx_bd_chain_map[i]);
1963 bus_dma_tag_destroy(txr->tx_bd_chain_tag);
1966 /* Destroy the TX mbuf DMA resources. */
1967 if (txr->tx_mbuf_tag != NULL) {
1968 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
1969 /* Must have been unloaded in bce_stop() */
1970 KKASSERT(txr->tx_bufs[i].tx_mbuf_ptr == NULL);
1971 bus_dmamap_destroy(txr->tx_mbuf_tag,
1972 txr->tx_bufs[i].tx_mbuf_map);
1974 bus_dma_tag_destroy(txr->tx_mbuf_tag);
1977 if (txr->tx_bd_chain_map != NULL)
1978 kfree(txr->tx_bd_chain_map, M_DEVBUF);
1979 if (txr->tx_bd_chain != NULL)
1980 kfree(txr->tx_bd_chain, M_DEVBUF);
1981 if (txr->tx_bd_chain_paddr != NULL)
1982 kfree(txr->tx_bd_chain_paddr, M_DEVBUF);
1984 if (txr->tx_bufs != NULL)
1985 kfree(txr->tx_bufs, M_DEVBUF);
1989 bce_destroy_rx_ring(struct bce_rx_ring *rxr)
1993 /* Destroy the RX buffer descriptor DMA resources. */
1994 if (rxr->rx_bd_chain_tag != NULL) {
1995 for (i = 0; i < rxr->rx_pages; i++) {
1996 if (rxr->rx_bd_chain[i] != NULL) {
1997 bus_dmamap_unload(rxr->rx_bd_chain_tag,
1998 rxr->rx_bd_chain_map[i]);
1999 bus_dmamem_free(rxr->rx_bd_chain_tag,
2000 rxr->rx_bd_chain[i],
2001 rxr->rx_bd_chain_map[i]);
2004 bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
2007 /* Destroy the RX mbuf DMA resources. */
2008 if (rxr->rx_mbuf_tag != NULL) {
2009 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2010 /* Must have been unloaded in bce_stop() */
2011 KKASSERT(rxr->rx_bufs[i].rx_mbuf_ptr == NULL);
2012 bus_dmamap_destroy(rxr->rx_mbuf_tag,
2013 rxr->rx_bufs[i].rx_mbuf_map);
2015 bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
2016 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2019 if (rxr->rx_bd_chain_map != NULL)
2020 kfree(rxr->rx_bd_chain_map, M_DEVBUF);
2021 if (rxr->rx_bd_chain != NULL)
2022 kfree(rxr->rx_bd_chain, M_DEVBUF);
2023 if (rxr->rx_bd_chain_paddr != NULL)
2024 kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);
2026 if (rxr->rx_bufs != NULL)
2027 kfree(rxr->rx_bufs, M_DEVBUF);
2030 /****************************************************************************/
2031 /* Free any DMA memory owned by the driver. */
2033 /* Scans through each data structure that requires DMA memory and frees */
2034 /* the memory if allocated. */
2038 /****************************************************************************/
2040 bce_dma_free(struct bce_softc *sc)
2044 /* Destroy the status block. */
2045 if (sc->status_tag != NULL) {
2046 if (sc->status_block != NULL) {
2047 bus_dmamap_unload(sc->status_tag, sc->status_map);
2048 bus_dmamem_free(sc->status_tag, sc->status_block,
2051 bus_dma_tag_destroy(sc->status_tag);
2054 /* Destroy the statistics block. */
2055 if (sc->stats_tag != NULL) {
2056 if (sc->stats_block != NULL) {
2057 bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2058 bus_dmamem_free(sc->stats_tag, sc->stats_block,
2061 bus_dma_tag_destroy(sc->stats_tag);
2064 /* Destroy the CTX DMA resources. */
2065 if (sc->ctx_tag != NULL) {
2066 for (i = 0; i < sc->ctx_pages; i++) {
2067 if (sc->ctx_block[i] != NULL) {
2068 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
2069 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2073 bus_dma_tag_destroy(sc->ctx_tag);
2077 if (sc->tx_rings != NULL) {
2078 for (i = 0; i < sc->tx_ring_cnt; ++i)
2079 bce_destroy_tx_ring(&sc->tx_rings[i]);
2080 kfree(sc->tx_rings, M_DEVBUF);
2084 if (sc->rx_rings != NULL) {
2085 for (i = 0; i < sc->rx_ring_cnt; ++i)
2086 bce_destroy_rx_ring(&sc->rx_rings[i]);
2087 kfree(sc->rx_rings, M_DEVBUF);
2090 /* Destroy the parent tag */
2091 if (sc->parent_tag != NULL)
2092 bus_dma_tag_destroy(sc->parent_tag);
2095 /****************************************************************************/
2096 /* Get DMA memory from the OS. */
2098 /* Validates that the OS has provided DMA buffers in response to a */
2099 /* bus_dmamap_load() call and saves the physical address of those buffers. */
2100 /* The callback argument points at a bus_addr_t; on success the single */
2101 /* segment's physical address is stored there. On error the callback */
2102 /* returns early and the caller sees the failure via bus_dmamap_load(). */
2106 /****************************************************************************/
2108 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2110 bus_addr_t *busaddr = arg;
2112 /* Check for an error and signal the caller that an error occurred. */
2116 KASSERT(nseg == 1, ("only one segment is allowed"));
2117 *busaddr = segs->ds_addr;
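/*
 * Usage sketch (illustrative only; the tag, map and buffer names are
 * hypothetical):
 *
 *	bus_addr_t paddr = 0;
 *	int error;
 *
 *	error = bus_dmamap_load(tag, map, buf, buflen,
 *	    bce_dma_map_addr, &paddr, BUS_DMA_WAITOK);
 *	if (error != 0)
 *		return error;
 *	... paddr now holds the buffer's physical address ...
 *
 * Every buffer loaded through this callback is allocated as a single
 * physically contiguous block, hence the nseg == 1 assertion above.
 */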
2121 bce_create_tx_ring(struct bce_tx_ring *txr)
2125 lwkt_serialize_init(&txr->tx_serialize);
2126 txr->tx_wreg = bce_tx_wreg;
2128 pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
2129 if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
2130 device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
2131 pages = TX_PAGES_DEFAULT;
2133 txr->tx_pages = pages;
2135 txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
2136 M_DEVBUF, M_WAITOK | M_ZERO);
2137 txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
2138 M_DEVBUF, M_WAITOK | M_ZERO);
2139 txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
2140 M_DEVBUF, M_WAITOK | M_ZERO);
2142 txr->tx_bufs = kmalloc_cachealign(
2143 sizeof(struct bce_tx_buf) * TOTAL_TX_BD(txr),
2144 M_DEVBUF, M_WAITOK | M_ZERO);
2147 * Create a DMA tag for the TX buffer descriptor chain,
2148 * allocate and clear the memory, and fetch the
2149 * physical address of the block.
2151 rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2152 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2153 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2154 0, &txr->tx_bd_chain_tag);
2156 device_printf(txr->sc->bce_dev, "Could not allocate "
2157 "TX descriptor chain DMA tag!\n");
2161 for (i = 0; i < txr->tx_pages; i++) {
2164 rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
2165 (void **)&txr->tx_bd_chain[i],
2166 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2167 &txr->tx_bd_chain_map[i]);
2169 device_printf(txr->sc->bce_dev,
2170 "Could not allocate %dth TX descriptor "
2171 "chain DMA memory!\n", i);
2175 rc = bus_dmamap_load(txr->tx_bd_chain_tag,
2176 txr->tx_bd_chain_map[i],
2177 txr->tx_bd_chain[i],
2178 BCE_TX_CHAIN_PAGE_SZ,
2179 bce_dma_map_addr, &busaddr,
2182 if (rc == EINPROGRESS) {
2183 panic("%s coherent memory loading "
2184 "is still in progress!",
2185 txr->sc->arpcom.ac_if.if_xname);
2187 device_printf(txr->sc->bce_dev, "Could not map %dth "
2188 "TX descriptor chain DMA memory!\n", i);
2189 bus_dmamem_free(txr->tx_bd_chain_tag,
2190 txr->tx_bd_chain[i],
2191 txr->tx_bd_chain_map[i]);
2192 txr->tx_bd_chain[i] = NULL;
2196 txr->tx_bd_chain_paddr[i] = busaddr;
2199 /* Create a DMA tag for TX mbufs. */
2200 rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
2201 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2202 IP_MAXPACKET + sizeof(struct ether_vlan_header),
2203 BCE_MAX_SEGMENTS, PAGE_SIZE,
2204 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2207 device_printf(txr->sc->bce_dev,
2208 "Could not allocate TX mbuf DMA tag!\n");
2212 /* Create DMA maps for the TX mbuf clusters. */
2213 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
2214 rc = bus_dmamap_create(txr->tx_mbuf_tag,
2215 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2216 &txr->tx_bufs[i].tx_mbuf_map);
2220 for (j = 0; j < i; ++j) {
2221 bus_dmamap_destroy(txr->tx_mbuf_tag,
2222 txr->tx_bufs[j].tx_mbuf_map);
2224 bus_dma_tag_destroy(txr->tx_mbuf_tag);
2225 txr->tx_mbuf_tag = NULL;
2227 device_printf(txr->sc->bce_dev, "Unable to create "
2228 "%dth TX mbuf DMA map!\n", i);
2236 bce_create_rx_ring(struct bce_rx_ring *rxr)
2240 lwkt_serialize_init(&rxr->rx_serialize);
2242 pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
2243 if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
2244 device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
2245 pages = RX_PAGES_DEFAULT;
2247 rxr->rx_pages = pages;
2249 rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
2250 M_DEVBUF, M_WAITOK | M_ZERO);
2251 rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
2252 M_DEVBUF, M_WAITOK | M_ZERO);
2253 rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
2254 M_DEVBUF, M_WAITOK | M_ZERO);
2256 rxr->rx_bufs = kmalloc_cachealign(
2257 sizeof(struct bce_rx_buf) * TOTAL_RX_BD(rxr),
2258 M_DEVBUF, M_WAITOK | M_ZERO);
2261 * Create a DMA tag for the RX buffer descriptor chain,
2262 * allocate and clear the memory, and fetch the physical
2263 * address of the blocks.
2265 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2266 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2267 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2268 0, &rxr->rx_bd_chain_tag);
2270 device_printf(rxr->sc->bce_dev, "Could not allocate "
2271 "RX descriptor chain DMA tag!\n");
2275 for (i = 0; i < rxr->rx_pages; i++) {
2278 rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
2279 (void **)&rxr->rx_bd_chain[i],
2280 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2281 &rxr->rx_bd_chain_map[i]);
2283 device_printf(rxr->sc->bce_dev,
2284 "Could not allocate %dth RX descriptor "
2285 "chain DMA memory!\n", i);
2289 rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
2290 rxr->rx_bd_chain_map[i],
2291 rxr->rx_bd_chain[i],
2292 BCE_RX_CHAIN_PAGE_SZ,
2293 bce_dma_map_addr, &busaddr,
2296 if (rc == EINPROGRESS) {
2297 panic("%s coherent memory loading "
2298 "is still in progress!",
2299 rxr->sc->arpcom.ac_if.if_xname);
2301 device_printf(rxr->sc->bce_dev,
2302 "Could not map %dth RX descriptor "
2303 "chain DMA memory!\n", i);
2304 bus_dmamem_free(rxr->rx_bd_chain_tag,
2305 rxr->rx_bd_chain[i],
2306 rxr->rx_bd_chain_map[i]);
2307 rxr->rx_bd_chain[i] = NULL;
2311 rxr->rx_bd_chain_paddr[i] = busaddr;
2314 /* Create a DMA tag for RX mbufs. */
2315 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
2316 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2317 MCLBYTES, 1, MCLBYTES,
2318 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
2321 device_printf(rxr->sc->bce_dev,
2322 "Could not allocate RX mbuf DMA tag!\n");
2326 /* Create tmp DMA map for RX mbuf clusters. */
2327 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2328 &rxr->rx_mbuf_tmpmap);
2330 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2331 rxr->rx_mbuf_tag = NULL;
2333 device_printf(rxr->sc->bce_dev,
2334 "Could not create RX mbuf tmp DMA map!\n");
2338 /* Create DMA maps for the RX mbuf clusters. */
2339 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2340 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2341 &rxr->rx_bufs[i].rx_mbuf_map);
2345 for (j = 0; j < i; ++j) {
2346 bus_dmamap_destroy(rxr->rx_mbuf_tag,
2347 rxr->rx_bufs[j].rx_mbuf_map);
2349 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2350 rxr->rx_mbuf_tag = NULL;
2352 device_printf(rxr->sc->bce_dev, "Unable to create "
2353 "%dth RX mbuf DMA map!\n", i);
2360 /****************************************************************************/
2361 /* Allocate any DMA memory needed by the driver. */
2363 /* Allocates DMA memory needed for the various global structures needed by */
2366 /* Memory alignment requirements: */
2367 /* -----------------+----------+----------+----------+----------+ */
2368 /* Data Structure | 5706 | 5708 | 5709 | 5716 | */
2369 /* -----------------+----------+----------+----------+----------+ */
2370 /* Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2371 /* Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2372 /* RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */
2373 /* PG Buffers | none | none | none | none | */
2374 /* TX Buffers | none | none | none | none | */
2375 /* Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */
2376 /* Context Pages(1) | N/A | N/A | 4KiB | 4KiB | */
2377 /* -----------------+----------+----------+----------+----------+ */
2379 /* (1) Must align with CPU page size (BCM_PAGE_SIZE). */
2382 /* 0 for success, positive value for failure. */
2383 /****************************************************************************/
2385 bce_dma_alloc(struct bce_softc *sc)
2387 struct ifnet *ifp = &sc->arpcom.ac_if;
2389 bus_addr_t busaddr, max_busaddr;
2390 bus_size_t status_align, stats_align, status_size;
2393 * The embedded PCIe to PCI-X bridge (EPB)
2394 * in the 5708 cannot address memory above
2395 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
2397 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
2398 max_busaddr = BCE_BUS_SPACE_MAXADDR;
2400 max_busaddr = BUS_SPACE_MAXADDR;
2403 * BCM5709 and BCM5716 use host memory as a cache for context memory.
2405 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2406 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2407 sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
2408 if (sc->ctx_pages == 0)
2410 if (sc->ctx_pages > BCE_CTX_PAGES) {
2411 device_printf(sc->bce_dev, "excessive ctx pages %d\n",
2423 * Each MSI-X vector needs a status block; each status block
2424 * consumes 128 bytes and is 128-byte aligned.
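 * With the chip's maximum of 9 vectors that amounts to
 * BCE_MSIX_MAX (9) * 128 = 1152 bytes of contiguous memory.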
2426 if (sc->rx_ring_cnt > 1) {
2427 status_size = BCE_MSIX_MAX * BCE_STATUS_BLK_MSIX_ALIGN;
2428 status_align = BCE_STATUS_BLK_MSIX_ALIGN;
2430 status_size = BCE_STATUS_BLK_SZ;
2434 * Allocate the parent bus DMA tag appropriate for PCI.
2436 rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2437 max_busaddr, BUS_SPACE_MAXADDR,
2439 BUS_SPACE_MAXSIZE_32BIT, 0,
2440 BUS_SPACE_MAXSIZE_32BIT,
2441 0, &sc->parent_tag);
2443 if_printf(ifp, "Could not allocate parent DMA tag!\n");
2448 * Allocate status block.
2450 sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2451 status_align, status_size,
2452 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2453 &sc->status_tag, &sc->status_map,
2454 &sc->status_block_paddr);
2455 if (sc->status_block == NULL) {
2456 if_printf(ifp, "Could not allocate status block!\n");
2461 * Allocate statistics block.
2463 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2464 stats_align, BCE_STATS_BLK_SZ,
2465 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2466 &sc->stats_tag, &sc->stats_map,
2467 &sc->stats_block_paddr);
2468 if (sc->stats_block == NULL) {
2469 if_printf(ifp, "Could not allocate statistics block!\n");
2474 * Allocate context block, if needed
2476 if (sc->ctx_pages != 0) {
2477 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2478 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2480 BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
2483 if_printf(ifp, "Could not allocate "
2484 "context block DMA tag!\n");
2488 for (i = 0; i < sc->ctx_pages; i++) {
2489 rc = bus_dmamem_alloc(sc->ctx_tag,
2490 (void **)&sc->ctx_block[i],
2491 BUS_DMA_WAITOK | BUS_DMA_ZERO |
2495 if_printf(ifp, "Could not allocate %dth context "
2496 "DMA memory!\n", i);
2500 rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
2501 sc->ctx_block[i], BCM_PAGE_SIZE,
2502 bce_dma_map_addr, &busaddr,
2505 if (rc == EINPROGRESS) {
2506 panic("%s coherent memory loading "
2507 "is still in progress!", ifp->if_xname);
2509 if_printf(ifp, "Could not map %dth context "
2510 "DMA memory!\n", i);
2511 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2513 sc->ctx_block[i] = NULL;
2516 sc->ctx_paddr[i] = busaddr;
2520 sc->tx_rings = kmalloc_cachealign(
2521 sizeof(struct bce_tx_ring) * sc->tx_ring_cnt, M_DEVBUF,
2523 for (i = 0; i < sc->tx_ring_cnt; ++i) {
2524 sc->tx_rings[i].sc = sc;
2526 sc->tx_rings[i].tx_cid = TX_CID;
2527 sc->tx_rings[i].tx_hw_cons =
2528 &sc->status_block->status_tx_quick_consumer_index0;
2530 struct status_block_msix *sblk =
2531 (struct status_block_msix *)
2532 (((uint8_t *)(sc->status_block)) +
2533 (i * BCE_STATUS_BLK_MSIX_ALIGN));
2535 sc->tx_rings[i].tx_cid = TX_TSS_CID + i - 1;
2536 sc->tx_rings[i].tx_hw_cons =
2537 &sblk->status_tx_quick_consumer_index;
2540 rc = bce_create_tx_ring(&sc->tx_rings[i]);
2542 device_printf(sc->bce_dev,
2543 "can't create %dth tx ring\n", i);
2548 sc->rx_rings = kmalloc_cachealign(
2549 sizeof(struct bce_rx_ring) * sc->rx_ring_cnt, M_DEVBUF,
2551 for (i = 0; i < sc->rx_ring_cnt; ++i) {
2552 sc->rx_rings[i].sc = sc;
2553 sc->rx_rings[i].idx = i;
2555 sc->rx_rings[i].rx_cid = RX_CID;
2556 sc->rx_rings[i].rx_hw_cons =
2557 &sc->status_block->status_rx_quick_consumer_index0;
2558 sc->rx_rings[i].hw_status_idx =
2559 &sc->status_block->status_idx;
2561 struct status_block_msix *sblk =
2562 (struct status_block_msix *)
2563 (((uint8_t *)(sc->status_block)) +
2564 (i * BCE_STATUS_BLK_MSIX_ALIGN));
2566 sc->rx_rings[i].rx_cid = RX_RSS_CID + i - 1;
2567 sc->rx_rings[i].rx_hw_cons =
2568 &sblk->status_rx_quick_consumer_index;
2569 sc->rx_rings[i].hw_status_idx = &sblk->status_idx;
2572 rc = bce_create_rx_ring(&sc->rx_rings[i]);
2574 device_printf(sc->bce_dev,
2575 "can't create %dth rx ring\n", i);
2583 /****************************************************************************/
2584 /* Firmware synchronization. */
2586 /* Before performing certain events such as a chip reset, synchronize with */
2587 /* the firmware first. */
2590 /* 0 for success, positive value for failure. */
2591 /****************************************************************************/
2593 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2598 /* Don't waste any time if we've timed out before. */
2599 if (sc->bce_fw_timed_out)
2602 /* Increment the message sequence number. */
2603 sc->bce_fw_wr_seq++;
2604 msg_data |= sc->bce_fw_wr_seq;
2606 /* Send the message to the bootcode driver mailbox. */
2607 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2609 /* Wait for the bootcode to acknowledge the message. */
2610 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2611 /* Check for a response in the bootcode firmware mailbox. */
2612 val = bce_shmem_rd(sc, BCE_FW_MB);
2613 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2618 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2619 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2620 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2621 if_printf(&sc->arpcom.ac_if,
2622 "Firmware synchronization timeout! "
2623 "msg_data = 0x%08X\n", msg_data);
2625 msg_data &= ~BCE_DRV_MSG_CODE;
2626 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2628 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2630 sc->bce_fw_timed_out = 1;
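/*
 * Handshake sketch: the driver posts (message code | sequence) to the
 * BCE_DRV_MB mailbox and the bootcode echoes the sequence back in the
 * ACK field of BCE_FW_MB.  For a hypothetical sequence value of 0x05:
 *
 *	bce_shmem_wr(sc, BCE_DRV_MB, BCE_DRV_MSG_DATA_WAIT0 | 0x05);
 *	poll until (bce_shmem_rd(sc, BCE_FW_MB) & BCE_FW_MSG_ACK) == 0x05
 *
 * which assumes, as the comparison above implies, that BCE_FW_MSG_ACK
 * and BCE_DRV_MSG_SEQ cover the same low-order bits.
 */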
2636 /****************************************************************************/
2637 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2641 /****************************************************************************/
2643 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2644 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2649 for (i = 0; i < rv2p_code_len; i += 8) {
2650 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2652 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2655 if (rv2p_proc == RV2P_PROC1) {
2656 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2657 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2659 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2660 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2664 /* Reset the processor; the un-stall is done later. */
2665 if (rv2p_proc == RV2P_PROC1)
2666 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2668 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2671 /****************************************************************************/
2672 /* Load RISC processor firmware. */
2674 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */
2675 /* associated with a particular processor. */
2679 /****************************************************************************/
2681 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2687 bce_halt_cpu(sc, cpu_reg);
2689 /* Load the Text area. */
2690 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2692 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2693 REG_WR_IND(sc, offset, fw->text[j]);
2696 /* Load the Data area. */
2697 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2699 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2700 REG_WR_IND(sc, offset, fw->data[j]);
2703 /* Load the SBSS area. */
2704 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2706 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2707 REG_WR_IND(sc, offset, fw->sbss[j]);
2710 /* Load the BSS area. */
2711 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2713 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2714 REG_WR_IND(sc, offset, fw->bss[j]);
2717 /* Load the Read-Only area. */
2718 offset = cpu_reg->spad_base +
2719 (fw->rodata_addr - cpu_reg->mips_view_base);
2721 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2722 REG_WR_IND(sc, offset, fw->rodata[j]);
2725 /* Clear the pre-fetch instruction and set the FW start address. */
2726 REG_WR_IND(sc, cpu_reg->inst, 0);
2727 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
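/*
 * Address translation sketch: each firmware section is linked at a
 * MIPS-view address and rebased into the processor's scratchpad:
 *
 *	offset = spad_base + (section_addr - mips_view_base)
 *
 * e.g. with the mips_view_base of 0x8000000 used below, a section
 * linked at 0x8000100 lands 0x100 bytes into the scratchpad.
 */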
2730 /****************************************************************************/
2731 /* Starts the RISC processor. */
2733 /* Assumes the CPU starting address has already been set. */
2737 /****************************************************************************/
2739 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2743 /* Start the CPU. */
2744 val = REG_RD_IND(sc, cpu_reg->mode);
2745 val &= ~cpu_reg->mode_value_halt;
2746 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2747 REG_WR_IND(sc, cpu_reg->mode, val);
2750 /****************************************************************************/
2751 /* Halts the RISC processor. */
2755 /****************************************************************************/
2757 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2762 val = REG_RD_IND(sc, cpu_reg->mode);
2763 val |= cpu_reg->mode_value_halt;
2764 REG_WR_IND(sc, cpu_reg->mode, val);
2765 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2768 /****************************************************************************/
2769 /* Start the RX CPU. */
2773 /****************************************************************************/
2775 bce_start_rxp_cpu(struct bce_softc *sc)
2777 struct cpu_reg cpu_reg;
2779 cpu_reg.mode = BCE_RXP_CPU_MODE;
2780 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2781 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2782 cpu_reg.state = BCE_RXP_CPU_STATE;
2783 cpu_reg.state_value_clear = 0xffffff;
2784 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2785 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2786 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2787 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2788 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2789 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2790 cpu_reg.mips_view_base = 0x8000000;
2792 bce_start_cpu(sc, &cpu_reg);
2795 /****************************************************************************/
2796 /* Initialize the RX CPU. */
2800 /****************************************************************************/
2802 bce_init_rxp_cpu(struct bce_softc *sc)
2804 struct cpu_reg cpu_reg;
2807 cpu_reg.mode = BCE_RXP_CPU_MODE;
2808 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2809 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2810 cpu_reg.state = BCE_RXP_CPU_STATE;
2811 cpu_reg.state_value_clear = 0xffffff;
2812 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2813 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2814 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2815 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2816 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2817 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2818 cpu_reg.mips_view_base = 0x8000000;
2820 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2821 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2822 fw.ver_major = bce_RXP_b09FwReleaseMajor;
2823 fw.ver_minor = bce_RXP_b09FwReleaseMinor;
2824 fw.ver_fix = bce_RXP_b09FwReleaseFix;
2825 fw.start_addr = bce_RXP_b09FwStartAddr;
2827 fw.text_addr = bce_RXP_b09FwTextAddr;
2828 fw.text_len = bce_RXP_b09FwTextLen;
2830 fw.text = bce_RXP_b09FwText;
2832 fw.data_addr = bce_RXP_b09FwDataAddr;
2833 fw.data_len = bce_RXP_b09FwDataLen;
2835 fw.data = bce_RXP_b09FwData;
2837 fw.sbss_addr = bce_RXP_b09FwSbssAddr;
2838 fw.sbss_len = bce_RXP_b09FwSbssLen;
2840 fw.sbss = bce_RXP_b09FwSbss;
2842 fw.bss_addr = bce_RXP_b09FwBssAddr;
2843 fw.bss_len = bce_RXP_b09FwBssLen;
2845 fw.bss = bce_RXP_b09FwBss;
2847 fw.rodata_addr = bce_RXP_b09FwRodataAddr;
2848 fw.rodata_len = bce_RXP_b09FwRodataLen;
2849 fw.rodata_index = 0;
2850 fw.rodata = bce_RXP_b09FwRodata;
2852 fw.ver_major = bce_RXP_b06FwReleaseMajor;
2853 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2854 fw.ver_fix = bce_RXP_b06FwReleaseFix;
2855 fw.start_addr = bce_RXP_b06FwStartAddr;
2857 fw.text_addr = bce_RXP_b06FwTextAddr;
2858 fw.text_len = bce_RXP_b06FwTextLen;
2860 fw.text = bce_RXP_b06FwText;
2862 fw.data_addr = bce_RXP_b06FwDataAddr;
2863 fw.data_len = bce_RXP_b06FwDataLen;
2865 fw.data = bce_RXP_b06FwData;
2867 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2868 fw.sbss_len = bce_RXP_b06FwSbssLen;
2870 fw.sbss = bce_RXP_b06FwSbss;
2872 fw.bss_addr = bce_RXP_b06FwBssAddr;
2873 fw.bss_len = bce_RXP_b06FwBssLen;
2875 fw.bss = bce_RXP_b06FwBss;
2877 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2878 fw.rodata_len = bce_RXP_b06FwRodataLen;
2879 fw.rodata_index = 0;
2880 fw.rodata = bce_RXP_b06FwRodata;
2883 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2884 /* Delay RXP start until initialization is complete. */
2887 /****************************************************************************/
2888 /* Initialize the TX CPU. */
2892 /****************************************************************************/
2894 bce_init_txp_cpu(struct bce_softc *sc)
2896 struct cpu_reg cpu_reg;
2899 cpu_reg.mode = BCE_TXP_CPU_MODE;
2900 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2901 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2902 cpu_reg.state = BCE_TXP_CPU_STATE;
2903 cpu_reg.state_value_clear = 0xffffff;
2904 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2905 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2906 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2907 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2908 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2909 cpu_reg.spad_base = BCE_TXP_SCRATCH;
2910 cpu_reg.mips_view_base = 0x8000000;
2912 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2913 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2914 fw.ver_major = bce_TXP_b09FwReleaseMajor;
2915 fw.ver_minor = bce_TXP_b09FwReleaseMinor;
2916 fw.ver_fix = bce_TXP_b09FwReleaseFix;
2917 fw.start_addr = bce_TXP_b09FwStartAddr;
2919 fw.text_addr = bce_TXP_b09FwTextAddr;
2920 fw.text_len = bce_TXP_b09FwTextLen;
2922 fw.text = bce_TXP_b09FwText;
2924 fw.data_addr = bce_TXP_b09FwDataAddr;
2925 fw.data_len = bce_TXP_b09FwDataLen;
2927 fw.data = bce_TXP_b09FwData;
2929 fw.sbss_addr = bce_TXP_b09FwSbssAddr;
2930 fw.sbss_len = bce_TXP_b09FwSbssLen;
2932 fw.sbss = bce_TXP_b09FwSbss;
2934 fw.bss_addr = bce_TXP_b09FwBssAddr;
2935 fw.bss_len = bce_TXP_b09FwBssLen;
2937 fw.bss = bce_TXP_b09FwBss;
2939 fw.rodata_addr = bce_TXP_b09FwRodataAddr;
2940 fw.rodata_len = bce_TXP_b09FwRodataLen;
2941 fw.rodata_index = 0;
2942 fw.rodata = bce_TXP_b09FwRodata;
2944 fw.ver_major = bce_TXP_b06FwReleaseMajor;
2945 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2946 fw.ver_fix = bce_TXP_b06FwReleaseFix;
2947 fw.start_addr = bce_TXP_b06FwStartAddr;
2949 fw.text_addr = bce_TXP_b06FwTextAddr;
2950 fw.text_len = bce_TXP_b06FwTextLen;
2952 fw.text = bce_TXP_b06FwText;
2954 fw.data_addr = bce_TXP_b06FwDataAddr;
2955 fw.data_len = bce_TXP_b06FwDataLen;
2957 fw.data = bce_TXP_b06FwData;
2959 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2960 fw.sbss_len = bce_TXP_b06FwSbssLen;
2962 fw.sbss = bce_TXP_b06FwSbss;
2964 fw.bss_addr = bce_TXP_b06FwBssAddr;
2965 fw.bss_len = bce_TXP_b06FwBssLen;
2967 fw.bss = bce_TXP_b06FwBss;
2969 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2970 fw.rodata_len = bce_TXP_b06FwRodataLen;
2971 fw.rodata_index = 0;
2972 fw.rodata = bce_TXP_b06FwRodata;
2975 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2976 bce_start_cpu(sc, &cpu_reg);
2979 /****************************************************************************/
2980 /* Initialize the TPAT CPU. */
2984 /****************************************************************************/
2986 bce_init_tpat_cpu(struct bce_softc *sc)
2988 struct cpu_reg cpu_reg;
2991 cpu_reg.mode = BCE_TPAT_CPU_MODE;
2992 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2993 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2994 cpu_reg.state = BCE_TPAT_CPU_STATE;
2995 cpu_reg.state_value_clear = 0xffffff;
2996 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2997 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2998 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2999 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
3000 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
3001 cpu_reg.spad_base = BCE_TPAT_SCRATCH;
3002 cpu_reg.mips_view_base = 0x8000000;
3004 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3005 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3006 fw.ver_major = bce_TPAT_b09FwReleaseMajor;
3007 fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
3008 fw.ver_fix = bce_TPAT_b09FwReleaseFix;
3009 fw.start_addr = bce_TPAT_b09FwStartAddr;
3011 fw.text_addr = bce_TPAT_b09FwTextAddr;
3012 fw.text_len = bce_TPAT_b09FwTextLen;
3014 fw.text = bce_TPAT_b09FwText;
3016 fw.data_addr = bce_TPAT_b09FwDataAddr;
3017 fw.data_len = bce_TPAT_b09FwDataLen;
3019 fw.data = bce_TPAT_b09FwData;
3021 fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
3022 fw.sbss_len = bce_TPAT_b09FwSbssLen;
3024 fw.sbss = bce_TPAT_b09FwSbss;
3026 fw.bss_addr = bce_TPAT_b09FwBssAddr;
3027 fw.bss_len = bce_TPAT_b09FwBssLen;
3029 fw.bss = bce_TPAT_b09FwBss;
3031 fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
3032 fw.rodata_len = bce_TPAT_b09FwRodataLen;
3033 fw.rodata_index = 0;
3034 fw.rodata = bce_TPAT_b09FwRodata;
3036 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3037 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3038 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3039 fw.start_addr = bce_TPAT_b06FwStartAddr;
3041 fw.text_addr = bce_TPAT_b06FwTextAddr;
3042 fw.text_len = bce_TPAT_b06FwTextLen;
3044 fw.text = bce_TPAT_b06FwText;
3046 fw.data_addr = bce_TPAT_b06FwDataAddr;
3047 fw.data_len = bce_TPAT_b06FwDataLen;
3049 fw.data = bce_TPAT_b06FwData;
3051 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3052 fw.sbss_len = bce_TPAT_b06FwSbssLen;
3054 fw.sbss = bce_TPAT_b06FwSbss;
3056 fw.bss_addr = bce_TPAT_b06FwBssAddr;
3057 fw.bss_len = bce_TPAT_b06FwBssLen;
3059 fw.bss = bce_TPAT_b06FwBss;
3061 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3062 fw.rodata_len = bce_TPAT_b06FwRodataLen;
3063 fw.rodata_index = 0;
3064 fw.rodata = bce_TPAT_b06FwRodata;
3067 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3068 bce_start_cpu(sc, &cpu_reg);
3071 /****************************************************************************/
3072 /* Initialize the CP CPU. */
3076 /****************************************************************************/
3078 bce_init_cp_cpu(struct bce_softc *sc)
3080 struct cpu_reg cpu_reg;
3083 cpu_reg.mode = BCE_CP_CPU_MODE;
3084 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3085 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3086 cpu_reg.state = BCE_CP_CPU_STATE;
3087 cpu_reg.state_value_clear = 0xffffff;
3088 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3089 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3090 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3091 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3092 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3093 cpu_reg.spad_base = BCE_CP_SCRATCH;
3094 cpu_reg.mips_view_base = 0x8000000;
3096 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3097 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3098 fw.ver_major = bce_CP_b09FwReleaseMajor;
3099 fw.ver_minor = bce_CP_b09FwReleaseMinor;
3100 fw.ver_fix = bce_CP_b09FwReleaseFix;
3101 fw.start_addr = bce_CP_b09FwStartAddr;
3103 fw.text_addr = bce_CP_b09FwTextAddr;
3104 fw.text_len = bce_CP_b09FwTextLen;
3106 fw.text = bce_CP_b09FwText;
3108 fw.data_addr = bce_CP_b09FwDataAddr;
3109 fw.data_len = bce_CP_b09FwDataLen;
3111 fw.data = bce_CP_b09FwData;
3113 fw.sbss_addr = bce_CP_b09FwSbssAddr;
3114 fw.sbss_len = bce_CP_b09FwSbssLen;
3116 fw.sbss = bce_CP_b09FwSbss;
3118 fw.bss_addr = bce_CP_b09FwBssAddr;
3119 fw.bss_len = bce_CP_b09FwBssLen;
3121 fw.bss = bce_CP_b09FwBss;
3123 fw.rodata_addr = bce_CP_b09FwRodataAddr;
3124 fw.rodata_len = bce_CP_b09FwRodataLen;
3125 fw.rodata_index = 0;
3126 fw.rodata = bce_CP_b09FwRodata;
3128 fw.ver_major = bce_CP_b06FwReleaseMajor;
3129 fw.ver_minor = bce_CP_b06FwReleaseMinor;
3130 fw.ver_fix = bce_CP_b06FwReleaseFix;
3131 fw.start_addr = bce_CP_b06FwStartAddr;
3133 fw.text_addr = bce_CP_b06FwTextAddr;
3134 fw.text_len = bce_CP_b06FwTextLen;
3136 fw.text = bce_CP_b06FwText;
3138 fw.data_addr = bce_CP_b06FwDataAddr;
3139 fw.data_len = bce_CP_b06FwDataLen;
3141 fw.data = bce_CP_b06FwData;
3143 fw.sbss_addr = bce_CP_b06FwSbssAddr;
3144 fw.sbss_len = bce_CP_b06FwSbssLen;
3146 fw.sbss = bce_CP_b06FwSbss;
3148 fw.bss_addr = bce_CP_b06FwBssAddr;
3149 fw.bss_len = bce_CP_b06FwBssLen;
3151 fw.bss = bce_CP_b06FwBss;
3153 fw.rodata_addr = bce_CP_b06FwRodataAddr;
3154 fw.rodata_len = bce_CP_b06FwRodataLen;
3155 fw.rodata_index = 0;
3156 fw.rodata = bce_CP_b06FwRodata;
3159 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3160 bce_start_cpu(sc, &cpu_reg);
3163 /****************************************************************************/
3164 /* Initialize the COM CPU. */
3168 /****************************************************************************/
3170 bce_init_com_cpu(struct bce_softc *sc)
3172 struct cpu_reg cpu_reg;
3175 cpu_reg.mode = BCE_COM_CPU_MODE;
3176 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3177 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3178 cpu_reg.state = BCE_COM_CPU_STATE;
3179 cpu_reg.state_value_clear = 0xffffff;
3180 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3181 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3182 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3183 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3184 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3185 cpu_reg.spad_base = BCE_COM_SCRATCH;
3186 cpu_reg.mips_view_base = 0x8000000;
3188 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3189 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3190 fw.ver_major = bce_COM_b09FwReleaseMajor;
3191 fw.ver_minor = bce_COM_b09FwReleaseMinor;
3192 fw.ver_fix = bce_COM_b09FwReleaseFix;
3193 fw.start_addr = bce_COM_b09FwStartAddr;
3195 fw.text_addr = bce_COM_b09FwTextAddr;
3196 fw.text_len = bce_COM_b09FwTextLen;
3198 fw.text = bce_COM_b09FwText;
3200 fw.data_addr = bce_COM_b09FwDataAddr;
3201 fw.data_len = bce_COM_b09FwDataLen;
3203 fw.data = bce_COM_b09FwData;
3205 fw.sbss_addr = bce_COM_b09FwSbssAddr;
3206 fw.sbss_len = bce_COM_b09FwSbssLen;
3208 fw.sbss = bce_COM_b09FwSbss;
3210 fw.bss_addr = bce_COM_b09FwBssAddr;
3211 fw.bss_len = bce_COM_b09FwBssLen;
3213 fw.bss = bce_COM_b09FwBss;
3215 fw.rodata_addr = bce_COM_b09FwRodataAddr;
3216 fw.rodata_len = bce_COM_b09FwRodataLen;
3217 fw.rodata_index = 0;
3218 fw.rodata = bce_COM_b09FwRodata;
3220 fw.ver_major = bce_COM_b06FwReleaseMajor;
3221 fw.ver_minor = bce_COM_b06FwReleaseMinor;
3222 fw.ver_fix = bce_COM_b06FwReleaseFix;
3223 fw.start_addr = bce_COM_b06FwStartAddr;
3225 fw.text_addr = bce_COM_b06FwTextAddr;
3226 fw.text_len = bce_COM_b06FwTextLen;
3228 fw.text = bce_COM_b06FwText;
3230 fw.data_addr = bce_COM_b06FwDataAddr;
3231 fw.data_len = bce_COM_b06FwDataLen;
3233 fw.data = bce_COM_b06FwData;
3235 fw.sbss_addr = bce_COM_b06FwSbssAddr;
3236 fw.sbss_len = bce_COM_b06FwSbssLen;
3238 fw.sbss = bce_COM_b06FwSbss;
3240 fw.bss_addr = bce_COM_b06FwBssAddr;
3241 fw.bss_len = bce_COM_b06FwBssLen;
3243 fw.bss = bce_COM_b06FwBss;
3245 fw.rodata_addr = bce_COM_b06FwRodataAddr;
3246 fw.rodata_len = bce_COM_b06FwRodataLen;
3247 fw.rodata_index = 0;
3248 fw.rodata = bce_COM_b06FwRodata;
3251 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3252 bce_start_cpu(sc, &cpu_reg);
3255 /****************************************************************************/
3256 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */
3258 /* Loads the firmware for each CPU and starts the CPU. */
3262 /****************************************************************************/
3264 bce_init_cpus(struct bce_softc *sc)
3266 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3267 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3268 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3269 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3270 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3271 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3272 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3274 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3275 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3276 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3277 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3280 bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3281 sizeof(bce_rv2p_proc1), RV2P_PROC1);
3282 bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3283 sizeof(bce_rv2p_proc2), RV2P_PROC2);
3286 bce_init_rxp_cpu(sc);
3287 bce_init_txp_cpu(sc);
3288 bce_init_tpat_cpu(sc);
3289 bce_init_com_cpu(sc);
3290 bce_init_cp_cpu(sc);
3293 /****************************************************************************/
3294 /* Initialize context memory. */
3296 /* Clears the memory associated with each Context ID (CID). */
3300 /****************************************************************************/
3302 bce_init_ctx(struct bce_softc *sc)
3304 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3305 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3306 /* DRC: Replace this constant value with a #define. */
3307 int i, retry_cnt = 10;
3311 * BCM5709 context memory may be cached
3312 * in host memory so prepare the host memory
3315 val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
3317 val |= (BCM_PAGE_BITS - 8) << 16;
3318 REG_WR(sc, BCE_CTX_COMMAND, val);
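/*
 * The page-size field carries (BCM_PAGE_BITS - 8) starting at bit 16:
 * with the common 4KiB host page size BCM_PAGE_BITS is 12, so the
 * OR above adds 4 << 16 == 0x00040000 to the command value.
 */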
3320 /* Wait for mem init command to complete. */
3321 for (i = 0; i < retry_cnt; i++) {
3322 val = REG_RD(sc, BCE_CTX_COMMAND);
3323 if (!(val & BCE_CTX_COMMAND_MEM_INIT))
3327 if (i == retry_cnt) {
3328 device_printf(sc->bce_dev,
3329 "Context memory initialization failed!\n");
3333 for (i = 0; i < sc->ctx_pages; i++) {
3337 * Set the physical address of the context
3340 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
3341 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
3342 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
3343 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
3344 BCE_ADDR_HI(sc->ctx_paddr[i]));
3345 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
3346 i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3349 * Verify that the context memory write was successful.
3351 for (j = 0; j < retry_cnt; j++) {
3352 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
3354 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3358 if (j == retry_cnt) {
3359 device_printf(sc->bce_dev,
3360 "Failed to initialize context page!\n");
3365 uint32_t vcid_addr, offset;
3368 * For the 5706/5708, context memory is local to
3369 * the controller, so initialize the controller
3373 vcid_addr = GET_CID_ADDR(96);
3375 vcid_addr -= PHY_CTX_SIZE;
3377 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
3378 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3380 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3381 CTX_WR(sc, 0x00, offset, 0);
3383 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3384 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3390 /****************************************************************************/
3391 /* Fetch the permanent MAC address of the controller. */
3395 /****************************************************************************/
3397 bce_get_mac_addr(struct bce_softc *sc)
3399 uint32_t mac_lo = 0, mac_hi = 0;
3402 * The NetXtreme II bootcode populates various NIC
3403 * power-on and runtime configuration items in a
3404 * shared memory area. The factory configured MAC
3405 * address is available from both NVRAM and the
3406 * shared memory area so we'll read the value from
3407 * shared memory for speed.
3410 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
3411 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
3413 if (mac_lo == 0 && mac_hi == 0) {
3414 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
3416 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3417 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3418 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3419 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3420 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3421 sc->eaddr[5] = (u_char)(mac_lo >> 0);
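/*
 * Worked example with a hypothetical address 00:10:18:2a:3b:4c: the
 * bootcode stores mac_hi = 0x00000010 (bytes 0-1 in its low 16 bits)
 * and mac_lo = 0x182a3b4c (bytes 2-5), which the shifts above unpack
 * in network byte order.  bce_set_mac_addr() below programs the same
 * packing into the BCE_EMAC_MAC_MATCH0/1 registers.
 */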
3425 /****************************************************************************/
3426 /* Program the MAC address. */
3430 /****************************************************************************/
3432 bce_set_mac_addr(struct bce_softc *sc)
3434 const uint8_t *mac_addr = sc->eaddr;
3437 val = (mac_addr[0] << 8) | mac_addr[1];
3438 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3440 val = (mac_addr[2] << 24) |
3441 (mac_addr[3] << 16) |
3442 (mac_addr[4] << 8) |
3444 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3447 /****************************************************************************/
3448 /* Stop the controller. */
3452 /****************************************************************************/
3454 bce_stop(struct bce_softc *sc)
3456 struct ifnet *ifp = &sc->arpcom.ac_if;
3459 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3461 callout_stop(&sc->bce_tick_callout);
3463 /* Disable the transmit/receive blocks. */
3464 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
3465 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3468 bce_disable_intr(sc);
3470 ifp->if_flags &= ~IFF_RUNNING;
3471 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3472 ifsq_clr_oactive(sc->tx_rings[i].ifsq);
3473 ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
3476 /* Free the RX lists. */
3477 for (i = 0; i < sc->rx_ring_cnt; ++i)
3478 bce_free_rx_chain(&sc->rx_rings[i]);
3480 /* Free TX buffers. */
3481 for (i = 0; i < sc->tx_ring_cnt; ++i)
3482 bce_free_tx_chain(&sc->tx_rings[i]);
3485 sc->bce_coalchg_mask = 0;
3489 bce_reset(struct bce_softc *sc, uint32_t reset_code)
3494 /* Wait for pending PCI transactions to complete. */
3495 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3496 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3497 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3498 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3499 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3500 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3504 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3505 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3506 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3507 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3508 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3511 /* Assume bootcode is running. */
3512 sc->bce_fw_timed_out = 0;
3513 sc->bce_drv_cardiac_arrest = 0;
3515 /* Give the firmware a chance to prepare for the reset. */
3516 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3518 if_printf(&sc->arpcom.ac_if,
3519 "Firmware is not ready for reset\n");
3523 /* Set a firmware reminder that this is a soft reset. */
3524 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
3525 BCE_DRV_RESET_SIGNATURE_MAGIC);
3527 /* Dummy read to force the chip to complete all current transactions. */
3528 val = REG_RD(sc, BCE_MISC_ID);
3531 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3532 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3533 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
3534 REG_RD(sc, BCE_MISC_COMMAND);
3537 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3538 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3540 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
3542 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3543 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3544 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3545 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3547 /* Allow up to 30us for reset to complete. */
3548 for (i = 0; i < 10; i++) {
3549 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3550 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3551 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3556 /* Check that reset completed successfully. */
3557 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3558 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3559 if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
3564 /* Make sure byte swapping is properly configured. */
3565 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3566 if (val != 0x01020304) {
3567 if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
3571 /* Just completed a reset; assume that the firmware is running again. */
3572 sc->bce_fw_timed_out = 0;
3573 sc->bce_drv_cardiac_arrest = 0;
3575 /* Wait for the firmware to finish its initialization. */
3576 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3578 if_printf(&sc->arpcom.ac_if,
3579 "Firmware did not complete initialization!\n");
3582 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3583 bce_setup_msix_table(sc);
3584 /* Prevent MSI-X table reads and writes from timing out. */
3585 REG_WR(sc, BCE_MISC_ECO_HW_CTL,
3586 BCE_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
3593 bce_chipinit(struct bce_softc *sc)
3598 /* Make sure the interrupt is not active. */
3599 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3600 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
3603 * Initialize DMA byte/word swapping, configure the number of DMA
3604 * channels, and the PCI clock compensation delay.
3606 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3607 BCE_DMA_CONFIG_DATA_WORD_SWAP |
3608 #if BYTE_ORDER == BIG_ENDIAN
3609 BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3611 BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3612 DMA_READ_CHANS << 12 |
3613 DMA_WRITE_CHANS << 16;
3615 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3617 if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3618 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3621 * This setting resolves a problem observed on certain Intel PCI
3622 * chipsets that cannot handle multiple outstanding DMA operations.
3623 * See errata E9_5706A1_65.
3625 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3626 BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3627 !(sc->bce_flags & BCE_PCIX_FLAG))
3628 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3630 REG_WR(sc, BCE_DMA_CONFIG, val);
3632 /* Enable the RX_V2P and Context state machines before access. */
3633 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3634 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3635 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3636 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3638 /* Initialize context mapping and zero out the quick contexts. */
3639 rc = bce_init_ctx(sc);
3643 /* Initialize the on-board CPUs. */
3646 /* Enable management frames (NC-SI) to flow to the MCP. */
3647 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3648 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
3649 BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3650 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3653 /* Prepare NVRAM for access. */
3654 rc = bce_init_nvram(sc);
3658 /* Set the kernel bypass block size */
3659 val = REG_RD(sc, BCE_MQ_CONFIG);
3660 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3661 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3663 /* Enable bins used on the 5709/5716. */
3664 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3665 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3666 val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
3667 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
3668 val |= BCE_MQ_CONFIG_HALT_DIS;
3671 REG_WR(sc, BCE_MQ_CONFIG, val);
3673 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3674 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3675 REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3677 /* Set the page size and clear the RV2P processor stall bits. */
3678 val = (BCM_PAGE_BITS - 8) << 24;
3679 REG_WR(sc, BCE_RV2P_CONFIG, val);
3681 /* Configure page size. */
3682 val = REG_RD(sc, BCE_TBDR_CONFIG);
3683 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3684 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3685 REG_WR(sc, BCE_TBDR_CONFIG, val);
3687 /* Set the perfect match control register to default. */
3688 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
3693 /****************************************************************************/
3694 /* Initialize the controller in preparation for sending/receiving traffic. */
3697 /* 0 for success, positive value for failure. */
3698 /****************************************************************************/
3700 bce_blockinit(struct bce_softc *sc)
3705 /* Load the hardware default MAC address. */
3706 bce_set_mac_addr(sc);
3708 /* Set the Ethernet backoff seed value */
3709 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3710 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3711 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3713 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3715 /* Set up link change interrupt generation. */
3716 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3718 /* Program the physical address of the status block. */
3719 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3720 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3722 /* Program the physical address of the statistics block. */
3723 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3724 BCE_ADDR_LO(sc->stats_block_paddr));
3725 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3726 BCE_ADDR_HI(sc->stats_block_paddr));
3728 /* Program various host coalescing parameters. */
3729 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3730 (sc->bce_tx_quick_cons_trip_int << 16) |
3731 sc->bce_tx_quick_cons_trip);
3732 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3733 (sc->bce_rx_quick_cons_trip_int << 16) |
3734 sc->bce_rx_quick_cons_trip);
3735 REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3736 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3737 REG_WR(sc, BCE_HC_TX_TICKS,
3738 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3739 REG_WR(sc, BCE_HC_RX_TICKS,
3740 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3741 REG_WR(sc, BCE_HC_COM_TICKS,
3742 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3743 REG_WR(sc, BCE_HC_CMD_TICKS,
3744 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3745 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3746 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
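/*
 * Packing sketch: each trip/tick register carries the interrupt-time
 * value in its upper 16 bits and the normal value in its lower 16
 * bits, so hypothetical trip counts of 20/20 would program
 * BCE_HC_TX_QUICK_CONS_TRIP with (20 << 16) | 20 == 0x00140014.
 */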
3748 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3749 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
3751 val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
3752 if ((sc->bce_flags & BCE_ONESHOT_MSI_FLAG) ||
3753 sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3755 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3756 if_printf(&sc->arpcom.ac_if,
3759 if_printf(&sc->arpcom.ac_if,
3760 "using oneshot MSI\n");
3763 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
3764 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3765 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
3767 REG_WR(sc, BCE_HC_CONFIG, val);
3769 for (i = 1; i < sc->rx_ring_cnt; ++i) {
3772 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1;
3773 KKASSERT(base <= BCE_HC_SB_CONFIG_8);
3776 BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
3777 /* BCE_HC_SB_CONFIG_1_RX_TMR_MODE | */
3778 BCE_HC_SB_CONFIG_1_ONE_SHOT);
3780 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
3781 (sc->bce_tx_quick_cons_trip_int << 16) |
3782 sc->bce_tx_quick_cons_trip);
3783 REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
3784 (sc->bce_rx_quick_cons_trip_int << 16) |
3785 sc->bce_rx_quick_cons_trip);
3786 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
3787 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3788 REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
3789 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3792 /* Clear the internal statistics counters. */
3793 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3795 /* Verify that bootcode is running. */
3796 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
3798 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3799 BCE_DEV_INFO_SIGNATURE_MAGIC) {
3800 if_printf(&sc->arpcom.ac_if,
3801 "Bootcode not running! Found: 0x%08X, "
3802 "Expected: 08%08X\n",
3803 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3804 BCE_DEV_INFO_SIGNATURE_MAGIC);
3809 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3810 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3811 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3812 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3813 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3816 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3817 bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3819 /* Enable link state change interrupt generation. */
3820 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3822 /* Enable the RXP. */
3823 bce_start_rxp_cpu(sc);
3825 /* Disable management frames (NC-SI) from flowing to the MCP. */
3826 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3827 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
3828 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3829 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3832 /* Enable all remaining blocks in the MAC. */
3833 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3834 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3835 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3836 BCE_MISC_ENABLE_DEFAULT_XI);
3838 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
3840 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3843 /* Save the current host coalescing block settings. */
3844 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
3849 /****************************************************************************/
3850 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3852 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3853 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3857 /* 0 for success, positive value for failure. */
3858 /****************************************************************************/
3860 bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t chain_prod,
3861 uint32_t *prod_bseq, int init)
3863 struct bce_rx_buf *rx_buf;
3865 bus_dma_segment_t seg;
3869 /* This is a new mbuf allocation. */
3870 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3874 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3876 /* Map the mbuf cluster into device memory. */
3877 error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
3878 rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
3882 if_printf(&rxr->sc->arpcom.ac_if,
3883 "Error mapping mbuf into RX chain!\n");
3888 rx_buf = &rxr->rx_bufs[chain_prod];
3889 if (rx_buf->rx_mbuf_ptr != NULL)
3890 bus_dmamap_unload(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map);
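/*
 * The replacement mbuf was loaded on the ring's spare map above, so
 * swap the spare into this slot and keep the slot's old map as the
 * next spare.  This way the old buffer is only unloaded once its
 * replacement has mapped successfully.
 */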
3892 map = rx_buf->rx_mbuf_map;
3893 rx_buf->rx_mbuf_map = rxr->rx_mbuf_tmpmap;
3894 rxr->rx_mbuf_tmpmap = map;
3896 /* Save the mbuf and update our counter. */
3897 rx_buf->rx_mbuf_ptr = m_new;
3898 rx_buf->rx_mbuf_paddr = seg.ds_addr;
3901 bce_setup_rxdesc_std(rxr, chain_prod, prod_bseq);
3907 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
3908 uint32_t *prod_bseq)
3910 const struct bce_rx_buf *rx_buf;
3915 rx_buf = &rxr->rx_bufs[chain_prod];
3916 paddr = rx_buf->rx_mbuf_paddr;
3917 len = rx_buf->rx_mbuf_ptr->m_len;
3919 /* Setup the rx_bd for the first segment. */
3920 rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3922 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3923 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3924 rxbd->rx_bd_len = htole32(len);
3925 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3928 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
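/*
 * Each standard ring buffer is a single mbuf cluster, so the
 * descriptor built above both starts and ends the frame's buffer
 * chain: haddr points at the cluster, len covers the whole mbuf, and
 * the flags carry RX_BD_FLAGS_START | RX_BD_FLAGS_END.
 */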
3931 /****************************************************************************/
3932 /* Initialize the TX context memory. */
3936 /****************************************************************************/
3938 bce_init_tx_context(struct bce_tx_ring *txr)
3942 /* Initialize the context ID for an L2 TX chain. */
3943 if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
3944 BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
3945 /* Set the CID type to support an L2 connection. */
3946 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3947 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3948 BCE_L2CTX_TX_TYPE_XI, val);
3949 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3950 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3951 BCE_L2CTX_TX_CMD_TYPE_XI, val);
3953 /* Point the hardware to the first page in the chain. */
3954 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3955 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3956 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
3957 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3958 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3959 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
3961 /* Set the CID type to support an L2 connection. */
3962 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3963 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3964 BCE_L2CTX_TX_TYPE, val);
3965 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3966 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3967 BCE_L2CTX_TX_CMD_TYPE, val);
3969 /* Point the hardware to the first page in the chain. */
3970 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3971 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3972 BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
3973 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3974 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3975 BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
3979 /****************************************************************************/
3980 /* Allocate memory and initialize the TX data structures. */
3983 /* 0 for success, positive value for failure. */
3984 /****************************************************************************/
3986 bce_init_tx_chain(struct bce_tx_ring *txr)
3991 /* Set the initial TX producer/consumer indices. */
3994 txr->tx_prod_bseq = 0;
3995 txr->used_tx_bd = 0;
3996 txr->max_tx_bd = USABLE_TX_BD(txr);
3999 * The NetXtreme II supports a linked-list structure called
4000 * a Buffer Descriptor Chain (or BD chain). A BD chain
4001 * consists of one or more chain pages, each of which
4002 * holds a fixed number of BD entries.
4003 * The last BD entry on each page is a pointer to the next page
4004 * in the chain, and the last pointer in the BD chain
4005 * points back to the beginning of the chain.
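 *
 * Illustrative layout (hypothetical page count; N is the number of
 * usable BDs per page):
 *
 *	page 0: bd[0] ... bd[N-1], bd[N] --> page 1
 *	page 1: bd[0] ... bd[N-1], bd[N] --> page 0
 *
 * so the hardware walks the pages as one logical ring.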
4008 /* Set the TX next pointer chain entries. */
4009 for (i = 0; i < txr->tx_pages; i++) {
4012 txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
4014 /* Check if we've reached the last page. */
4015 if (i == (txr->tx_pages - 1))
4020 txbd->tx_bd_haddr_hi =
4021 htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
4022 txbd->tx_bd_haddr_lo =
4023 htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
4025 bce_init_tx_context(txr);
4030 /****************************************************************************/
4031 /* Free memory and clear the TX data structures. */
4035 /****************************************************************************/
4037 bce_free_tx_chain(struct bce_tx_ring *txr)
4041 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4042 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
4043 struct bce_tx_buf *tx_buf = &txr->tx_bufs[i];
4045 if (tx_buf->tx_mbuf_ptr != NULL) {
4046 bus_dmamap_unload(txr->tx_mbuf_tag,
4047 tx_buf->tx_mbuf_map);
4048 m_freem(tx_buf->tx_mbuf_ptr);
4049 tx_buf->tx_mbuf_ptr = NULL;
4053 /* Clear each TX chain page. */
4054 for (i = 0; i < txr->tx_pages; i++)
4055 bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
4056 txr->used_tx_bd = 0;
4059 /****************************************************************************/
4060 /* Initialize the RX context memory. */
4064 /****************************************************************************/
4066 bce_init_rx_context(struct bce_rx_ring *rxr)
4070 /* Initialize the context ID for an L2 RX chain. */
4071 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4072 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4075 * Set the level for generating pause frames
4076 * when the number of available rx_bd's gets
4077 * too low (the low watermark) and the level
4078 * when pause frames can be stopped (the high
4081 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4082 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4083 uint32_t lo_water, hi_water;
4085 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
4086 hi_water = USABLE_RX_BD(rxr) / 4;
4088 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
4089 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
4093 else if (hi_water == 0)
4096 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
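/*
 * Worked example (hypothetical ring size): with ~1020 usable rx_bd's,
 * hi_water starts at 255 BDs; both marks are then divided by their
 * hardware scale factors and packed into the single context word
 * written below.
 */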
4099 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4100 BCE_L2CTX_RX_CTX_TYPE, val);
4102 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
4103 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4104 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4105 val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
4106 REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
4109 /* Point the hardware to the first page in the chain. */
4110 val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
4111 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4112 BCE_L2CTX_RX_NX_BDHADDR_HI, val);
4113 val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
4114 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4115 BCE_L2CTX_RX_NX_BDHADDR_LO, val);
4118 /****************************************************************************/
4119 /* Allocate memory and initialize the RX data structures. */
4122 /* 0 for success, positive value for failure. */
4123 /****************************************************************************/
4125 bce_init_rx_chain(struct bce_rx_ring *rxr)
4129 uint16_t prod, chain_prod;
4132 /* Initialize the RX producer and consumer indices. */
4135 rxr->rx_prod_bseq = 0;
4136 rxr->free_rx_bd = USABLE_RX_BD(rxr);
4137 rxr->max_rx_bd = USABLE_RX_BD(rxr);
4139 /* Clear the cached status index. */
4140 rxr->last_status_idx = 0;
4142 /* Initialize the RX next pointer chain entries. */
4143 for (i = 0; i < rxr->rx_pages; i++) {
4146 rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4148 /* Check if we've reached the last page. */
4149 if (i == (rxr->rx_pages - 1))
4154 /* Setup the chain page pointers. */
4155 rxbd->rx_bd_haddr_hi =
4156 htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
4157 rxbd->rx_bd_haddr_lo =
4158 htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
4161 /* Allocate mbuf clusters for the rx_bd chain. */
4162 prod = prod_bseq = 0;
4163 while (prod < TOTAL_RX_BD(rxr)) {
4164 chain_prod = RX_CHAIN_IDX(rxr, prod);
4165 if (bce_newbuf_std(rxr, &prod, chain_prod, &prod_bseq, 1)) {
4166 if_printf(&rxr->sc->arpcom.ac_if,
4167 "Error filling RX chain: rx_bd[0x%04X]!\n",
4172 prod = NEXT_RX_BD(prod);
4175 /* Save the RX chain producer index. */
4176 rxr->rx_prod = prod;
4177 rxr->rx_prod_bseq = prod_bseq;
4179 /* Tell the chip about the waiting rx_bd's. */
4180 REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4182 REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4185 bce_init_rx_context(rxr);
4190 /****************************************************************************/
4191 /* Free memory and clear the RX data structures. */
4195 /****************************************************************************/
4197 bce_free_rx_chain(struct bce_rx_ring *rxr)
4201 /* Free any mbufs still in the RX mbuf chain. */
4202 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
4203 struct bce_rx_buf *rx_buf = &rxr->rx_bufs[i];
4205 if (rx_buf->rx_mbuf_ptr != NULL) {
4206 bus_dmamap_unload(rxr->rx_mbuf_tag,
4207 rx_buf->rx_mbuf_map);
4208 m_freem(rx_buf->rx_mbuf_ptr);
4209 rx_buf->rx_mbuf_ptr = NULL;
4213 /* Clear each RX chain page. */
4214 for (i = 0; i < rxr->rx_pages; i++)
4215 bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4218 /****************************************************************************/
4219 /* Set media options. */
4222 /* 0 for success, positive value for failure. */
4223 /****************************************************************************/
4225 bce_ifmedia_upd(struct ifnet *ifp)
4227 struct bce_softc *sc = ifp->if_softc;
4228 struct mii_data *mii = device_get_softc(sc->bce_miibus);
4232 * 'mii' will be NULL when this function is called on the following
4233 * code path: bce_attach() -> bce_mgmt_init()
4236 /* Make sure the MII bus has been enumerated. */
4238 if (mii->mii_instance) {
4239 struct mii_softc *miisc;
4241 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4242 mii_phy_reset(miisc);
4244 error = mii_mediachg(mii);
4249 /****************************************************************************/
4250 /* Reports current media status. */
4254 /****************************************************************************/
4256 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4258 struct bce_softc *sc = ifp->if_softc;
4259 struct mii_data *mii = device_get_softc(sc->bce_miibus);
4262 ifmr->ifm_active = mii->mii_media_active;
4263 ifmr->ifm_status = mii->mii_media_status;
4266 /****************************************************************************/
4267 /* Handles PHY generated interrupt events. */
4271 /****************************************************************************/
4273 bce_phy_intr(struct bce_softc *sc)
4275 uint32_t new_link_state, old_link_state;
4276 struct ifnet *ifp = &sc->arpcom.ac_if;
4278 ASSERT_SERIALIZED(&sc->main_serialize);
4280 new_link_state = sc->status_block->status_attn_bits &
4281 STATUS_ATTN_BITS_LINK_STATE;
4282 old_link_state = sc->status_block->status_attn_bits_ack &
4283 STATUS_ATTN_BITS_LINK_STATE;
4285 /* Handle any link state changes. */
4286 if (new_link_state != old_link_state) { /* XXX redundant? */
4287 /* Update the status_attn_bits_ack field in the status block. */
4288 if (new_link_state) {
4289 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4290 STATUS_ATTN_BITS_LINK_STATE);
4292 if_printf(ifp, "Link is now UP.\n");
4294 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4295 STATUS_ATTN_BITS_LINK_STATE);
4297 if_printf(ifp, "Link is now DOWN.\n");
4301 * Assume the link is down and let the tick routine
4302 * update the state based on the actual media state.
4305 callout_stop(&sc->bce_tick_callout);
4306 bce_tick_serialized(sc);
4309 /* Acknowledge the link change interrupt. */
4310 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4313 /****************************************************************************/
4314 /* Reads the receive consumer value from the status block (skipping over */
4315 /* chain page pointer if necessary). */
4319 /****************************************************************************/
4320 static __inline uint16_t
4321 bce_get_hw_rx_cons(struct bce_rx_ring *rxr)
4323 uint16_t hw_cons = *rxr->rx_hw_cons;
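/*
 * Example, assuming 256 rx_bd's per page (USABLE_RX_BD_PER_PAGE ==
 * 255): the last entry of each page holds the next-page pointer, so
 * consumer values 255, 511, ... are bumped past the pointer slot
 * before use.
 */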
4325 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4330 /****************************************************************************/
4331 /* Handles received frame interrupt events. */
4335 /****************************************************************************/
4337 bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
4339 struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
4340 uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4341 uint32_t sw_prod_bseq;
4342 int cpuid = mycpuid;
4344 ASSERT_SERIALIZED(&rxr->rx_serialize);
4346 /* Get working copies of the driver's view of the RX indices. */
4347 sw_cons = rxr->rx_cons;
4348 sw_prod = rxr->rx_prod;
4349 sw_prod_bseq = rxr->rx_prod_bseq;
4351 /* Scan through the receive chain as long as there is work to do. */
4352 while (sw_cons != hw_cons) {
4353 struct pktinfo pi0, *pi = NULL;
4354 struct bce_rx_buf *rx_buf;
4355 struct mbuf *m = NULL;
4356 struct l2_fhdr *l2fhdr = NULL;
4358 uint32_t status = 0;
4360 #ifdef IFPOLL_ENABLE
4361 if (count >= 0 && count-- == 0)
4366 * Convert the producer/consumer indices
4367 * to an actual rx_bd index.
4369 sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
4370 sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
4371 rx_buf = &rxr->rx_bufs[sw_chain_cons];
4375 /* The mbuf is stored with the last rx_bd entry of a packet. */
4376 if (rx_buf->rx_mbuf_ptr != NULL) {
4377 if (sw_chain_cons != sw_chain_prod) {
4378 if_printf(ifp, "RX cons(%d) != prod(%d), "
4379 "drop!\n", sw_chain_cons, sw_chain_prod);
4380 IFNET_STAT_INC(ifp, ierrors, 1);
4382 bce_setup_rxdesc_std(rxr, sw_chain_cons,
4385 goto bce_rx_int_next_rx;
4388 /* Unmap the mbuf from DMA space. */
4389 bus_dmamap_sync(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map,
4390 BUS_DMASYNC_POSTREAD);
4392 /* Save the mbuf from the driver's chain. */
4393 m = rx_buf->rx_mbuf_ptr;
4396 * Frames received on the NetXtreme II are prepended
4397 * with an l2_fhdr structure which provides status
4398 * information about the received frame (including
4399 * VLAN tags and checksum info). The frames are also
4400 * automatically adjusted to align the IP header
4401 * (i.e. two null bytes are inserted before the
4402 * Ethernet header). As a result the data DMA'd by
4403 * the controller into the mbuf is as follows:
4405 * +---------+-----+---------------------+-----+
4406 * | l2_fhdr | pad | packet data | FCS |
4407 * +---------+-----+---------------------+-----+
4409 * The l2_fhdr needs to be checked and skipped and the
4410 * FCS needs to be stripped before sending the packet
4413 l2fhdr = mtod(m, struct l2_fhdr *);
4415 len = l2fhdr->l2_fhdr_pkt_len;
4416 status = l2fhdr->l2_fhdr_status;
4418 len -= ETHER_CRC_LEN;
4420 /* Check the received frame for errors. */
4421 if (status & (L2_FHDR_ERRORS_BAD_CRC |
4422 L2_FHDR_ERRORS_PHY_DECODE |
4423 L2_FHDR_ERRORS_ALIGNMENT |
4424 L2_FHDR_ERRORS_TOO_SHORT |
4425 L2_FHDR_ERRORS_GIANT_FRAME)) {
4426 IFNET_STAT_INC(ifp, ierrors, 1);
4428 /* Reuse the mbuf for a new frame. */
4429 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4432 goto bce_rx_int_next_rx;
4436 * Get a new mbuf for the rx_bd. If no new
4437 * mbufs are available then reuse the current mbuf,
4438 * log an ierror on the interface, and generate
4439 * an error in the system log.
4441 if (bce_newbuf_std(rxr, &sw_prod, sw_chain_prod,
4442 &sw_prod_bseq, 0)) {
4443 IFNET_STAT_INC(ifp, ierrors, 1);
4445 /* Try to reuse the existing mbuf. */
4446 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4449 goto bce_rx_int_next_rx;
4453 * Skip over the l2_fhdr when passing
4454 * the data up the stack.
4456 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4458 m->m_pkthdr.len = m->m_len = len;
4459 m->m_pkthdr.rcvif = ifp;
4461 /* Validate the checksum if offload enabled. */
4462 if (ifp->if_capenable & IFCAP_RXCSUM) {
4463 /* Check for an IP datagram. */
4464 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4465 m->m_pkthdr.csum_flags |=
4468 /* Check if the IP checksum is valid. */
4469 if ((l2fhdr->l2_fhdr_ip_xsum ^
4471 m->m_pkthdr.csum_flags |=
4476 /* Check for a valid TCP/UDP frame. */
4477 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4478 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4480 /* Check for a good TCP/UDP checksum. */
4482 (L2_FHDR_ERRORS_TCP_XSUM |
4483 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4484 m->m_pkthdr.csum_data =
4485 l2fhdr->l2_fhdr_tcp_udp_xsum;
4486 m->m_pkthdr.csum_flags |=
4492 if (ifp->if_capenable & IFCAP_RSS) {
4493 pi = bce_rss_pktinfo(&pi0, status, l2fhdr);
4495 (status & L2_FHDR_STATUS_RSS_HASH)) {
4496 m->m_flags |= M_HASH;
4498 toeplitz_hash(l2fhdr->l2_fhdr_hash);
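/*
 * A hypothetical sketch of how the recorded Toeplitz hash could map
 * to an RX ring, assuming a power-of-2 ring count:
 *
 *	ring = hash & (rx_ring_cnt2 - 1);
 *
 * packets whose masked hash is 0 are steered to the last ring (see
 * bce_npoll_rx_pack()).
 */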
4502 IFNET_STAT_INC(ifp, ipackets, 1);
4504 sw_prod = NEXT_RX_BD(sw_prod);
4507 sw_cons = NEXT_RX_BD(sw_cons);
4509 /* If we have a packet, pass it up the stack */
4511 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4512 m->m_flags |= M_VLANTAG;
4513 m->m_pkthdr.ether_vlantag =
4514 l2fhdr->l2_fhdr_vlan_tag;
4516 ifp->if_input(ifp, m, pi, cpuid);
4517 #ifdef BCE_RSS_DEBUG
4523 rxr->rx_cons = sw_cons;
4524 rxr->rx_prod = sw_prod;
4525 rxr->rx_prod_bseq = sw_prod_bseq;
4527 REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4529 REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4533 /****************************************************************************/
4534 /* Reads the transmit consumer value from the status block (skipping over */
4535 /* chain page pointer if necessary). */
4539 /****************************************************************************/
4540 static __inline uint16_t
4541 bce_get_hw_tx_cons(struct bce_tx_ring *txr)
4543 uint16_t hw_cons = *txr->tx_hw_cons;
4545 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4550 /****************************************************************************/
4551 /* Handles transmit completion interrupt events. */
4555 /****************************************************************************/
4557 bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
4559 struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4560 uint16_t sw_tx_cons, sw_tx_chain_cons;
4562 ASSERT_SERIALIZED(&txr->tx_serialize);
4564 /* Get the driver's view of the TX consumer index. */
4565 sw_tx_cons = txr->tx_cons;
4567 /* Cycle through any completed TX chain page entries. */
4568 while (sw_tx_cons != hw_tx_cons) {
4569 struct bce_tx_buf *tx_buf;
4571 sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
4572 tx_buf = &txr->tx_bufs[sw_tx_chain_cons];
4575 * Free the associated mbuf. Remember
4576 * that only the last tx_bd of a packet
4577 * has an mbuf pointer and DMA map.
4579 if (tx_buf->tx_mbuf_ptr != NULL) {
4580 /* Unmap the mbuf. */
4581 bus_dmamap_unload(txr->tx_mbuf_tag,
4582 tx_buf->tx_mbuf_map);
4584 /* Free the mbuf. */
4585 m_freem(tx_buf->tx_mbuf_ptr);
4586 tx_buf->tx_mbuf_ptr = NULL;
4588 IFNET_STAT_INC(ifp, opackets, 1);
4589 #ifdef BCE_TSS_DEBUG
4595 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4598 if (txr->used_tx_bd == 0) {
4599 /* Clear the TX timeout timer. */
4600 txr->tx_watchdog.wd_timer = 0;
4603 /* Clear the tx hardware queue full flag. */
4604 if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
4605 ifsq_clr_oactive(txr->ifsq);
4606 txr->tx_cons = sw_tx_cons;
4609 /****************************************************************************/
4610 /* Disables interrupt generation. */
4614 /****************************************************************************/
4616 bce_disable_intr(struct bce_softc *sc)
4620 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4621 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4622 (sc->rx_rings[i].idx << 24) |
4623 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4625 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
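/* The read back above flushes the posted writes to the chip. */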
4627 callout_stop(&sc->bce_ckmsi_callout);
4628 sc->bce_msi_maylose = FALSE;
4629 sc->bce_check_rx_cons = 0;
4630 sc->bce_check_tx_cons = 0;
4631 sc->bce_check_status_idx = 0xffff;
4633 for (i = 0; i < sc->rx_ring_cnt; ++i)
4634 lwkt_serialize_handler_disable(sc->bce_msix[i].msix_serialize);
4637 /****************************************************************************/
4638 /* Enables interrupt generation. */
4642 /****************************************************************************/
4644 bce_enable_intr(struct bce_softc *sc)
4648 for (i = 0; i < sc->rx_ring_cnt; ++i)
4649 lwkt_serialize_handler_enable(sc->bce_msix[i].msix_serialize);
4651 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4652 struct bce_rx_ring *rxr = &sc->rx_rings[i];
4654 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4655 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4656 BCE_PCICFG_INT_ACK_CMD_MASK_INT |
4657 rxr->last_status_idx);
4658 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4659 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4660 rxr->last_status_idx);
4662 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
4664 if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
4665 sc->bce_msi_maylose = FALSE;
4666 sc->bce_check_rx_cons = 0;
4667 sc->bce_check_tx_cons = 0;
4668 sc->bce_check_status_idx = 0xffff;
4671 if_printf(&sc->arpcom.ac_if, "check msi\n");
4673 callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
4674 bce_check_msi, sc, sc->bce_msix[0].msix_cpuid);
4678 /****************************************************************************/
4679 /* Reenables interrupt generation during interrupt handling. */
4683 /****************************************************************************/
4685 bce_reenable_intr(struct bce_rx_ring *rxr)
4687 REG_WR(rxr->sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4688 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | rxr->last_status_idx);
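/*
 * Writing the latest status index with INDEX_VALID set (and MASK_INT
 * clear) tells the chip the host has caught up and re-arms the
 * interrupt for this vector.
 */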
4691 /****************************************************************************/
4692 /* Handles controller initialization. */
4696 /****************************************************************************/
4700 struct bce_softc *sc = xsc;
4701 struct ifnet *ifp = &sc->arpcom.ac_if;
4706 ASSERT_IFNET_SERIALIZED_ALL(ifp);
4708 /* Bail out if the interface is already running. */
4709 if (ifp->if_flags & IFF_RUNNING)
4714 error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4716 if_printf(ifp, "Controller reset failed!\n");
4720 error = bce_chipinit(sc);
4722 if_printf(ifp, "Controller initialization failed!\n");
4726 error = bce_blockinit(sc);
4728 if_printf(ifp, "Block initialization failed!\n");
4732 /* Load our MAC address. */
4733 bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4734 bce_set_mac_addr(sc);
4736 /* Calculate and program the Ethernet MTU size. */
4737 ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
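/*
 * Worked example with the standard 1500 byte MTU:
 * 14 (header) + 4 (VLAN tag) + 1500 + 4 (FCS) = 1522 bytes, which
 * fits within the non-jumbo limit checked below.
 */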
4740 * Program the mtu, enabling jumbo frame
4741 * support if necessary. Also set the mbuf
4742 * allocation count for RX frames.
4744 if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4746 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4747 min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4748 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4750 panic("jumbo buffer is not supported yet");
4753 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4756 /* Program appropriate promiscuous/multicast filtering. */
4757 bce_set_rx_mode(sc);
4760 * Init RX buffer descriptor chain.
4762 REG_WR(sc, BCE_RLUP_RSS_CONFIG, 0);
4763 bce_reg_wr_ind(sc, BCE_RXP_SCRATCH_RSS_TBL_SZ, 0);
4765 for (i = 0; i < sc->rx_ring_cnt; ++i)
4766 bce_init_rx_chain(&sc->rx_rings[i]); /* XXX return value */
4768 if (sc->rx_ring_cnt > 1)
4772 * Init TX buffer descriptor chain.
4774 REG_WR(sc, BCE_TSCH_TSS_CFG, 0);
4776 for (i = 0; i < sc->tx_ring_cnt; ++i)
4777 bce_init_tx_chain(&sc->tx_rings[i]);
4779 if (sc->tx_ring_cnt > 1) {
4780 REG_WR(sc, BCE_TSCH_TSS_CFG,
4781 ((sc->tx_ring_cnt - 1) << 24) | (TX_TSS_CID << 7));
4785 #ifdef IFPOLL_ENABLE
4786 if (ifp->if_flags & IFF_NPOLLING)
4791 /* Disable interrupts if we are polling. */
4792 bce_disable_intr(sc);
4794 /* Change coalesce parameters */
4795 bce_npoll_coal_change(sc);
4797 /* Enable host interrupts. */
4798 bce_enable_intr(sc);
4800 bce_set_timer_cpuid(sc, polling);
4802 bce_ifmedia_upd(ifp);
4804 ifp->if_flags |= IFF_RUNNING;
4805 for (i = 0; i < sc->tx_ring_cnt; ++i) {
4806 ifsq_clr_oactive(sc->tx_rings[i].ifsq);
4807 ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
4810 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
4811 sc->bce_timer_cpuid);
4817 /****************************************************************************/
4818 /* Initialize the controller just enough so that any management firmware */
4819 /* running on the device will continue to operate correctly. */
4823 /****************************************************************************/
4825 bce_mgmt_init(struct bce_softc *sc)
4827 struct ifnet *ifp = &sc->arpcom.ac_if;
4829 /* Bail out if management firmware is not running. */
4830 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4833 /* Enable all critical blocks in the MAC. */
4834 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
4835 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
4836 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4837 BCE_MISC_ENABLE_DEFAULT_XI);
4839 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4841 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4844 bce_ifmedia_upd(ifp);
4847 /****************************************************************************/
4848 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4849 /* memory visible to the controller. */
4852 /* 0 for success, positive value for failure. */
4853 /****************************************************************************/
4855 bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
4857 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4858 bus_dmamap_t map, tmp_map;
4859 struct mbuf *m0 = *m_head;
4860 struct tx_bd *txbd = NULL;
4861 uint16_t vlan_tag = 0, flags = 0, mss = 0;
4862 uint16_t chain_prod, chain_prod_start, prod;
4864 int i, error, maxsegs, nsegs;
4866 /* Transfer any checksum offload flags to the bd. */
4867 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
4868 error = bce_tso_setup(txr, m_head, &flags, &mss);
4872 } else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
4873 if (m0->m_pkthdr.csum_flags & CSUM_IP)
4874 flags |= TX_BD_FLAGS_IP_CKSUM;
4875 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4876 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4879 /* Transfer any VLAN tags to the bd. */
4880 if (m0->m_flags & M_VLANTAG) {
4881 flags |= TX_BD_FLAGS_VLAN_TAG;
4882 vlan_tag = m0->m_pkthdr.ether_vlantag;
4885 prod = txr->tx_prod;
4886 chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);
4888 /* Fetch the DMA map for this mbuf. */
4889 map = txr->tx_bufs[chain_prod_start].tx_mbuf_map;
4891 maxsegs = txr->max_tx_bd - txr->used_tx_bd;
4892 KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4893 ("not enough segments %d", maxsegs));
4894 if (maxsegs > BCE_MAX_SEGMENTS)
4895 maxsegs = BCE_MAX_SEGMENTS;
4897 /* Map the mbuf into our DMA address space. */
4898 error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
4899 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
4902 bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4904 *nsegs_used += nsegs;
4909 /* prod points to an empty tx_bd at this point. */
4910 prod_bseq = txr->tx_prod_bseq;
4913 * Cycle through each mbuf segment that makes up
4914 * the outgoing frame, gathering the mapping info
4915 * for that segment and creating a tx_bd for
4918 for (i = 0; i < nsegs; i++) {
4919 chain_prod = TX_CHAIN_IDX(txr, prod);
4921 &txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4923 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4924 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4925 txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
4926 htole16(segs[i].ds_len);
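/*
 * For example, mss = 1460 and ds_len = 2048 pack to 0x05b40800:
 * MSS in the upper 16 bits, segment length in the lower 16.
 */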
4927 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4928 txbd->tx_bd_flags = htole16(flags);
4930 prod_bseq += segs[i].ds_len;
4932 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4933 prod = NEXT_TX_BD(prod);
4936 /* Set the END flag on the last TX buffer descriptor. */
4937 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4940 * Ensure that the mbuf pointer for this transmission
4941 * is placed at the array index of the last
4942 * descriptor in this chain. This is done
4943 * because a single map is used for all
4944 * segments of the mbuf and we don't want to
4945 * unload the map before all of the segments
4948 txr->tx_bufs[chain_prod].tx_mbuf_ptr = m0;
4950 tmp_map = txr->tx_bufs[chain_prod].tx_mbuf_map;
4951 txr->tx_bufs[chain_prod].tx_mbuf_map = map;
4952 txr->tx_bufs[chain_prod_start].tx_mbuf_map = tmp_map;
4954 txr->used_tx_bd += nsegs;
4956 /* prod points to the next free tx_bd at this point. */
4957 txr->tx_prod = prod;
4958 txr->tx_prod_bseq = prod_bseq;
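/*
 * prod_bseq is a running byte-sequence count rather than an index;
 * the chip compares it with its own count to see how many new bytes
 * are ready (see bce_xmit() below).
 */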
4968 bce_xmit(struct bce_tx_ring *txr)
4970 /* Start the transmit. */
4971 REG_WR16(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BIDX,
4973 REG_WR(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BSEQ,
4977 /****************************************************************************/
4978 /* Main transmit routine when called from another routine with a lock. */
4982 /****************************************************************************/
4984 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4986 struct bce_softc *sc = ifp->if_softc;
4987 struct bce_tx_ring *txr = ifsq_get_priv(ifsq);
4990 KKASSERT(txr->ifsq == ifsq);
4991 ASSERT_SERIALIZED(&txr->tx_serialize);
4993 /* If there's no link or the transmit queue is empty then just exit. */
4994 if (!sc->bce_link) {
4999 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
5003 struct mbuf *m_head;
5006 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
5009 if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
5010 ifsq_set_oactive(ifsq);
5014 /* Check for any frames to send. */
5015 m_head = ifsq_dequeue(ifsq);
5020 * Pack the data into the transmit ring. If we
5021 * don't have room, place the mbuf back at the
5022 * head of the queue and set the OACTIVE flag
5023 * to wait for the NIC to drain the chain.
5025 if (bce_encap(txr, &m_head, &count)) {
5026 IFNET_STAT_INC(ifp, oerrors, 1);
5027 if (txr->used_tx_bd == 0) {
5030 ifsq_set_oactive(ifsq);
5035 if (count >= txr->tx_wreg) {
5040 /* Send a copy of the frame to any BPF listeners. */
5041 ETHER_BPF_MTAP(ifp, m_head);
5043 /* Set the tx timeout. */
5044 txr->tx_watchdog.wd_timer = BCE_TX_TIMEOUT;
5050 /****************************************************************************/
5051 /* Handles any IOCTL calls from the operating system. */
5054 /* 0 for success, positive value for failure. */
5055 /****************************************************************************/
5057 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
5059 struct bce_softc *sc = ifp->if_softc;
5060 struct ifreq *ifr = (struct ifreq *)data;
5061 struct mii_data *mii;
5062 int mask, error = 0;
5064 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5068 /* Check that the MTU setting is supported. */
5069 if (ifr->ifr_mtu < BCE_MIN_MTU ||
5071 ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
5073 ifr->ifr_mtu > ETHERMTU
5080 ifp->if_mtu = ifr->ifr_mtu;
5081 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */
5086 if (ifp->if_flags & IFF_UP) {
5087 if (ifp->if_flags & IFF_RUNNING) {
5088 mask = ifp->if_flags ^ sc->bce_if_flags;
5090 if (mask & (IFF_PROMISC | IFF_ALLMULTI))
5091 bce_set_rx_mode(sc);
5095 } else if (ifp->if_flags & IFF_RUNNING) {
5098 /* If MFW is running, restart the controller a bit. */
5099 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5100 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5105 sc->bce_if_flags = ifp->if_flags;
5110 if (ifp->if_flags & IFF_RUNNING)
5111 bce_set_rx_mode(sc);
5116 mii = device_get_softc(sc->bce_miibus);
5117 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5121 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5122 if (mask & IFCAP_HWCSUM) {
5123 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
5124 if (ifp->if_capenable & IFCAP_TXCSUM)
5125 ifp->if_hwassist |= BCE_CSUM_FEATURES;
5127 ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
5129 if (mask & IFCAP_TSO) {
5130 ifp->if_capenable ^= IFCAP_TSO;
5131 if (ifp->if_capenable & IFCAP_TSO)
5132 ifp->if_hwassist |= CSUM_TSO;
5134 ifp->if_hwassist &= ~CSUM_TSO;
5136 if (mask & IFCAP_RSS)
5137 ifp->if_capenable ^= IFCAP_RSS;
5141 error = ether_ioctl(ifp, command, data);
5147 /****************************************************************************/
5148 /* Transmit timeout handler. */
5152 /****************************************************************************/
5154 bce_watchdog(struct ifaltq_subque *ifsq)
5156 struct ifnet *ifp = ifsq_get_ifp(ifsq);
5157 struct bce_softc *sc = ifp->if_softc;
5160 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5163 * If we are in this routine because of pause frames, then
5164 * don't reset the hardware.
5166 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5169 if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
5171 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */
5174 IFNET_STAT_INC(ifp, oerrors, 1);
5176 for (i = 0; i < sc->tx_ring_cnt; ++i)
5177 ifsq_devstart_sched(sc->tx_rings[i].ifsq);
5180 #ifdef IFPOLL_ENABLE
5183 bce_npoll_status(struct ifnet *ifp)
5185 struct bce_softc *sc = ifp->if_softc;
5186 struct status_block *sblk = sc->status_block;
5187 uint32_t status_attn_bits;
5189 ASSERT_SERIALIZED(&sc->main_serialize);
5191 status_attn_bits = sblk->status_attn_bits;
5193 /* Was it a link change interrupt? */
5194 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5195 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5199 * Clear any transient status updates during link state change.
5201 REG_WR(sc, BCE_HC_COMMAND,
5202 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5203 REG_RD(sc, BCE_HC_COMMAND);
5207 * If any other attention is asserted then the chip is toast.
5209 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5210 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5211 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5212 sblk->status_attn_bits);
5213 bce_serialize_skipmain(sc);
5215 bce_deserialize_skipmain(sc);
5220 bce_npoll_rx(struct ifnet *ifp, void *arg, int count)
5222 struct bce_rx_ring *rxr = arg;
5223 uint16_t hw_rx_cons;
5225 ASSERT_SERIALIZED(&rxr->rx_serialize);
5228 * Save the status block index value for use when enabling
5231 rxr->last_status_idx = *rxr->hw_status_idx;
5233 /* Make sure status index is extracted before RX/TX cons */
5236 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5238 /* Check for any completed RX frames. */
5239 if (hw_rx_cons != rxr->rx_cons)
5240 bce_rx_intr(rxr, count, hw_rx_cons);
5244 bce_npoll_rx_pack(struct ifnet *ifp, void *arg, int count)
5246 struct bce_rx_ring *rxr = arg;
5248 KASSERT(rxr->idx == 0, ("not the first RX ring, but %d", rxr->idx));
5249 bce_npoll_rx(ifp, rxr, count);
5251 KASSERT(rxr->sc->rx_ring_cnt != rxr->sc->rx_ring_cnt2,
5252 ("RX ring count %d, count2 %d", rxr->sc->rx_ring_cnt,
5253 rxr->sc->rx_ring_cnt2));
5255 /* Last ring carries packets whose masked hash is 0 */
5256 rxr = &rxr->sc->rx_rings[rxr->sc->rx_ring_cnt - 1];
5258 lwkt_serialize_enter(&rxr->rx_serialize);
5259 bce_npoll_rx(ifp, rxr, count);
5260 lwkt_serialize_exit(&rxr->rx_serialize);
5264 bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused)
5266 struct bce_tx_ring *txr = arg;
5267 uint16_t hw_tx_cons;
5269 ASSERT_SERIALIZED(&txr->tx_serialize);
5271 hw_tx_cons = bce_get_hw_tx_cons(txr);
5273 /* Check for any completed TX frames. */
5274 if (hw_tx_cons != txr->tx_cons) {
5275 bce_tx_intr(txr, hw_tx_cons);
5276 if (!ifsq_is_empty(txr->ifsq))
5277 ifsq_devstart(txr->ifsq);
5282 bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
5284 struct bce_softc *sc = ifp->if_softc;
5287 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5290 info->ifpi_status.status_func = bce_npoll_status;
5291 info->ifpi_status.serializer = &sc->main_serialize;
5293 for (i = 0; i < sc->tx_ring_cnt; ++i) {
5294 struct bce_tx_ring *txr = &sc->tx_rings[i];
5295 int idx = i + sc->npoll_ofs;
5297 KKASSERT(idx < ncpus2);
5298 info->ifpi_tx[idx].poll_func = bce_npoll_tx;
5299 info->ifpi_tx[idx].arg = txr;
5300 info->ifpi_tx[idx].serializer = &txr->tx_serialize;
5301 ifsq_set_cpuid(txr->ifsq, idx);
5304 for (i = 0; i < sc->rx_ring_cnt2; ++i) {
5305 struct bce_rx_ring *rxr = &sc->rx_rings[i];
5306 int idx = i + sc->npoll_ofs;
5308 KKASSERT(idx < ncpus2);
5309 if (i == 0 && sc->rx_ring_cnt2 != sc->rx_ring_cnt) {
5311 * If RSS is enabled, the packets whose
5312 * masked hash are 0 are queued to the
5313 * last RX ring; piggyback the last RX
5314 * ring's processing in the first RX
5315 * polling handler. (see also: comment
5316 * in bce_setup_ring_cnt())
5319 if_printf(ifp, "npoll pack last "
5320 "RX ring on cpu%d\n", idx);
5322 info->ifpi_rx[idx].poll_func =
5325 info->ifpi_rx[idx].poll_func = bce_npoll_rx;
5327 info->ifpi_rx[idx].arg = rxr;
5328 info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
5331 if (ifp->if_flags & IFF_RUNNING) {
5332 bce_set_timer_cpuid(sc, TRUE);
5333 bce_disable_intr(sc);
5334 bce_npoll_coal_change(sc);
5337 for (i = 0; i < sc->tx_ring_cnt; ++i) {
5338 ifsq_set_cpuid(sc->tx_rings[i].ifsq,
5339 sc->bce_msix[i].msix_cpuid);
5342 if (ifp->if_flags & IFF_RUNNING) {
5343 bce_set_timer_cpuid(sc, FALSE);
5344 bce_enable_intr(sc);
5346 sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
5347 BCE_COALMASK_RX_BDS_INT;
5348 bce_coal_change(sc);
5353 #endif /* IFPOLL_ENABLE */
5356 * Interrupt handler.
5358 /****************************************************************************/
5359 /* Main interrupt entry point. Verifies that the controller generated the */
5360 /* interrupt and then calls a separate routine to handle the various */
5361 /* interrupt causes (PHY, TX, RX). */
5364 /* Nothing. */
5365 /****************************************************************************/
5367 bce_intr(struct bce_softc *sc)
5369 struct ifnet *ifp = &sc->arpcom.ac_if;
5370 struct status_block *sblk;
5371 uint16_t hw_rx_cons, hw_tx_cons;
5372 uint32_t status_attn_bits;
5373 struct bce_tx_ring *txr = &sc->tx_rings[0];
5374 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5376 ASSERT_SERIALIZED(&sc->main_serialize);
5378 sblk = sc->status_block;
5381 * Save the status block index value for use during
5382 * the next interrupt.
5384 rxr->last_status_idx = *rxr->hw_status_idx;
5386 /* Make sure status index is extracted before RX/TX cons */
5389 /* Check if the hardware has finished any work. */
5390 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5391 hw_tx_cons = bce_get_hw_tx_cons(txr);
5393 status_attn_bits = sblk->status_attn_bits;
5395 /* Was it a link change interrupt? */
5396 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5397 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5401 * Clear any transient status updates during link state
5404 REG_WR(sc, BCE_HC_COMMAND,
5405 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5406 REG_RD(sc, BCE_HC_COMMAND);
5410 * If any other attention is asserted then
5411 * the chip is toast.
5413 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5414 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5415 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5416 sblk->status_attn_bits);
5417 bce_serialize_skipmain(sc);
5419 bce_deserialize_skipmain(sc);
5423 /* Check for any completed RX frames. */
5424 lwkt_serialize_enter(&rxr->rx_serialize);
5425 if (hw_rx_cons != rxr->rx_cons)
5426 bce_rx_intr(rxr, -1, hw_rx_cons);
5427 lwkt_serialize_exit(&rxr->rx_serialize);
5429 /* Check for any completed TX frames. */
5430 lwkt_serialize_enter(&txr->tx_serialize);
5431 if (hw_tx_cons != txr->tx_cons) {
5432 bce_tx_intr(txr, hw_tx_cons);
5433 if (!ifsq_is_empty(txr->ifsq))
5434 ifsq_devstart(txr->ifsq);
5436 lwkt_serialize_exit(&txr->tx_serialize);
5440 bce_intr_legacy(void *xsc)
5442 struct bce_softc *sc = xsc;
5443 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5444 struct status_block *sblk;
5446 sblk = sc->status_block;
5449 * If the hardware status block index matches the last value
5450 * read by the driver and we haven't asserted our interrupt
5451 * then there's nothing to do.
5453 if (sblk->status_idx == rxr->last_status_idx &&
5454 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
5455 BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5458 /* Ack the interrupt and stop others from occurring. */
5459 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5460 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5461 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5464 * Read back to deassert IRQ immediately to avoid too
5465 * many spurious interrupts.
5467 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
5471 /* Re-enable interrupts. */
5472 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5473 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
5474 BCE_PCICFG_INT_ACK_CMD_MASK_INT | rxr->last_status_idx);
5475 bce_reenable_intr(rxr);
5479 bce_intr_msi(void *xsc)
5481 struct bce_softc *sc = xsc;
5483 /* Ack the interrupt and stop others from occurring. */
5484 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5485 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5486 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5490 /* Re-enable interrupts */
5491 bce_reenable_intr(&sc->rx_rings[0]);
5495 bce_intr_msi_oneshot(void *xsc)
5497 struct bce_softc *sc = xsc;
5501 /* Re-enable interrupts */
5502 bce_reenable_intr(&sc->rx_rings[0]);
5506 bce_intr_msix_rxtx(void *xrxr)
5508 struct bce_rx_ring *rxr = xrxr;
5509 struct bce_tx_ring *txr;
5510 uint16_t hw_rx_cons, hw_tx_cons;
5512 ASSERT_SERIALIZED(&rxr->rx_serialize);
5514 KKASSERT(rxr->idx < rxr->sc->tx_ring_cnt);
5515 txr = &rxr->sc->tx_rings[rxr->idx];
5518 * Save the status block index value for use during
5519 * the next interrupt.
5521 rxr->last_status_idx = *rxr->hw_status_idx;
5523 /* Make sure status index is extracted before RX/TX cons */
5526 /* Check if the hardware has finished any work. */
5527 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5528 if (hw_rx_cons != rxr->rx_cons)
5529 bce_rx_intr(rxr, -1, hw_rx_cons);
5531 /* Check for any completed TX frames. */
5532 hw_tx_cons = bce_get_hw_tx_cons(txr);
5533 lwkt_serialize_enter(&txr->tx_serialize);
5534 if (hw_tx_cons != txr->tx_cons) {
5535 bce_tx_intr(txr, hw_tx_cons);
5536 if (!ifsq_is_empty(txr->ifsq))
5537 ifsq_devstart(txr->ifsq);
5539 lwkt_serialize_exit(&txr->tx_serialize);
5541 /* Re-enable interrupts */
5542 bce_reenable_intr(rxr);
5546 bce_intr_msix_rx(void *xrxr)
5548 struct bce_rx_ring *rxr = xrxr;
5549 uint16_t hw_rx_cons;
5551 ASSERT_SERIALIZED(&rxr->rx_serialize);
5554 * Save the status block index value for use during
5555 * the next interrupt.
5557 rxr->last_status_idx = *rxr->hw_status_idx;
5559 /* Make sure status index is extracted before RX cons */
5562 /* Check if the hardware has finished any work. */
5563 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5564 if (hw_rx_cons != rxr->rx_cons)
5565 bce_rx_intr(rxr, -1, hw_rx_cons);
5567 /* Re-enable interrupts */
5568 bce_reenable_intr(rxr);
5571 /****************************************************************************/
5572 /* Programs the various packet receive modes (broadcast and multicast). */
5576 /****************************************************************************/
5578 bce_set_rx_mode(struct bce_softc *sc)
5580 struct ifnet *ifp = &sc->arpcom.ac_if;
5581 struct ifmultiaddr *ifma;
5582 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5583 uint32_t rx_mode, sort_mode;
5586 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5588 /* Initialize receive mode default settings. */
5589 rx_mode = sc->rx_mode &
5590 ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5591 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5592 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5595 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5598 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5599 !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5600 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5603 * Check for promiscuous, all multicast, or selected
5604 * multicast address filtering.
5606 if (ifp->if_flags & IFF_PROMISC) {
5607 /* Enable promiscuous mode. */
5608 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5609 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5610 } else if (ifp->if_flags & IFF_ALLMULTI) {
5611 /* Enable all multicast addresses. */
5612 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5613 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5616 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5618 /* Accept one or more multicast addresses. */
5619 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5620 if (ifma->ifma_addr->sa_family != AF_LINK)
5623 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5624 ETHER_ADDR_LEN) & 0xFF;
5625 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
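/*
 * Worked example: a CRC byte h = 0x47 selects hash register
 * (0x47 & 0xE0) >> 5 = 2 and bit 0x47 & 0x1F = 7, giving
 * 8 x 32 = 256 hash buckets in total.
 */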
5628 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5629 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5632 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5635 /* Only make changes if the receive mode has actually changed. */
5636 if (rx_mode != sc->rx_mode) {
5637 sc->rx_mode = rx_mode;
5638 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5641 /* Disable and clear the existing sort before enabling a new sort. */
5642 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5643 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5644 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5647 /****************************************************************************/
5648 /* Called periodically to update statistics from the controller's */
5649 /* statistics block. */
5653 /****************************************************************************/
5655 bce_stats_update(struct bce_softc *sc)
5657 struct ifnet *ifp = &sc->arpcom.ac_if;
5658 struct statistics_block *stats = sc->stats_block;
5660 ASSERT_SERIALIZED(&sc->main_serialize);
5663 * Certain controllers don't report carrier sense errors correctly.
5664 * See errata E11_5708CA0_1165.
5666 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5667 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5668 IFNET_STAT_INC(ifp, oerrors,
5669 (u_long)stats->stat_Dot3StatsCarrierSenseErrors);
5673 * Update the sysctl statistics from the hardware statistics.
5675 sc->stat_IfHCInOctets =
5676 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5677 (uint64_t)stats->stat_IfHCInOctets_lo;
5679 sc->stat_IfHCInBadOctets =
5680 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5681 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5683 sc->stat_IfHCOutOctets =
5684 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5685 (uint64_t)stats->stat_IfHCOutOctets_lo;
5687 sc->stat_IfHCOutBadOctets =
5688 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5689 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5691 sc->stat_IfHCInUcastPkts =
5692 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5693 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5695 sc->stat_IfHCInMulticastPkts =
5696 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5697 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5699 sc->stat_IfHCInBroadcastPkts =
5700 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5701 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5703 sc->stat_IfHCOutUcastPkts =
5704 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5705 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5707 sc->stat_IfHCOutMulticastPkts =
5708 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5709 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5711 sc->stat_IfHCOutBroadcastPkts =
5712 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5713 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5715 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5716 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5718 sc->stat_Dot3StatsCarrierSenseErrors =
5719 stats->stat_Dot3StatsCarrierSenseErrors;
5721 sc->stat_Dot3StatsFCSErrors =
5722 stats->stat_Dot3StatsFCSErrors;
5724 sc->stat_Dot3StatsAlignmentErrors =
5725 stats->stat_Dot3StatsAlignmentErrors;
5727 sc->stat_Dot3StatsSingleCollisionFrames =
5728 stats->stat_Dot3StatsSingleCollisionFrames;
5730 sc->stat_Dot3StatsMultipleCollisionFrames =
5731 stats->stat_Dot3StatsMultipleCollisionFrames;
5733 sc->stat_Dot3StatsDeferredTransmissions =
5734 stats->stat_Dot3StatsDeferredTransmissions;
5736 sc->stat_Dot3StatsExcessiveCollisions =
5737 stats->stat_Dot3StatsExcessiveCollisions;
5739 sc->stat_Dot3StatsLateCollisions =
5740 stats->stat_Dot3StatsLateCollisions;
5742 sc->stat_EtherStatsCollisions =
5743 stats->stat_EtherStatsCollisions;
5745 sc->stat_EtherStatsFragments =
5746 stats->stat_EtherStatsFragments;
5748 sc->stat_EtherStatsJabbers =
5749 stats->stat_EtherStatsJabbers;
5751 sc->stat_EtherStatsUndersizePkts =
5752 stats->stat_EtherStatsUndersizePkts;
5754 sc->stat_EtherStatsOverrsizePkts =
5755 stats->stat_EtherStatsOverrsizePkts;
5757 sc->stat_EtherStatsPktsRx64Octets =
5758 stats->stat_EtherStatsPktsRx64Octets;
5760 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5761 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5763 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5764 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5766 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5767 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5769 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5770 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5772 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5773 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5775 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5776 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5778 sc->stat_EtherStatsPktsTx64Octets =
5779 stats->stat_EtherStatsPktsTx64Octets;
5781 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5782 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5784 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5785 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5787 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5788 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5790 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5791 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5793 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5794 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5796 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5797 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5799 sc->stat_XonPauseFramesReceived =
5800 stats->stat_XonPauseFramesReceived;
5802 sc->stat_XoffPauseFramesReceived =
5803 stats->stat_XoffPauseFramesReceived;
5805 sc->stat_OutXonSent =
5806 stats->stat_OutXonSent;
5808 sc->stat_OutXoffSent =
5809 stats->stat_OutXoffSent;
5811 sc->stat_FlowControlDone =
5812 stats->stat_FlowControlDone;
5814 sc->stat_MacControlFramesReceived =
5815 stats->stat_MacControlFramesReceived;
5817 sc->stat_XoffStateEntered =
5818 stats->stat_XoffStateEntered;
5820 sc->stat_IfInFramesL2FilterDiscards =
5821 stats->stat_IfInFramesL2FilterDiscards;
5823 sc->stat_IfInRuleCheckerDiscards =
5824 stats->stat_IfInRuleCheckerDiscards;
5826 sc->stat_IfInFTQDiscards =
5827 stats->stat_IfInFTQDiscards;
5829 sc->stat_IfInMBUFDiscards =
5830 stats->stat_IfInMBUFDiscards;
5832 sc->stat_IfInRuleCheckerP4Hit =
5833 stats->stat_IfInRuleCheckerP4Hit;
5835 sc->stat_CatchupInRuleCheckerDiscards =
5836 stats->stat_CatchupInRuleCheckerDiscards;
5838 sc->stat_CatchupInFTQDiscards =
5839 stats->stat_CatchupInFTQDiscards;
5841 sc->stat_CatchupInMBUFDiscards =
5842 stats->stat_CatchupInMBUFDiscards;
5844 sc->stat_CatchupInRuleCheckerP4Hit =
5845 stats->stat_CatchupInRuleCheckerP4Hit;
5847 sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5850 * Update the interface statistics from the
5851 * hardware statistics.
5853 IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions);
5855 IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts +
5856 (u_long)sc->stat_EtherStatsOverrsizePkts +
5857 (u_long)sc->stat_IfInMBUFDiscards +
5858 (u_long)sc->stat_Dot3StatsAlignmentErrors +
5859 (u_long)sc->stat_Dot3StatsFCSErrors +
5860 (u_long)sc->stat_IfInRuleCheckerDiscards +
5861 (u_long)sc->stat_IfInFTQDiscards +
5862 (u_long)sc->com_no_buffers);
5864 IFNET_STAT_SET(ifp, oerrors,
5865 (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5866 (u_long)sc->stat_Dot3StatsExcessiveCollisions +
5867 (u_long)sc->stat_Dot3StatsLateCollisions);
5870 /****************************************************************************/
5871 /* Periodic function to notify the bootcode that the driver is still */
5876 /****************************************************************************/
5878 bce_pulse(void *xsc)
5880 struct bce_softc *sc = xsc;
5881 struct ifnet *ifp = &sc->arpcom.ac_if;
5884 lwkt_serialize_enter(&sc->main_serialize);
5886 /* Tell the firmware that the driver is still running. */
5887 msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5888 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
5890 /* Update the bootcode condition. */
5891 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
5893 /* Report whether the bootcode still knows the driver is running. */
5894 if (!sc->bce_drv_cardiac_arrest) {
5895 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
5896 sc->bce_drv_cardiac_arrest = 1;
5897 if_printf(ifp, "Bootcode lost the driver pulse! "
5898 "(bc_state = 0x%08X)\n", sc->bc_state);
5902 * Not supported by all bootcode versions.
5903 * (v5.0.11+ and v5.2.1+) Older bootcode
5904 * will require the driver to reset the
5905 * controller to clear this condition.
5907 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
5908 sc->bce_drv_cardiac_arrest = 0;
5909 if_printf(ifp, "Bootcode found the driver pulse! "
5910 "(bc_state = 0x%08X)\n", sc->bc_state);
5914 /* Schedule the next pulse. */
5915 callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
5916 sc->bce_timer_cpuid);
5918 lwkt_serialize_exit(&sc->main_serialize);
5921 /****************************************************************************/
5922 /* Periodic function to check whether MSI is lost */
5926 /****************************************************************************/
5928 bce_check_msi(void *xsc)
5930 struct bce_softc *sc = xsc;
5931 struct ifnet *ifp = &sc->arpcom.ac_if;
5932 struct status_block *sblk = sc->status_block;
5933 struct bce_tx_ring *txr = &sc->tx_rings[0];
5934 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5936 lwkt_serialize_enter(&sc->main_serialize);
5938 KKASSERT(mycpuid == sc->bce_msix[0].msix_cpuid);
5940 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
5941 lwkt_serialize_exit(&sc->main_serialize);
5945 if (bce_get_hw_rx_cons(rxr) != rxr->rx_cons ||
5946 bce_get_hw_tx_cons(txr) != txr->tx_cons ||
5947 (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5948 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5949 if (sc->bce_check_rx_cons == rxr->rx_cons &&
5950 sc->bce_check_tx_cons == txr->tx_cons &&
5951 sc->bce_check_status_idx == rxr->last_status_idx) {
5954 if (!sc->bce_msi_maylose) {
5955 sc->bce_msi_maylose = TRUE;
5959 msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
5960 if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) {
5962 if_printf(ifp, "lost MSI\n");
5964 REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
5965 msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
5966 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);
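/*
 * Toggling the MSI enable bit off and back on nudges the chip into
 * regenerating the lost MSI message.
 */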
5969 } else if (bootverbose) {
5970 if_printf(ifp, "MSI may be lost\n");
5974 sc->bce_msi_maylose = FALSE;
5975 sc->bce_check_rx_cons = rxr->rx_cons;
5976 sc->bce_check_tx_cons = txr->tx_cons;
5977 sc->bce_check_status_idx = rxr->last_status_idx;
5980 callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
5982 lwkt_serialize_exit(&sc->main_serialize);
5985 /****************************************************************************/
5986 /* Periodic function to perform maintenance tasks. */
5990 /****************************************************************************/
5992 bce_tick_serialized(struct bce_softc *sc)
5994 struct mii_data *mii;
5996 ASSERT_SERIALIZED(&sc->main_serialize);
5998 /* Update the statistics from the hardware statistics block. */
5999 bce_stats_update(sc);
6001 /* Schedule the next tick. */
6002 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
6003 sc->bce_timer_cpuid);
6005 /* If the link is already up then we're done. */
6009 mii = device_get_softc(sc->bce_miibus);
6012 /* Check if the link has come up. */
6013 if ((mii->mii_media_status & IFM_ACTIVE) &&
6014 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6018 /* Now that the link is up, handle any outstanding TX traffic. */
6019 for (i = 0; i < sc->tx_ring_cnt; ++i)
6020 ifsq_devstart_sched(sc->tx_rings[i].ifsq);
6027 struct bce_softc *sc = xsc;
6029 lwkt_serialize_enter(&sc->main_serialize);
6030 bce_tick_serialized(sc);
6031 lwkt_serialize_exit(&sc->main_serialize);
6034 /****************************************************************************/
6035 /* Adds any sysctl parameters for tuning or debugging purposes. */
6038 /* 0 for success, positive value for failure. */
6039 /****************************************************************************/
6041 bce_add_sysctls(struct bce_softc *sc)
6043 struct sysctl_ctx_list *ctx;
6044 struct sysctl_oid_list *children;
6045 #if defined(BCE_TSS_DEBUG) || defined(BCE_RSS_DEBUG)
6050 ctx = device_get_sysctl_ctx(sc->bce_dev);
6051 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
6053 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
6054 CTLTYPE_INT | CTLFLAG_RW,
6055 sc, 0, bce_sysctl_tx_bds_int, "I",
6056 "Send max coalesced BD count during interrupt");
6057 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
6058 CTLTYPE_INT | CTLFLAG_RW,
6059 sc, 0, bce_sysctl_tx_bds, "I",
6060 "Send max coalesced BD count");
6061 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
6062 CTLTYPE_INT | CTLFLAG_RW,
6063 sc, 0, bce_sysctl_tx_ticks_int, "I",
6064 "Send coalescing ticks during interrupt");
6065 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
6066 CTLTYPE_INT | CTLFLAG_RW,
6067 sc, 0, bce_sysctl_tx_ticks, "I",
6068 "Send coalescing ticks");
6070 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
6071 CTLTYPE_INT | CTLFLAG_RW,
6072 sc, 0, bce_sysctl_rx_bds_int, "I",
6073 "Receive max coalesced BD count during interrupt");
6074 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
6075 CTLTYPE_INT | CTLFLAG_RW,
6076 sc, 0, bce_sysctl_rx_bds, "I",
6077 "Receive max coalesced BD count");
6078 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
6079 CTLTYPE_INT | CTLFLAG_RW,
6080 sc, 0, bce_sysctl_rx_ticks_int, "I",
6081 "Receive coalescing ticks during interrupt");
6082 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
6083 CTLTYPE_INT | CTLFLAG_RW,
6084 sc, 0, bce_sysctl_rx_ticks, "I",
6085 "Receive coalescing ticks");
6087 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_rings",
6088 CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
6089 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
6090 CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages");
6092 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_rings",
6093 CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
6094 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
6095 CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages");
6097 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
6098 CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0,
6099 "# segments before write to hardware registers");
6101 #ifdef IFPOLL_ENABLE
6102 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "npoll_offset",
6103 CTLTYPE_INT|CTLFLAG_RW, sc, 0, bce_sysctl_npoll_offset,
6104 "I", "NPOLLING cpu offset");
6107 #ifdef BCE_RSS_DEBUG
6108 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rss_debug",
6109 CTLFLAG_RW, &sc->rss_debug, 0, "RSS debug level");
6110 for (i = 0; i < sc->rx_ring_cnt; ++i) {
6111 ksnprintf(node, sizeof(node), "rx%d_pkt", i);
6112 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6113 CTLFLAG_RW, &sc->rx_rings[i].rx_pkts,
6114 "RXed packets");
6115 }
6116 #endif
6118 #ifdef BCE_TSS_DEBUG
6119 for (i = 0; i < sc->tx_ring_cnt; ++i) {
6120 ksnprintf(node, sizeof(node), "tx%d_pkt", i);
6121 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6122 CTLFLAG_RW, &sc->tx_rings[i].tx_pkts,
6123 "TXed packets");
6124 }
6125 #endif
6127 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6128 "stat_IfHCInOctets",
6129 CTLFLAG_RD, &sc->stat_IfHCInOctets,
6130 "Bytes received");
6132 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6133 "stat_IfHCInBadOctets",
6134 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6135 "Bad bytes received");
6137 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6138 "stat_IfHCOutOctets",
6139 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6140 "Bytes sent");
6142 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6143 "stat_IfHCOutBadOctets",
6144 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6145 "Bad bytes sent");
6147 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6148 "stat_IfHCInUcastPkts",
6149 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6150 "Unicast packets received");
6152 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6153 "stat_IfHCInMulticastPkts",
6154 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6155 "Multicast packets received");
6157 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6158 "stat_IfHCInBroadcastPkts",
6159 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6160 "Broadcast packets received");
6162 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6163 "stat_IfHCOutUcastPkts",
6164 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6165 "Unicast packets sent");
6167 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6168 "stat_IfHCOutMulticastPkts",
6169 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6170 "Multicast packets sent");
6172 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6173 "stat_IfHCOutBroadcastPkts",
6174 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6175 "Broadcast packets sent");
6177 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6178 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6179 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6180 0, "Internal MAC transmit errors");
6182 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6183 "stat_Dot3StatsCarrierSenseErrors",
6184 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6185 0, "Carrier sense errors");
6187 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6188 "stat_Dot3StatsFCSErrors",
6189 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6190 0, "Frame check sequence errors");
6192 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6193 "stat_Dot3StatsAlignmentErrors",
6194 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6195 0, "Alignment errors");
6197 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6198 "stat_Dot3StatsSingleCollisionFrames",
6199 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6200 0, "Single Collision Frames");
6202 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6203 "stat_Dot3StatsMultipleCollisionFrames",
6204 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6205 0, "Multiple Collision Frames");
6207 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6208 "stat_Dot3StatsDeferredTransmissions",
6209 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6210 0, "Deferred Transmissions");
6212 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6213 "stat_Dot3StatsExcessiveCollisions",
6214 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6215 0, "Excessive Collisions");
6217 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6218 "stat_Dot3StatsLateCollisions",
6219 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6220 0, "Late Collisions");
6222 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6223 "stat_EtherStatsCollisions",
6224 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6225 0, "Collision frames");
6227 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6228 "stat_EtherStatsFragments",
6229 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6230 0, "Fragmented packets");
6232 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6233 "stat_EtherStatsJabbers",
6234 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6235 0, "Jabber packets");
6237 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6238 "stat_EtherStatsUndersizePkts",
6239 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6240 0, "Undersize packets");
6242 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6243 "stat_EtherStatsOverrsizePkts",
6244 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6245 0, "stat_EtherStatsOverrsizePkts");
6247 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6248 "stat_EtherStatsPktsRx64Octets",
6249 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6250 0, "64 byte packets received");
6252 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6253 "stat_EtherStatsPktsRx65Octetsto127Octets",
6254 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6255 0, "65 to 127 byte packets received");
6257 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6258 "stat_EtherStatsPktsRx128Octetsto255Octets",
6259 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6260 0, "128 to 255 byte packets received");
6262 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6263 "stat_EtherStatsPktsRx256Octetsto511Octets",
6264 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6265 0, "256 to 511 byte packets received");
6267 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6268 "stat_EtherStatsPktsRx512Octetsto1023Octets",
6269 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6270 0, "512 to 1023 byte packets received");
6272 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6273 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
6274 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6275 0, "1024 to 1522 byte packets received");
6277 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6278 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
6279 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6280 0, "1523 to 9022 byte packets received");
6282 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6283 "stat_EtherStatsPktsTx64Octets",
6284 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6285 0, "64 byte packets sent");
6287 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6288 "stat_EtherStatsPktsTx65Octetsto127Octets",
6289 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6290 0, "65 to 127 byte packets sent");
6292 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6293 "stat_EtherStatsPktsTx128Octetsto255Octets",
6294 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6295 0, "128 to 255 byte packets sent");
6297 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6298 "stat_EtherStatsPktsTx256Octetsto511Octets",
6299 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6300 0, "256 to 511 byte packets sent");
6302 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6303 "stat_EtherStatsPktsTx512Octetsto1023Octets",
6304 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6305 0, "512 to 1023 byte packets sent");
6307 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6308 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
6309 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6310 0, "1024 to 1522 byte packets sent");
6312 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6313 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
6314 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6315 0, "1523 to 9022 byte packets sent");
6317 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6318 "stat_XonPauseFramesReceived",
6319 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6320 0, "XON pause frames receved");
6322 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6323 "stat_XoffPauseFramesReceived",
6324 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6325 0, "XOFF pause frames received");
6327 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6328 "stat_OutXonSent",
6329 CTLFLAG_RD, &sc->stat_OutXonSent,
6330 0, "XON pause frames sent");
6332 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6333 "stat_OutXoffSent",
6334 CTLFLAG_RD, &sc->stat_OutXoffSent,
6335 0, "XOFF pause frames sent");
6337 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6338 "stat_FlowControlDone",
6339 CTLFLAG_RD, &sc->stat_FlowControlDone,
6340 0, "Flow control done");
6342 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6343 "stat_MacControlFramesReceived",
6344 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6345 0, "MAC control frames received");
6347 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6348 "stat_XoffStateEntered",
6349 CTLFLAG_RD, &sc->stat_XoffStateEntered,
6350 0, "XOFF state entered");
6352 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6353 "stat_IfInFramesL2FilterDiscards",
6354 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6355 0, "Received L2 packets discarded");
6357 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6358 "stat_IfInRuleCheckerDiscards",
6359 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6360 0, "Received packets discarded by rule");
6362 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6363 "stat_IfInFTQDiscards",
6364 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6365 0, "Received packet FTQ discards");
6367 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6368 "stat_IfInMBUFDiscards",
6369 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6370 0, "Received packets discarded due to lack of controller buffer memory");
6372 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6373 "stat_IfInRuleCheckerP4Hit",
6374 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6375 0, "Received packets rule checker hits");
6377 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6378 "stat_CatchupInRuleCheckerDiscards",
6379 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6380 0, "Received packets discarded in Catchup path");
6382 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6383 "stat_CatchupInFTQDiscards",
6384 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6385 0, "Received packets discarded in FTQ in Catchup path");
6387 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6388 "stat_CatchupInMBUFDiscards",
6389 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6390 0, "Received packets discarded in controller buffer memory in Catchup path");
6392 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6393 "stat_CatchupInRuleCheckerP4Hit",
6394 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6395 0, "Received packets rule checker hits in Catchup path");
6397 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6398 "com_no_buffers",
6399 CTLFLAG_RD, &sc->com_no_buffers,
6400 0, "Valid packets received but no RX buffers available");
6404 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
6406 struct bce_softc *sc = arg1;
6408 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6409 &sc->bce_tx_quick_cons_trip_int,
6410 BCE_COALMASK_TX_BDS_INT);
6414 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
6416 struct bce_softc *sc = arg1;
6418 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6419 &sc->bce_tx_quick_cons_trip,
6420 BCE_COALMASK_TX_BDS);
6424 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
6426 struct bce_softc *sc = arg1;
6428 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6429 &sc->bce_tx_ticks_int,
6430 BCE_COALMASK_TX_TICKS_INT);
6434 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
6436 struct bce_softc *sc = arg1;
6438 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6439 &sc->bce_tx_ticks,
6440 BCE_COALMASK_TX_TICKS);
6444 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
6446 struct bce_softc *sc = arg1;
6448 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6449 &sc->bce_rx_quick_cons_trip_int,
6450 BCE_COALMASK_RX_BDS_INT);
6454 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
6456 struct bce_softc *sc = arg1;
6458 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6459 &sc->bce_rx_quick_cons_trip,
6460 BCE_COALMASK_RX_BDS);
6464 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
6466 struct bce_softc *sc = arg1;
6468 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6469 &sc->bce_rx_ticks_int,
6470 BCE_COALMASK_RX_TICKS_INT);
6474 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
6476 struct bce_softc *sc = arg1;
6478 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6479 &sc->bce_rx_ticks,
6480 BCE_COALMASK_RX_TICKS);
6483 static int
6484 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
6485 uint32_t coalchg_mask)
6486 {
6487 struct bce_softc *sc = arg1;
6488 struct ifnet *ifp = &sc->arpcom.ac_if;
6489 int error = 0, v;
6491 ifnet_serialize_all(ifp);
6493 v = *coal;
6494 error = sysctl_handle_int(oidp, &v, 0, req);
6495 if (!error && req->newptr != NULL) {
6496 if (v < 0) {
6497 error = EINVAL;
6498 } else {
6499 *coal = v;
6500 sc->bce_coalchg_mask |= coalchg_mask;
6502 /* Commit changes */
6503 bce_coal_change(sc);
6504 }
6505 }
6507 ifnet_deserialize_all(ifp);
6508 return error;
6509 }
6511 static void
6512 bce_coal_change(struct bce_softc *sc)
6513 {
6514 struct ifnet *ifp = &sc->arpcom.ac_if;
6515 int i;
6517 ASSERT_SERIALIZED(&sc->main_serialize);
6519 if ((ifp->if_flags & IFF_RUNNING) == 0) {
6520 sc->bce_coalchg_mask = 0;
6521 return;
6522 }
6524 if (sc->bce_coalchg_mask &
6525 (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
6526 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
6527 (sc->bce_tx_quick_cons_trip_int << 16) |
6528 sc->bce_tx_quick_cons_trip);
6529 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6532 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6533 BCE_HC_SB_CONFIG1;
6534 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
6535 (sc->bce_tx_quick_cons_trip_int << 16) |
6536 sc->bce_tx_quick_cons_trip);
6539 if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
6540 sc->bce_tx_quick_cons_trip,
6541 sc->bce_tx_quick_cons_trip_int);
6542 }
6543 }
6545 if (sc->bce_coalchg_mask &
6546 (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
6547 REG_WR(sc, BCE_HC_TX_TICKS,
6548 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6549 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6552 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6553 BCE_HC_SB_CONFIG1;
6554 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
6555 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6558 if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
6559 sc->bce_tx_ticks, sc->bce_tx_ticks_int);
6560 }
6561 }
6563 if (sc->bce_coalchg_mask &
6564 (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
6565 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
6566 (sc->bce_rx_quick_cons_trip_int << 16) |
6567 sc->bce_rx_quick_cons_trip);
6568 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6571 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6572 BCE_HC_SB_CONFIG1;
6573 REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
6574 (sc->bce_rx_quick_cons_trip_int << 16) |
6575 sc->bce_rx_quick_cons_trip);
6578 if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
6579 sc->bce_rx_quick_cons_trip,
6580 sc->bce_rx_quick_cons_trip_int);
6581 }
6582 }
6584 if (sc->bce_coalchg_mask &
6585 (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
6586 REG_WR(sc, BCE_HC_RX_TICKS,
6587 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6588 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6591 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6592 BCE_HC_SB_CONFIG1;
6593 REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
6594 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6597 if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
6598 sc->bce_rx_ticks, sc->bce_rx_ticks_int);
6599 }
6600 }
6602 sc->bce_coalchg_mask = 0;
6603 }
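/*
 * Each coalescing register packs two 16 bit fields: the "during
 * interrupt" value in the high word and the normal value in the low
 * word.  With (hypothetical) tx_ticks_int = 80 and tx_ticks = 100,
 * (80 << 16) | 100 == 0x00500064 is written to BCE_HC_TX_TICKS and to
 * the per-status-block copies.
 */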
6605 static int
6606 bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
6607 uint16_t *flags0, uint16_t *mss0)
6608 {
6609 struct mbuf *m = *mp;
6610 uint16_t flags;
6611 int thoff, iphlen, hoff;
6614 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
6616 hoff = m->m_pkthdr.csum_lhlen;
6617 iphlen = m->m_pkthdr.csum_iphlen;
6618 thoff = m->m_pkthdr.csum_thlen;
6620 KASSERT(hoff >= sizeof(struct ether_header),
6621 ("invalid ether header len %d", hoff));
6622 KASSERT(iphlen >= sizeof(struct ip),
6623 ("invalid ip header len %d", iphlen));
6624 KASSERT(thoff >= sizeof(struct tcphdr),
6625 ("invalid tcp header len %d", thoff));
6627 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
6628 m = m_pullup(m, hoff + iphlen + thoff);
6629 if (m == NULL) {
6630 *mp = NULL;
6631 return ENOBUFS;
6632 }
6633 *mp = m;
6634 }
6636 /* Set the LSO flag in the TX BD */
6637 flags = TX_BD_FLAGS_SW_LSO;
6639 /* Set the length of IP + TCP options (in 32 bit words) */
6640 flags |= (((iphlen + thoff -
6641 sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);
6643 *mss0 = htole16(m->m_pkthdr.tso_segsz);
6644 *flags0 = flags;
6646 return 0;
6647 }
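/*
 * Worked example for the option-word encoding above: with, say, a 24
 * byte IP header and a 32 byte TCP header, (24 + 32 - 20 - 20) >> 2
 * yields 4 option words, so 4 << 8 is OR'd into the BD flags next to
 * TX_BD_FLAGS_SW_LSO.
 */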
6649 static void
6650 bce_setup_serialize(struct bce_softc *sc)
6651 {
6652 int i = 0, j;
6654 /*
6655 * Allocate serializer array
6656 */
6658 /* Main + TX + RX */
6659 sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt;
6661 sc->serializes =
6662 kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
6663 M_DEVBUF, M_WAITOK | M_ZERO);
6667 /*
6668 * NOTE: Order is critical
6669 */
6673 KKASSERT(i < sc->serialize_cnt);
6674 sc->serializes[i++] = &sc->main_serialize;
6676 for (j = 0; j < sc->rx_ring_cnt; ++j) {
6677 KKASSERT(i < sc->serialize_cnt);
6678 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
6681 for (j = 0; j < sc->tx_ring_cnt; ++j) {
6682 KKASSERT(i < sc->serialize_cnt);
6683 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
6686 KKASSERT(i == sc->serialize_cnt);
6687 }
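/*
 * The fixed order (main serializer first, then every RX ring, then
 * every TX ring) is what allows bce_serialize_skipmain() below to
 * enter everything except sc->serializes[0] by passing an offset of 1.
 */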
6690 bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
6692 struct bce_softc *sc = ifp->if_softc;
6694 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
6698 bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6700 struct bce_softc *sc = ifp->if_softc;
6702 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
6706 bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6708 struct bce_softc *sc = ifp->if_softc;
6710 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
6711 slz);
6712 }
6714 #ifdef INVARIANTS
6716 static void
6717 bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
6718 boolean_t serialized)
6720 struct bce_softc *sc = ifp->if_softc;
6722 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
6723 slz, serialized);
6724 }
6726 #endif /* INVARIANTS */
6729 bce_serialize_skipmain(struct bce_softc *sc)
6731 lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
6735 bce_deserialize_skipmain(struct bce_softc *sc)
6737 lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
6740 #ifdef IFPOLL_ENABLE
6743 bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
6745 struct bce_softc *sc = (void *)arg1;
6746 struct ifnet *ifp = &sc->arpcom.ac_if;
6749 off = sc->npoll_ofs;
6750 error = sysctl_handle_int(oidp, &off, 0, req);
6751 if (error || req->newptr == NULL)
6752 return error;
6753 if (off < 0)
6754 return EINVAL;
6756 ifnet_serialize_all(ifp);
6757 if (off >= ncpus2 || off % sc->rx_ring_cnt2 != 0) {
6758 error = EINVAL;
6759 } else {
6760 error = 0;
6761 sc->npoll_ofs = off;
6762 }
6763 ifnet_deserialize_all(ifp);
6765 return error;
6766 }
6768 #endif /* IFPOLL_ENABLE */
6770 static void
6771 bce_set_timer_cpuid(struct bce_softc *sc, boolean_t polling)
6772 {
6773 if (polling)
6774 sc->bce_timer_cpuid = 0; /* XXX */
6775 else
6776 sc->bce_timer_cpuid = sc->bce_msix[0].msix_cpuid;
6777 }
6779 static int
6780 bce_alloc_intr(struct bce_softc *sc)
6781 {
6782 int irq_flags;
6784 bce_try_alloc_msix(sc);
6785 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
6786 return 0;
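/*
 * Fallback path: MSI-X is unavailable or was declined, so allocate a
 * single legacy/MSI vector and record it in bce_msix[0]; the rest of
 * the driver can then treat all interrupt types uniformly.
 */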
6788 sc->bce_irq_type = pci_alloc_1intr(sc->bce_dev, bce_msi_enable,
6789 &sc->bce_irq_rid, &irq_flags);
6791 sc->bce_res_irq = bus_alloc_resource_any(sc->bce_dev, SYS_RES_IRQ,
6792 &sc->bce_irq_rid, irq_flags);
6793 if (sc->bce_res_irq == NULL) {
6794 device_printf(sc->bce_dev, "PCI map interrupt failed\n");
6795 return ENXIO;
6796 }
6797 sc->bce_msix[0].msix_cpuid = rman_get_cpuid(sc->bce_res_irq);
6798 sc->bce_msix[0].msix_serialize = &sc->main_serialize;
6800 return 0;
6801 }
6803 static void
6804 bce_try_alloc_msix(struct bce_softc *sc)
6805 {
6806 struct bce_msix_data *msix;
6807 int offset, i, error;
6808 boolean_t setup = FALSE;
6810 if (sc->rx_ring_cnt == 1)
6811 return;
6813 if (sc->rx_ring_cnt2 == ncpus2) {
6814 offset = 0;
6815 } else {
6816 int offset_def =
6817 (sc->rx_ring_cnt2 * device_get_unit(sc->bce_dev)) % ncpus2;
6819 offset = device_getenv_int(sc->bce_dev,
6820 "msix.offset", offset_def);
6821 if (offset >= ncpus2 || offset % sc->rx_ring_cnt2 != 0) {
6822 device_printf(sc->bce_dev,
6823 "invalid msix.offset %d, use %d\n",
6824 offset, offset_def);
6825 offset = offset_def;
6826 }
6827 }
6829 msix = &sc->bce_msix[0];
6830 msix->msix_serialize = &sc->main_serialize;
6831 msix->msix_func = bce_intr_msi_oneshot;
6832 msix->msix_arg = sc;
6833 KKASSERT(offset < ncpus2);
6834 msix->msix_cpuid = offset;
6835 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s combo",
6836 device_get_nameunit(sc->bce_dev));
6838 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6839 struct bce_rx_ring *rxr = &sc->rx_rings[i];
6841 msix = &sc->bce_msix[i];
6843 msix->msix_serialize = &rxr->rx_serialize;
6844 msix->msix_arg = rxr;
6845 msix->msix_cpuid = offset + (i % sc->rx_ring_cnt2);
6846 KKASSERT(msix->msix_cpuid < ncpus2);
6848 if (i < sc->tx_ring_cnt) {
6849 msix->msix_func = bce_intr_msix_rxtx;
6850 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
6851 "%s rxtx%d", device_get_nameunit(sc->bce_dev), i);
6853 msix->msix_func = bce_intr_msix_rx;
6854 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
6855 "%s rx%d", device_get_nameunit(sc->bce_dev), i);
6862 bce_setup_msix_table(sc);
6863 REG_WR(sc, BCE_PCI_MSIX_CONTROL, BCE_MSIX_MAX - 1);
6864 REG_WR(sc, BCE_PCI_MSIX_TBL_OFF_BIR, BCE_PCI_GRC_WINDOW2_BASE);
6865 REG_WR(sc, BCE_PCI_MSIX_PBA_OFF_BIT, BCE_PCI_GRC_WINDOW3_BASE);
6867 REG_RD(sc, BCE_PCI_MSIX_CONTROL);
6869 error = pci_setup_msix(sc->bce_dev);
6870 if (error) {
6871 device_printf(sc->bce_dev, "Setup MSI-X failed\n");
6872 goto back;
6873 }
6875 setup = TRUE;
6876 for (i = 0; i < sc->rx_ring_cnt; ++i) {
6877 msix = &sc->bce_msix[i];
6879 error = pci_alloc_msix_vector(sc->bce_dev, i, &msix->msix_rid,
6880 msix->msix_cpuid);
6881 if (error) {
6882 device_printf(sc->bce_dev,
6883 "Unable to allocate MSI-X %d on cpu%d\n",
6884 i, msix->msix_cpuid);
6885 goto back;
6886 }
6888 msix->msix_res = bus_alloc_resource_any(sc->bce_dev,
6889 SYS_RES_IRQ, &msix->msix_rid, RF_ACTIVE);
6890 if (msix->msix_res == NULL) {
6891 device_printf(sc->bce_dev,
6892 "Unable to allocate MSI-X %d resource\n", i);
6898 pci_enable_msix(sc->bce_dev);
6899 sc->bce_irq_type = PCI_INTR_TYPE_MSIX;
6900 return;
6901 back:
6902 bce_free_msix(sc, setup);
6906 bce_setup_ring_cnt(struct bce_softc *sc)
6908 int msix_enable, ring_max, msix_cnt2, msix_cnt, i;
6910 sc->rx_ring_cnt = 1;
6911 sc->rx_ring_cnt2 = 1;
6912 sc->tx_ring_cnt = 1;
6914 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709 &&
6915 BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5716)
6916 return;
6918 msix_enable = device_getenv_int(sc->bce_dev, "msix.enable",
6926 msix_cnt = pci_msix_count(sc->bce_dev);
6927 if (msix_cnt <= 1)
6928 return;
6930 i = 0;
6931 while ((1 << (i + 1)) <= msix_cnt)
6932 ++i;
6933 msix_cnt2 = 1 << i;
6935 /*
6936 * One extra RX ring will be needed (see below), so make sure
6937 * that there are enough MSI-X vectors.
6938 */
6939 if (msix_cnt == msix_cnt2) {
6941 /*
6942 * This probably will not happen; 5709/5716
6943 * come with 9 MSI-X vectors.
6944 */
6945 msix_cnt2 >>= 1;
6946 if (msix_cnt2 <= 1) {
6947 device_printf(sc->bce_dev,
6948 "MSI-X count %d could not be used\n", msix_cnt);
6951 device_printf(sc->bce_dev, "MSI-X count %d is power of 2\n",
6956 * Setup RX ring count
6958 ring_max = BCE_RX_RING_MAX;
6959 if (ring_max > msix_cnt2)
6960 ring_max = msix_cnt2;
6961 sc->rx_ring_cnt2 = device_getenv_int(sc->bce_dev, "rx_rings",
6963 sc->rx_ring_cnt2 = if_ring_count2(sc->rx_ring_cnt2, ring_max);
6965 /*
6966 * Don't use MSI-X if the effective RX ring count is 1.
6967 * If the effective RX ring count is 1, the TX ring count
6968 * will also be 1, and this RX ring and the TX ring must be
6969 * bundled into one MSI-X vector, so the hot path would be
6970 * exactly the same as with MSI.  Besides, the first RX ring
6971 * would still have to be fully populated even though it only
6972 * accepts packets whose RSS hash can't be calculated, e.g.
6973 * ARP packets; a waste of resources at the very least.
6974 */
6975 if (sc->rx_ring_cnt2 == 1)
6976 return;
6978 /*
6979 * One extra RX ring is allocated, since the first RX ring
6980 * could not be used for RSS hashed packets whose masked
6981 * hash is 0. The first RX ring is only used for packets
6982 * whose RSS hash could not be calculated, e.g. ARP packets.
6983 * This extra RX ring will be used for packets whose masked
6984 * hash is 0. The effective RX ring count involved in RSS
6985 * is still sc->rx_ring_cnt2.
6986 */
6987 KKASSERT(sc->rx_ring_cnt2 + 1 <= msix_cnt);
6988 sc->rx_ring_cnt = sc->rx_ring_cnt2 + 1;
6990 /*
6991 * Setup TX ring count
6993 * NOTE:
6994 * TX ring count must not exceed the effective RSS RX ring
6995 * count, since we use the RX ring software data struct to save
6996 * the status index and various other MSI-X related state.
6997 */
6998 ring_max = BCE_TX_RING_MAX;
6999 if (ring_max > msix_cnt2)
7000 ring_max = msix_cnt2;
7001 if (ring_max > sc->rx_ring_cnt2)
7002 ring_max = sc->rx_ring_cnt2;
7003 sc->tx_ring_cnt = device_getenv_int(sc->bce_dev, "tx_rings",
7005 sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max);
7006 }
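/*
 * Worked example (hypothetical): with 9 MSI-X vectors msix_cnt2 rounds
 * down to 8; on a system with ncpus2 == 4, rx_ring_cnt2 becomes 4,
 * rx_ring_cnt becomes 5 (one extra ring for masked hash 0) and
 * tx_ring_cnt is capped at 4.
 */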
7009 bce_free_msix(struct bce_softc *sc, boolean_t setup)
7013 KKASSERT(sc->rx_ring_cnt > 1);
7015 for (i = 0; i < sc->rx_ring_cnt; ++i) {
7016 struct bce_msix_data *msix = &sc->bce_msix[i];
7018 if (msix->msix_res != NULL) {
7019 bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
7020 msix->msix_rid, msix->msix_res);
7022 if (msix->msix_rid >= 0)
7023 pci_release_msix_vector(sc->bce_dev, msix->msix_rid);
7026 pci_teardown_msix(sc->bce_dev);
7030 bce_free_intr(struct bce_softc *sc)
7032 if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) {
7033 if (sc->bce_res_irq != NULL) {
7034 bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
7035 sc->bce_irq_rid, sc->bce_res_irq);
7037 if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
7038 pci_release_msi(sc->bce_dev);
7040 bce_free_msix(sc, TRUE);
7045 bce_setup_msix_table(struct bce_softc *sc)
7047 REG_WR(sc, BCE_PCI_GRC_WINDOW_ADDR, BCE_PCI_GRC_WINDOW_ADDR_SEP_WIN);
7048 REG_WR(sc, BCE_PCI_GRC_WINDOW2_ADDR, BCE_MSIX_TABLE_ADDR);
7049 REG_WR(sc, BCE_PCI_GRC_WINDOW3_ADDR, BCE_MSIX_PBA_ADDR);
7053 bce_setup_intr(struct bce_softc *sc)
7055 void (*irq_handle)(void *);
7058 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
7059 return bce_setup_msix(sc);
7061 if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
7062 irq_handle = bce_intr_legacy;
7063 } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
7064 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
7065 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
7066 irq_handle = bce_intr_msi_oneshot;
7067 sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
7069 irq_handle = bce_intr_msi;
7070 sc->bce_flags |= BCE_CHECK_MSI_FLAG;
7073 panic("%s: unsupported intr type %d",
7074 device_get_nameunit(sc->bce_dev), sc->bce_irq_type);
7077 error = bus_setup_intr(sc->bce_dev, sc->bce_res_irq, INTR_MPSAFE,
7078 irq_handle, sc, &sc->bce_intrhand, &sc->main_serialize);
7080 device_printf(sc->bce_dev, "Failed to setup IRQ!\n");
7088 bce_teardown_intr(struct bce_softc *sc)
7090 if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX)
7091 bus_teardown_intr(sc->bce_dev, sc->bce_res_irq, sc->bce_intrhand);
7092 else
7093 bce_teardown_msix(sc, sc->rx_ring_cnt);
7094 }
7096 static int
7097 bce_setup_msix(struct bce_softc *sc)
7098 {
7099 int i, error;
7101 for (i = 0; i < sc->rx_ring_cnt; ++i) {
7102 struct bce_msix_data *msix = &sc->bce_msix[i];
7105 error = bus_setup_intr_descr(sc->bce_dev, msix->msix_res,
7106 INTR_MPSAFE, msix->msix_func, msix->msix_arg,
7107 &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
7108 if (error) {
7109 device_printf(sc->bce_dev, "could not set up %s "
7110 "interrupt handler.\n", msix->msix_desc);
7111 bce_teardown_msix(sc, i);
7112 return error;
7113 }
7114 }
7115 return 0;
7116 }
7118 static void
7119 bce_teardown_msix(struct bce_softc *sc, int msix_cnt)
7120 {
7121 int i;
7123 for (i = 0; i < msix_cnt; ++i) {
7124 struct bce_msix_data *msix = &sc->bce_msix[i];
7126 bus_teardown_intr(sc->bce_dev, msix->msix_res,
7127 msix->msix_handle);
7128 }
7129 }
7131 static void
7132 bce_init_rss(struct bce_softc *sc)
7133 {
7134 uint8_t key[BCE_RLUP_RSS_KEY_CNT * BCE_RLUP_RSS_KEY_SIZE];
7135 uint32_t tbl = 0;
7136 int i;
7138 KKASSERT(sc->rx_ring_cnt > 2);
7140 /*
7141 * Configure RSS keys
7142 */
7143 toeplitz_get_key(key, sizeof(key));
7144 for (i = 0; i < BCE_RLUP_RSS_KEY_CNT; ++i) {
7145 uint32_t rss_key;
7147 rss_key = BCE_RLUP_RSS_KEYVAL(key, i);
7148 BCE_RSS_DPRINTF(sc, 1, "rss_key%d 0x%08x\n", i, rss_key);
7150 REG_WR(sc, BCE_RLUP_RSS_KEY(i), rss_key);
7151 }
7153 /*
7154 * Configure the redirect table
7157 * - The "queue ID" in redirect table is the software RX ring's
7158 * index _minus_ one.
7159 * - The last RX ring, whose "queue ID" is (sc->rx_ring_cnt - 2)
7160 * will be used for packets whose masked hash is 0.
7161 * (see also: comment in bce_setup_ring_cnt())
7163 * The redirect table is configured in the following fashion, except
7164 * for the masked hash 0, which is noted above:
7165 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
7166 */
7167 for (i = 0; i < BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
7168 int shift = (i % 8) << 2, qid;
7170 qid = i % sc->rx_ring_cnt2;
7173 if (qid == 0)
7174 qid = sc->rx_ring_cnt - 2;
7175 KKASSERT(qid < (sc->rx_ring_cnt - 1));
7177 tbl |= qid << shift;
7178 if ((i % 8) == 7) {
7179 BCE_RSS_DPRINTF(sc, 1, "tbl 0x%08x\n", tbl);
7180 REG_WR(sc, BCE_RLUP_RSS_DATA, tbl);
7181 REG_WR(sc, BCE_RLUP_RSS_COMMAND, (i >> 3) |
7182 BCE_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
7183 BCE_RLUP_RSS_COMMAND_WRITE |
7184 BCE_RLUP_RSS_COMMAND_HASH_MASK);
7185 tbl = 0;
7186 }
7187 }
7188 REG_WR(sc, BCE_RLUP_RSS_CONFIG,
7189 BCE_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI);
7190 }
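/*
 * Worked example (hypothetical): with rx_ring_cnt2 == 4 and
 * rx_ring_cnt == 5, i % 4 yields queue IDs 0,1,2,3,... and every 0 is
 * rewritten to rx_ring_cnt - 2 == 3, so the table holds
 * 3,1,2,3,3,1,2,3,...; queue ID 0 (software ring 1) is left to carry
 * only packets with no usable RSS hash.
 */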
7192 static void
7193 bce_npoll_coal_change(struct bce_softc *sc)
7194 {
7195 uint32_t old_rx_cons, old_tx_cons;
7197 old_rx_cons = sc->bce_rx_quick_cons_trip_int;
7198 old_tx_cons = sc->bce_tx_quick_cons_trip_int;
7199 sc->bce_rx_quick_cons_trip_int = 1;
7200 sc->bce_tx_quick_cons_trip_int = 1;
7202 sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
7203 BCE_COALMASK_RX_BDS_INT;
7204 bce_coal_change(sc);
7206 sc->bce_rx_quick_cons_trip_int = old_rx_cons;
7207 sc->bce_tx_quick_cons_trip_int = old_tx_cons;
7208 }
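/*
 * Forcing both "during interrupt" BD thresholds to 1 effectively
 * disables hardware coalescing while polling drives the rings.  Only
 * the softc copies are restored afterwards, so the next
 * bce_coal_change() writes the tuned values back to the hardware.
 */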
7210 static struct pktinfo *
7211 bce_rss_pktinfo(struct pktinfo *pi, uint32_t status,
7212 const struct l2_fhdr *l2fhdr)
7213 {
7214 /* Check for an IP datagram. */
7215 if ((status & L2_FHDR_STATUS_IP_DATAGRAM) == 0)
7216 return NULL;
7218 /* Check if the IP checksum is valid. */
7219 if (l2fhdr->l2_fhdr_ip_xsum != 0xffff)
7220 return NULL;
7222 /* Check for a valid TCP/UDP frame. */
7223 if (status & L2_FHDR_STATUS_TCP_SEGMENT) {
7224 if (status & L2_FHDR_ERRORS_TCP_XSUM)
7225 return NULL;
7226 if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7227 return NULL;
7228 pi->pi_l3proto = IPPROTO_TCP;
7229 } else if (status & L2_FHDR_STATUS_UDP_DATAGRAM) {
7230 if (status & L2_FHDR_ERRORS_UDP_XSUM)
7231 return NULL;
7232 if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7233 return NULL;
7234 pi->pi_l3proto = IPPROTO_UDP;
7235 } else {
7236 return NULL;
7237 }
7238 pi->pi_netisr = NETISR_IP;
7239 return pi;
7240 }