2 * Copyright (c) 2006-2007 Broadcom Corporation
3 * David Christensen <davidch@broadcom.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written consent.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
30 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
34 * The following controllers are supported by this driver:
42 * The following controllers are not supported by this driver:
48 * BCM5709S A0, A1, B0, B1, B2, C0
52 #include "opt_ifpoll.h"
54 #include <sys/param.h>
56 #include <sys/endian.h>
57 #include <sys/kernel.h>
58 #include <sys/interrupt.h>
60 #include <sys/malloc.h>
61 #include <sys/queue.h>
63 #include <sys/serialize.h>
64 #include <sys/socket.h>
65 #include <sys/sockio.h>
66 #include <sys/sysctl.h>
68 #include <netinet/ip.h>
69 #include <netinet/tcp.h>
72 #include <net/ethernet.h>
74 #include <net/if_arp.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/if_poll.h>
78 #include <net/if_types.h>
79 #include <net/ifq_var.h>
80 #include <net/vlan/if_vlan_var.h>
81 #include <net/vlan/if_vlan_ether.h>
83 #include <dev/netif/mii_layer/mii.h>
84 #include <dev/netif/mii_layer/miivar.h>
85 #include <dev/netif/mii_layer/brgphyreg.h>
87 #include <bus/pci/pcireg.h>
88 #include <bus/pci/pcivar.h>
90 #include "miibus_if.h"
92 #include <dev/netif/bce/if_bcereg.h>
93 #include <dev/netif/bce/if_bcefw.h>
#define BCE_MSI_CKINTVL		((10 * hz) / 1000)	/* 10ms */

/****************************************************************************/
/* PCI Device ID Table */
/* Used by bce_probe() to identify the devices supported by this driver. */
/****************************************************************************/
/* Size of the description buffer ksnprintf()'ed/kmalloc()'ed in bce_probe(). */
#define BCE_DEVDESC_MAX		64
static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
		"HP NC371i Multifunction Gigabit Server Adapter" },
	/* Catch-all entries (PCI_ANY_ID) must follow the OEM-specific ones. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
		"HP NC373T PCIe Multifunction Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
		"HP NC374m PCIe Multifunction Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
		"HP NC373m Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
		"HP NC373F PCIe Multifunc Giga Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708S 1000Base-T" },

	/* BCM5709C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-T" },

	/* BCM5709S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-SX" },

	/* BCM5716 controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5716 1000Base-T" },
	/*
	 * NOTE(review): bce_probe() iterates this table until it finds a
	 * NULL bce_name; the NULL sentinel entry and closing brace are not
	 * visible in this chunk -- confirm they exist in the full source.
	 */
167 /****************************************************************************/
168 /* Supported Flash NVRAM device data. */
169 /****************************************************************************/
/*
 * NOTE(review): the initializer's opening brace and several entries'
 * trailing description strings are not visible in this chunk; the
 * visible lines are reproduced as-is -- verify against the full source.
 */
static const struct flash_spec flash_table[] =
#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BCE_NV_WREN)
	/* SEEPROM (buffered) */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* SEEPROM (buffered) */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BCE_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709/5716 buffered flash (256kB)",
/****************************************************************************/
/* DragonFly device entry points. */
/****************************************************************************/
static int	bce_probe(device_t);
static int	bce_attach(device_t);
static int	bce_detach(device_t);
static void	bce_shutdown(device_t);

/****************************************************************************/
/* BCE Register/Memory Access Routines */
/****************************************************************************/
static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
static void	bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
/* NOTE(review): 'u32' is inconsistent with the uint32_t used elsewhere. */
static uint32_t	bce_shmem_rd(struct bce_softc *, u32);
static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
static int	bce_miibus_read_reg(device_t, int, int);
static int	bce_miibus_write_reg(device_t, int, int, int);
static void	bce_miibus_statchg(device_t);

/****************************************************************************/
/* BCE NVRAM Access Routines */
/****************************************************************************/
static int	bce_acquire_nvram_lock(struct bce_softc *);
static int	bce_release_nvram_lock(struct bce_softc *);
static void	bce_enable_nvram_access(struct bce_softc *);
static void	bce_disable_nvram_access(struct bce_softc *);
static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
static int	bce_init_nvram(struct bce_softc *);
static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
static int	bce_nvram_test(struct bce_softc *);

/****************************************************************************/
/* BCE DMA Allocate/Free Routines */
/****************************************************************************/
static int	bce_dma_alloc(struct bce_softc *);
static void	bce_dma_free(struct bce_softc *);
static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);

/****************************************************************************/
/* BCE Firmware Synchronization and Load */
/****************************************************************************/
static int	bce_fw_sync(struct bce_softc *, uint32_t);
static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
static void	bce_start_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_start_rxp_cpu(struct bce_softc *);
static void	bce_init_rxp_cpu(struct bce_softc *);
static void	bce_init_txp_cpu(struct bce_softc *);
static void	bce_init_tpat_cpu(struct bce_softc *);
static void	bce_init_cp_cpu(struct bce_softc *);
static void	bce_init_com_cpu(struct bce_softc *);
static void	bce_init_cpus(struct bce_softc *);

static void	bce_stop(struct bce_softc *);
static int	bce_reset(struct bce_softc *, uint32_t);
static int	bce_chipinit(struct bce_softc *);
static int	bce_blockinit(struct bce_softc *);
static int	bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t *,
static void	bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
static void	bce_probe_pci_caps(struct bce_softc *);
static void	bce_print_adapter_info(struct bce_softc *);
static void	bce_get_media(struct bce_softc *);

static int	bce_create_tx_ring(struct bce_tx_ring *);
static void	bce_destroy_tx_ring(struct bce_tx_ring *);
static void	bce_init_tx_context(struct bce_tx_ring *);
static int	bce_init_tx_chain(struct bce_tx_ring *);
static void	bce_free_tx_chain(struct bce_tx_ring *);
static int	bce_create_rx_ring(struct bce_rx_ring *);
static void	bce_destroy_rx_ring(struct bce_rx_ring *);
static void	bce_init_rx_context(struct bce_rx_ring *);
static int	bce_init_rx_chain(struct bce_rx_ring *);
static void	bce_free_rx_chain(struct bce_rx_ring *);

static void	bce_xmit(struct bce_tx_ring *);
static int	bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
static int	bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
		    uint16_t *, uint16_t *);
static void	bce_start(struct ifnet *, struct ifaltq_subque *);
static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bce_watchdog(struct ifnet *);
static int	bce_ifmedia_upd(struct ifnet *);
static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bce_init(void *);
static void	bce_mgmt_init(struct bce_softc *);

static int	bce_init_ctx(struct bce_softc *);
static void	bce_get_mac_addr(struct bce_softc *);
static void	bce_set_mac_addr(struct bce_softc *);
static void	bce_phy_intr(struct bce_softc *);
static void	bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
static void	bce_tx_intr(struct bce_tx_ring *, uint16_t);
static void	bce_disable_intr(struct bce_softc *);
static void	bce_enable_intr(struct bce_softc *);
static void	bce_reenable_intr(struct bce_softc *);

static void	bce_npoll(struct ifnet *, struct ifpoll_info *);
static void	bce_npoll_compat(struct ifnet *, void *, int);

static void	bce_intr(struct bce_softc *);
static void	bce_intr_legacy(void *);
static void	bce_intr_msi(void *);
static void	bce_intr_msi_oneshot(void *);
static void	bce_set_rx_mode(struct bce_softc *);
static void	bce_stats_update(struct bce_softc *);
static void	bce_tick(void *);
static void	bce_tick_serialized(struct bce_softc *);
static void	bce_pulse(void *);
static void	bce_check_msi(void *);
static void	bce_add_sysctls(struct bce_softc *);

static void	bce_coal_change(struct bce_softc *);
static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
		    uint32_t *, uint32_t);
/*
 * NOTE: Don't set bce_tx_ticks_int/bce_tx_ticks to 1023. Linux's bnx2
 * takes 1023 as the TX ticks limit. However, using 1023 will
 * cause 5708(B2) to generate extra interrupts (~2000/s) even when
 * there is _no_ network activity on the NIC.
 */
/* Interrupt coalescing defaults ("bcm:" values are Broadcom's choices). */
static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
static uint32_t	bce_rx_bds = 0;			/* bcm: 6 */
static uint32_t	bce_rx_ticks_int = 150;		/* bcm: 18 */
static uint32_t	bce_rx_ticks = 150;		/* bcm: 18 */

/* presumably # of TX BDs batched per doorbell write -- TODO confirm */
static int	bce_tx_wreg = 8;

/* Prefer MSI over legacy INTx when available (hw.bce.msi.enable). */
static int	bce_msi_enable = 1;

static int	bce_rx_pages = RX_PAGES_DEFAULT;
static int	bce_tx_pages = TX_PAGES_DEFAULT;

/* Boot-time tunables mirroring the defaults above. */
TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
446 /****************************************************************************/
447 /* DragonFly device dispatch table. */
448 /****************************************************************************/
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),
	/*
	 * NOTE(review): the table terminator (DEVMETHOD_END) and closing
	 * brace are not visible in this chunk -- confirm in full source.
	 */
/*
 * NOTE(review): the driver name and methods fields of this initializer
 * are not visible in this chunk; only the softc size survives.
 */
static driver_t bce_driver = {
	sizeof(struct bce_softc)

static devclass_t bce_devclass;

DECLARE_DUMMY_MODULE(if_bce);
/* bce requires miibus for PHY management. */
MODULE_DEPEND(bce, miibus, 1, 1, 1);
DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
483 /****************************************************************************/
484 /* Device probe function. */
486 /* Compares the device to the driver's list of supported devices and */
487 /* reports back to the OS whether this is the right driver for the device. */
490 /* BUS_PROBE_DEFAULT on success, positive value on failure. */
491 /****************************************************************************/
bce_probe(device_t dev)
	uint16_t vid, did, svid, sdid;

	/* Get the data for the device to be probed. */
	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	svid = pci_get_subvendor(dev);
	sdid = pci_get_subdevice(dev);

	/* Look through the list of known devices for a match. */
	for (t = bce_devs; t->bce_name != NULL; ++t) {
		if (vid == t->bce_vid && did == t->bce_did &&
		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
			uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);

			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);

			/*
			 * Print out the device identity; low nibble of the
			 * revision id is the metal rev, high nibble the
			 * letter rev ('A' + value).
			 */
			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
			    ((revid & 0xf0) >> 4) + 'A', revid & 0xf);

			device_set_desc_copy(dev, descbuf);
			kfree(descbuf, M_TEMP);
528 /****************************************************************************/
529 /* PCI Capabilities Probe Function. */
531 * Walks the PCI capabilities list for the device to find what features are
536 /****************************************************************************/
bce_print_adapter_info(struct bce_softc *sc)
	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);

	/* Chip revision: letter from bits 15:12, metal rev from bits 11:4. */
	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
	    ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));

	/* Bus info: PCIe link speed/width, or PCI/PCI-X width and clock. */
	if (sc->bce_flags & BCE_PCIE_FLAG) {
		kprintf("Bus (PCIe x%d, ", sc->link_width);
		switch (sc->link_speed) {
			kprintf("2.5Gbps); ");
			kprintf("Unknown link speed); ");
		kprintf("Bus (PCI%s, %s, %dMHz); ",
		    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
		    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),

	/* Firmware version and device features. */
	kprintf("B/C (%s)", sc->bce_bc_ver);

	if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
	    (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
			kprintf("MFW[%s]", sc->bce_mfw_ver);
		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
582 /****************************************************************************/
583 /* PCI Capabilities Probe Function. */
585 * Walks the PCI capabilities list for the device to find what features are
590 /****************************************************************************/
bce_probe_pci_caps(struct bce_softc *sc)
	device_t dev = sc->bce_dev;

	if (pci_is_pcix(dev))
		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;

	ptr = pci_get_pciecap_ptr(dev);
		/*
		 * PCIe Link Status register lives at offset 0x12 of the
		 * PCIe capability: speed in bits 3:0, width in bits 9:4.
		 */
		uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);

		sc->link_speed = link_status & 0xf;
		sc->link_width = (link_status >> 4) & 0x3f;
		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
		sc->bce_flags |= BCE_PCIE_FLAG;
612 /****************************************************************************/
613 /* Device attach function. */
615 /* Allocates device resources, performs secondary chip identification, */
616 /* resets and initializes the hardware, and initializes driver instance */
620 /* 0 on success, positive value on failure. */
621 /****************************************************************************/
bce_attach(device_t dev)
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	void (*irq_handle)(void *);
	struct mii_probe_args mii_args;
	uintptr_t mii_priv = 0;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	pci_enable_busmaster(dev);

	bce_probe_pci_caps(sc);

	/* Allocate PCI memory resources. */
	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | PCI_RF_DENSE);
	if (sc->bce_res_mem == NULL) {
		device_printf(dev, "PCI memory allocation failed\n");
	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);

	/* Allocate PCI IRQ resources. */
	sc->bce_irq_type = pci_alloc_1intr(dev, bce_msi_enable,
	    &sc->bce_irq_rid, &irq_flags);

	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->bce_irq_rid, irq_flags);
	if (sc->bce_res_irq == NULL) {
		device_printf(dev, "PCI map interrupt failed\n");

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
	    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);

	/* Save ASIC revision info. */
	sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch (BCE_CHIP_ID(sc)) {
	case BCE_CHIP_ID_5706_A0:
	case BCE_CHIP_ID_5706_A1:
	case BCE_CHIP_ID_5708_A0:
	case BCE_CHIP_ID_5708_B0:
	case BCE_CHIP_ID_5709_A0:
	case BCE_CHIP_ID_5709_B0:
	case BCE_CHIP_ID_5709_B1:
	/* 5709C B2 seems to work fine */
	case BCE_CHIP_ID_5709_B2:
		device_printf(dev, "Unsupported chip id 0x%08x!\n",

	/* PHY quirks depend on chip family and revision. */
	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
		    BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
			mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
		mii_priv |= BRGPHY_FLAG_BER_BUG;

	/* Select the interrupt handler matching the allocated IRQ type. */
	if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
		irq_handle = bce_intr_legacy;
	} else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
			irq_handle = bce_intr_msi_oneshot;
			sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
			irq_handle = bce_intr_msi;
			sc->bce_flags |= BCE_CHECK_MSI_FLAG;
		panic("%s: unsupported intr type %d",
		    device_get_nameunit(dev), sc->bce_irq_type);

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BCE_SHM_HDR_SIGNATURE_SIG) {
		/* Multi-port devices use different offsets in shared memory. */
		sc->bce_shmem_base = REG_RD_IND(sc,
		    BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Fetch the bootcode revision. */
	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		/* Decode one dotted-decimal component per byte, MSB first. */
		num = (uint8_t)(val >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				sc->bce_bc_ver[j++] = (num / k) + '0';
		sc->bce_bc_ver[j++] = '.';

	/* Check if any management firmware is running. */
	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;

		/* Allow time for firmware to enter the running state. */
		for (i = 0; i < 30; i++) {
			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
			if (val & BCE_CONDITION_MFW_RUN_MASK)

	/* Check the current bootcode state. */
	val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
	    BCE_CONDITION_MFW_RUN_MASK;
	if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
	    val != BCE_CONDITION_MFW_RUN_NONE) {
		uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);

		/* Copy the MFW version string, 4 bytes per indirect read. */
		for (i = 0, j = 0; j < 3; j++) {
			val = bce_reg_rd_ind(sc, addr + j * 4);
			memcpy(&sc->bce_mfw_ver[i], &val, 4);

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
		sc->bce_flags |= BCE_PCIX_FLAG;

		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
		    BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;

		/* Plain PCI: M66EN strap selects 66MHz, else 33MHz. */
		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
			sc->bus_speed_mhz = 33;

	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bce_flags |= BCE_PCI_32BIT_FLAG;

	/* Reset the controller. */
	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);

	/* Initialize the controller. */
	rc = bce_chipinit(sc);
		device_printf(dev, "Controller initialization failed!\n");

	/* Perform NVRAM test. */
	rc = bce_nvram_test(sc);
		device_printf(dev, "NVRAM test failed!\n");

	/* Fetch the permanent Ethernet MAC address. */
	bce_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

	/* Force more frequent interrupts. */
	sc->bce_tx_quick_cons_trip_int = 1;
	sc->bce_tx_quick_cons_trip = 1;
	sc->bce_tx_ticks_int = 0;
	sc->bce_tx_ticks = 0;

	sc->bce_rx_quick_cons_trip_int = 1;
	sc->bce_rx_quick_cons_trip = 1;
	sc->bce_rx_ticks_int = 0;
	sc->bce_rx_ticks = 0;

	/* Normal path: seed coalescing parameters from the tunables. */
	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
	sc->bce_tx_quick_cons_trip = bce_tx_bds;
	sc->bce_tx_ticks_int = bce_tx_ticks_int;
	sc->bce_tx_ticks = bce_tx_ticks;

	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
	sc->bce_rx_quick_cons_trip = bce_rx_bds;
	sc->bce_rx_ticks_int = bce_rx_ticks_int;
	sc->bce_rx_ticks = bce_rx_ticks;

	/* Update statistics once every second. */
	sc->bce_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */

	/* Find out RX/TX ring count */
	sc->ring_cnt = 1; /* XXX */

	/* Allocate DMA memory resources. */
	rc = bce_dma_alloc(sc);
		device_printf(dev, "DMA resource allocation failed!\n");

	/* Initialize the ifnet interface. */
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_init = bce_init;
	ifp->if_watchdog = bce_watchdog;
	ifp->if_npoll = bce_npoll;
	ifp->if_mtu = ETHERMTU;
	ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capabilities = BCE_IF_CAPABILITIES;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
	ifq_set_ready(&ifp->if_snd);

	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
		ifp->if_baudrate = IF_Gbps(2.5);
		ifp->if_baudrate = IF_Gbps(1);

	/* Probe for the PHY through the brgphy driver. */
	mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
	mii_args.mii_probemask = 1 << sc->bce_phy_addr;
	mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
	mii_args.mii_priv = mii_priv;

	rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
		device_printf(dev, "PHY probe failed!\n");

	/* Attach to the Ethernet interface list. */
	ether_ifattach(ifp, sc->eaddr, NULL);

	callout_init_mp(&sc->bce_tick_callout);
	callout_init_mp(&sc->bce_pulse_callout);
	callout_init_mp(&sc->bce_ckmsi_callout);

	/* Hookup IRQ last. */
	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, irq_handle, sc,
	    &sc->bce_intrhand, ifp->if_serializer);
		device_printf(dev, "Failed to setup IRQ!\n");

	/* Pin the TX queue to the CPU servicing the interrupt. */
	sc->bce_intr_cpuid = rman_get_cpuid(sc->bce_res_irq);
	ifq_set_cpuid(&ifp->if_snd, sc->bce_intr_cpuid);

	/* Add the supported sysctls to the kernel. */

	ifpoll_compat_setup(&sc->bce_npoll,
	    &sc->bce_sysctl_ctx, sc->bce_sysctl_tree, device_get_unit(dev),

	/*
	 * The chip reset earlier notified the bootcode that
	 * a driver is present. We now need to start our pulse
	 * routine so that the bootcode is reminded that we're
	 * still running.
	 */

	/* Get the firmware running so IPMI still works */

	bce_print_adapter_info(sc);
981 /****************************************************************************/
982 /* Device detach function. */
984 /* Stops the controller, resets the controller, and releases resources. */
987 /* 0 on success, positive value on failure. */
988 /****************************************************************************/
/*
 * Tear down the device: if attached, stop the controller under the
 * ifnet serializer, detach from the network stack, then free every
 * bus resource acquired during attach.  Returns 0 (see banner above).
 */
990 bce_detach(device_t dev)
992 struct bce_softc *sc = device_get_softc(dev);
994 if (device_is_attached(dev)) {
995 struct ifnet *ifp = &sc->arpcom.ac_if;
998 /* Stop and reset the controller. */
999 lwkt_serialize_enter(ifp->if_serializer);
/* Stop the firmware pulse callout before the chip is reset. */
1000 callout_stop(&sc->bce_pulse_callout);
/* Tell the bootcode whether WOL-capable link state should be kept. */
1002 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1003 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1005 msg = BCE_DRV_MSG_CODE_UNLOAD;
/* IRQ must be torn down while still serialized against the handler. */
1007 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
1008 lwkt_serialize_exit(ifp->if_serializer);
1010 ether_ifdetach(ifp);
1013 /* If we have a child device on the MII bus remove it too. */
1015 device_delete_child(dev, sc->bce_miibus);
1016 bus_generic_detach(dev);
1018 if (sc->bce_res_irq != NULL) {
1019 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid,
/* MSI was allocated in attach; release it after the IRQ resource. */
1023 if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
1024 pci_release_msi(dev);
1026 if (sc->bce_res_mem != NULL) {
1027 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1033 if (sc->bce_sysctl_tree != NULL)
1034 sysctl_ctx_free(&sc->bce_sysctl_ctx);
1040 /****************************************************************************/
1041 /* Device shutdown function. */
1043 /* Stops and resets the controller. */
1047 /****************************************************************************/
/*
 * System shutdown hook: stop and reset the controller under the ifnet
 * serializer, informing the bootcode via the appropriate unload message
 * (link-down variant when WOL is not supported).
 */
1049 bce_shutdown(device_t dev)
1051 struct bce_softc *sc = device_get_softc(dev);
1052 struct ifnet *ifp = &sc->arpcom.ac_if;
1055 lwkt_serialize_enter(ifp->if_serializer);
1057 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1058 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1060 msg = BCE_DRV_MSG_CODE_UNLOAD;
1062 lwkt_serialize_exit(ifp->if_serializer);
1066 /****************************************************************************/
1067 /* Indirect register read. */
1069 /* Reads NetXtreme II registers using an index/data register pair in PCI */
1070 /* configuration space. Using this mechanism avoids issues with posted */
1071 /* reads but is much slower than memory-mapped I/O. */
1074 /* The value of the register. */
1075 /****************************************************************************/
/*
 * Indirect register read: latch the target offset into the PCI config
 * window address register, then read the value back through the window
 * data register.  Slow but avoids posted-read issues (see banner above).
 */
1077 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
1079 device_t dev = sc->bce_dev;
1081 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1082 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1086 /****************************************************************************/
1087 /* Indirect register write. */
1089 /* Writes NetXtreme II registers using an index/data register pair in PCI */
1090 /* configuration space. Using this mechanism avoids issues with posted */
1090 /* writes but is much slower than memory-mapped I/O. */
1095 /****************************************************************************/
/*
 * Indirect register write: program the window address, then write the
 * value through the window data register in PCI config space.
 */
1097 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
1099 device_t dev = sc->bce_dev;
1101 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1102 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1106 /****************************************************************************/
1107 /* Shared memory write. */
1109 /* Writes NetXtreme II shared memory region. */
1113 /****************************************************************************/
/* Write one 32-bit word into shared memory, relative to bce_shmem_base. */
1115 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
1117 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1121 /****************************************************************************/
1122 /* Shared memory read. */
1124 /* Reads NetXtreme II shared memory region. */
1127 /* The 32 bit value read. */
1128 /****************************************************************************/
/* Read one 32-bit word from shared memory, relative to bce_shmem_base. */
1130 bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
1132 return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
1136 /****************************************************************************/
1137 /* Context memory write. */
1139 /* The NetXtreme II controller uses context memory to track connection */
1140 /* information for L2 and higher network protocols. */
1144 /****************************************************************************/
/*
 * Write a value into controller context memory at cid_addr + ctx_offset.
 * 5709/5716 use a mailbox-style DATA/CTRL pair and must poll for the
 * WRITE_REQ bit to clear; older chips write via DATA_ADR/DATA directly.
 */
1146 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
1149 uint32_t idx, offset = ctx_offset + cid_addr;
1150 uint32_t val, retry_cnt = 5;
1152 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1153 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1154 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1155 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
/* Poll until the hardware acknowledges the write (bounded retries). */
1157 for (idx = 0; idx < retry_cnt; idx++) {
1158 val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1159 if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
/* WRITE_REQ still set after all retries: report and give up. */
1164 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
1165 device_printf(sc->bce_dev,
1166 "Unable to write CTX memory: "
1167 "cid_addr = 0x%08X, offset = 0x%08X!\n",
1168 cid_addr, ctx_offset);
/* Pre-5709 path: plain address/data register pair, no handshake. */
1171 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1172 REG_WR(sc, BCE_CTX_DATA, ctx_val);
1177 /****************************************************************************/
1178 /* PHY register read. */
1180 /* Implements register reads on the MII bus. */
1183 /* The value of the register. */
1184 /****************************************************************************/
/*
 * MII bus read: issue a read command over MDIO and poll for completion.
 * Auto-polling is temporarily disabled around the transaction (and
 * restored afterwards) so the manual access does not race the hardware
 * poller.  Returns the low 16 bits of the register value.
 */
1186 bce_miibus_read_reg(device_t dev, int phy, int reg)
1188 struct bce_softc *sc = device_get_softc(dev);
1192 /* Make sure we are accessing the correct PHY address. */
1193 KASSERT(phy == sc->bce_phy_addr,
1194 ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1196 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1197 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1198 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1200 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
/* Read back to flush the write before starting the transaction. */
1201 REG_RD(sc, BCE_EMAC_MDIO_MODE);
/* Compose and start the MDIO read command. */
1206 val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1207 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1208 BCE_EMAC_MDIO_COMM_START_BUSY;
1209 REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
/* Wait for START_BUSY to clear, then fetch the data. */
1211 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1214 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1215 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1218 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1219 val &= BCE_EMAC_MDIO_COMM_DATA;
1224 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1225 if_printf(&sc->arpcom.ac_if,
1226 "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1230 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
/* Re-enable hardware auto-polling if it was on before. */
1233 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1234 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1235 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1237 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1238 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1242 return (val & 0xffff);
1246 /****************************************************************************/
1247 /* PHY register write. */
1249 /* Implements register writes on the MII bus. */
1252 /* The value of the register. */
1253 /****************************************************************************/
/*
 * MII bus write: mirror of bce_miibus_read_reg() — disable auto-poll,
 * issue the MDIO write command, poll START_BUSY for completion, then
 * restore auto-poll.  A timeout is only logged, not returned.
 */
1255 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1257 struct bce_softc *sc = device_get_softc(dev);
1261 /* Make sure we are accessing the correct PHY address. */
1262 KASSERT(phy == sc->bce_phy_addr,
1263 ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1265 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1266 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1267 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1269 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
/* Read back to flush the write before starting the transaction. */
1270 REG_RD(sc, BCE_EMAC_MDIO_MODE);
/* Compose and start the MDIO write command; val is the data field. */
1275 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1276 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1277 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1278 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1280 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1283 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1284 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1290 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1291 if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
/* Re-enable hardware auto-polling if it was on before. */
1293 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1294 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1295 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1297 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1298 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1306 /****************************************************************************/
1307 /* MII bus status change. */
1309 /* Called by the MII bus driver when the PHY establishes link to set the */
1310 /* MAC interface registers. */
1314 /****************************************************************************/
/*
 * MII link-state change callback: reprogram the EMAC mode register to
 * match the negotiated media — GMII vs MII port mode and full vs half
 * duplex.  The port-mode field is cleared first, then set.
 */
1316 bce_miibus_statchg(device_t dev)
1318 struct bce_softc *sc = device_get_softc(dev);
1319 struct mii_data *mii = device_get_softc(sc->bce_miibus);
1321 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1324 * Set MII or GMII interface based on the speed negotiated
1327 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1328 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1329 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1331 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1335 * Set half or full duplex based on the duplicity negotiated
/* HALF_DUPLEX bit is cleared for FDX, set otherwise. */
1338 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1339 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1341 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1346 /****************************************************************************/
1347 /* Acquire NVRAM lock. */
1349 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
1350 /* Lock 1 is used by the firmware and lock 2 is for use by the driver */
1351 /* (the remaining locks are reserved). */
1354 /* 0 on success, positive value on failure. */
1355 /****************************************************************************/
/*
 * Acquire the driver's NVRAM arbitration lock (ARB2) by setting the
 * request bit and polling until the hardware grants it, bounded by
 * NVRAM_TIMEOUT_COUNT iterations.
 */
1357 bce_acquire_nvram_lock(struct bce_softc *sc)
1362 /* Request access to the flash interface. */
1363 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1364 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1365 val = REG_RD(sc, BCE_NVM_SW_ARB);
1366 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
/* Grant never arrived: the poll loop exhausted its budget. */
1372 if (j >= NVRAM_TIMEOUT_COUNT) {
1379 /****************************************************************************/
1380 /* Release NVRAM lock. */
1382 /* When the caller is finished accessing NVRAM the lock must be released. */
1383 /* Lock 1 is used by the firmware and lock 2 is for use by the driver */
1384 /* (the remaining locks are reserved). */
1387 /* 0 on success, positive value on failure. */
1388 /****************************************************************************/
/*
 * Release the driver's NVRAM arbitration lock (ARB2): set the clear-
 * request bit and poll until the grant bit drops, bounded by
 * NVRAM_TIMEOUT_COUNT iterations.
 */
1390 bce_release_nvram_lock(struct bce_softc *sc)
1396 * Relinquish nvram interface.
1398 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1400 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1401 val = REG_RD(sc, BCE_NVM_SW_ARB);
1402 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
/* Lock did not release within the poll budget. */
1408 if (j >= NVRAM_TIMEOUT_COUNT) {
1415 /****************************************************************************/
1416 /* Enable NVRAM access. */
1418 /* Before accessing NVRAM for read or write operations the caller must */
1419 /* enable NVRAM access. */
1423 /****************************************************************************/
/*
 * Turn on NVRAM access by setting both the enable and write-enable
 * bits (read-modify-write; both are set even for read-only access).
 */
1425 bce_enable_nvram_access(struct bce_softc *sc)
1429 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1430 /* Enable both bits, even on read. */
1431 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1432 val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1436 /****************************************************************************/
1437 /* Disable NVRAM access. */
1439 /* When the caller is finished accessing NVRAM access must be disabled. */
1443 /****************************************************************************/
/*
 * Turn off NVRAM access by clearing both the enable and write-enable
 * bits (read-modify-write mirror of bce_enable_nvram_access()).
 */
1445 bce_disable_nvram_access(struct bce_softc *sc)
1449 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1451 /* Disable both bits, even after read. */
1452 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1453 val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1457 /****************************************************************************/
1458 /* Read a dword (32 bits) from NVRAM. */
1460 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */
1461 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */
1464 /* 0 on success and the 32 bit value read, positive value on failure. */
1465 /****************************************************************************/
/*
 * Read one 32-bit word from NVRAM into ret_val (raw bytes, as read
 * from the BCE_NVM_READ register).  Caller must already hold the NVRAM
 * lock and have enabled NVRAM access.  cmd_flags carries FIRST/LAST
 * framing bits for multi-word reads.
 */
1467 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1473 /* Build the command word. */
1474 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1476 /* Calculate the offset for buffered flash. */
1477 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
/* Translate a linear offset into the device's page/offset layout. */
1478 offset = ((offset / sc->bce_flash_info->page_size) <<
1479 sc->bce_flash_info->page_bits) +
1480 (offset % sc->bce_flash_info->page_size);
1484 * Clear the DONE bit separately, set the address to read,
1485 * and issue the read.
1487 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1488 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1489 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1491 /* Wait for completion. */
1492 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1497 val = REG_RD(sc, BCE_NVM_COMMAND);
1498 if (val & BCE_NVM_COMMAND_DONE) {
1499 val = REG_RD(sc, BCE_NVM_READ);
/* Copy the 4 data bytes out to the caller's buffer. */
1502 memcpy(ret_val, &val, 4);
1507 /* Check for errors. */
1508 if (i >= NVRAM_TIMEOUT_COUNT) {
1509 if_printf(&sc->arpcom.ac_if,
1510 "Timeout error reading NVRAM at offset 0x%08X!\n",
1518 /****************************************************************************/
1519 /* Initialize NVRAM access. */
1521 /* Identify the NVRAM device in use and prepare the NVRAM interface to */
1522 /* access that device. */
1525 /* 0 on success, positive value on failure. */
1526 /****************************************************************************/
/*
 * Identify the attached flash device and record it in bce_flash_info.
 * 5709/5716 always use flash_5709; other chips match the NVM_CFG1
 * strapping against flash_table, optionally reconfiguring the flash
 * interface registers for devices not natively supported.  Finally the
 * flash size is taken from shared memory (or the table entry).
 */
1528 bce_init_nvram(struct bce_softc *sc)
1531 int j, entry_count, rc = 0;
1532 const struct flash_spec *flash;
1534 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1535 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1536 sc->bce_flash_info = &flash_5709;
1537 goto bce_init_nvram_get_flash_size;
1540 /* Determine the selected interface. */
1541 val = REG_RD(sc, BCE_NVM_CFG1);
1543 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1546 * Flash reconfiguration is required to support additional
1547 * NVRAM devices not directly supported in hardware.
1548 * Check if the flash interface was reconfigured
1552 if (val & 0x40000000) {
1553 /* Flash interface reconfigured by bootcode. */
1554 for (j = 0, flash = flash_table; j < entry_count;
1556 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1557 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1558 sc->bce_flash_info = flash;
1563 /* Flash interface not yet reconfigured. */
/* Bit 23 selects which strapping mask applies. */
1566 if (val & (1 << 23))
1567 mask = FLASH_BACKUP_STRAP_MASK;
1569 mask = FLASH_STRAP_MASK;
1571 /* Look for the matching NVRAM device configuration data. */
1572 for (j = 0, flash = flash_table; j < entry_count;
1574 /* Check if the device matches any of the known devices. */
1575 if ((val & mask) == (flash->strapping & mask)) {
1576 /* Found a device match. */
1577 sc->bce_flash_info = flash;
1579 /* Request access to the flash interface. */
1580 rc = bce_acquire_nvram_lock(sc);
1584 /* Reconfigure the flash interface. */
1585 bce_enable_nvram_access(sc);
1586 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1587 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1588 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1589 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1590 bce_disable_nvram_access(sc);
1591 bce_release_nvram_lock(sc);
1597 /* Check if a matching device was found. */
1598 if (j == entry_count) {
1599 sc->bce_flash_info = NULL;
1600 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1604 bce_init_nvram_get_flash_size:
1605 /* Write the flash config data to the shared memory interface. */
1606 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
1607 BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
/* Prefer the size advertised in shared memory; fall back to the table. */
1609 sc->bce_flash_size = val;
1611 sc->bce_flash_size = sc->bce_flash_info->total_size;
1617 /****************************************************************************/
1618 /* Read an arbitrary range of data from NVRAM. */
1620 /* Prepares the NVRAM interface for access and reads the requested data */
1621 /* into the supplied buffer. */
1624 /* 0 on success and the data read, positive value on failure. */
1625 /****************************************************************************/
/*
 * Read an arbitrary byte range from NVRAM into ret_buf.  Takes the
 * NVRAM lock, enables access, then handles the unaligned head (pre_len),
 * whole-dword middle, and unaligned tail (extra) using dword reads with
 * FIRST/LAST command framing.  Lock and access are released on exit.
 */
1627 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1630 uint32_t cmd_flags, offset32, len32, extra;
1636 /* Request access to the flash interface. */
1637 rc = bce_acquire_nvram_lock(sc);
1641 /* Enable access to flash interface */
1642 bce_enable_nvram_access(sc);
1650 /* XXX should we release nvram lock if read_dword() fails? */
/* Head: bytes before the first dword boundary. */
1656 pre_len = 4 - (offset & 3);
1658 if (pre_len >= len32) {
1660 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1662 cmd_flags = BCE_NVM_COMMAND_FIRST;
1665 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
/* Copy only the requested sub-span of the head dword. */
1669 memcpy(ret_buf, buf + (offset & 3), pre_len);
/* Tail bookkeeping: round the remaining length up to a dword. */
1677 extra = 4 - (len32 & 3);
1678 len32 = (len32 + 4) & ~3;
1685 cmd_flags = BCE_NVM_COMMAND_LAST;
1687 cmd_flags = BCE_NVM_COMMAND_FIRST |
1688 BCE_NVM_COMMAND_LAST;
1690 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1692 memcpy(ret_buf, buf, 4 - extra);
1693 } else if (len32 > 0) {
1696 /* Read the first word. */
1700 cmd_flags = BCE_NVM_COMMAND_FIRST;
1702 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1704 /* Advance to the next dword. */
/* Middle: stream full dwords directly into the caller's buffer. */
1709 while (len32 > 4 && rc == 0) {
1710 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1712 /* Advance to the next dword. */
1719 goto bce_nvram_read_locked_exit;
/* Tail: final (possibly partial) dword. */
1721 cmd_flags = BCE_NVM_COMMAND_LAST;
1722 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1724 memcpy(ret_buf, buf, 4 - extra);
1727 bce_nvram_read_locked_exit:
1728 /* Disable access to flash interface and release the lock. */
1729 bce_disable_nvram_access(sc);
1730 bce_release_nvram_lock(sc);
1736 /****************************************************************************/
1737 /* Verifies that NVRAM is accessible and contains valid data. */
1739 /* Reads the configuration data from NVRAM and verifies that the CRC is */
1743 /* 0 on success, positive value on failure. */
1744 /****************************************************************************/
/*
 * Sanity-check the NVRAM contents: verify the magic value at offset 0,
 * then read the configuration area and verify the CRC32 residual over
 * the manufacturing (first 0x100 bytes) and feature-configuration
 * (second 0x100 bytes) regions.  Returns 0 on success, non-zero on
 * read error or validation failure.
 */
1746 bce_nvram_test(struct bce_softc *sc)
1748 uint32_t buf[BCE_NVRAM_SIZE / 4];
1749 uint32_t magic, csum;
1750 uint8_t *data = (uint8_t *)buf;
1754 * Check that the device NVRAM is valid by reading
1755 * the magic value at offset 0.
1757 rc = bce_nvram_read(sc, 0, data, 4);
/* NVRAM stores the magic big-endian; convert before comparing. */
1761 magic = be32toh(buf[0]);
1762 if (magic != BCE_NVRAM_MAGIC) {
1763 if_printf(&sc->arpcom.ac_if,
1764 "Invalid NVRAM magic value! Expected: 0x%08X, "
1765 "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1770 * Verify that the device NVRAM includes valid
1771 * configuration data.
1773 rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
/* A valid region's CRC32 folds to the well-known residual constant. */
1777 csum = ether_crc32_le(data, 0x100);
1778 if (csum != BCE_CRC32_RESIDUAL) {
1779 if_printf(&sc->arpcom.ac_if,
1780 "Invalid Manufacturing Information NVRAM CRC! "
1781 "Expected: 0x%08X, Found: 0x%08X\n",
1782 BCE_CRC32_RESIDUAL, csum);
1786 csum = ether_crc32_le(data + 0x100, 0x100);
1787 if (csum != BCE_CRC32_RESIDUAL) {
1788 if_printf(&sc->arpcom.ac_if,
/* Fixed typo in the printed value: "08%08X" -> "0x%08X". */
1790 "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1791 BCE_CRC32_RESIDUAL, csum);
1798 /****************************************************************************/
1799 /* Identifies the current media type of the controller and sets the PHY */
1804 /****************************************************************************/
/*
 * Determine the adapter's media type (copper vs SerDes) and set the PHY
 * address and capability flags accordingly.  5709/5716 derive this from
 * the dual-media bond id / strap registers; older chips use the bond-id
 * SERDES bit.  SerDes parts disable WOL and may be 2.5G capable.
 */
1806 bce_get_media(struct bce_softc *sc)
/* Default PHY address; overridden below for some SerDes parts. */
1810 sc->bce_phy_addr = 1;
1812 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1813 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1814 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
1815 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
1819 * The BCM5709S is software configurable
1820 * for Copper or SerDes operation.
1822 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
1824 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
1825 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
/* No fixed bond id: consult the strap (override or hardware strap). */
1829 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
1830 strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
1833 (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
/* Strap interpretation differs per PCI function — see elided cases. */
1836 if (pci_get_function(sc->bce_dev) == 0) {
1841 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1849 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1853 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
1854 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1857 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
/* SerDes parts cannot do WOL; non-5706 SerDes use PHY address 2. */
1858 sc->bce_flags |= BCE_NO_WOL_FLAG;
1859 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1860 sc->bce_phy_addr = 2;
1861 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1862 if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
1863 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
1865 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
1866 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
1867 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
/*
 * Free all DMA resources owned by a TX ring: descriptor-chain pages
 * (unload + free + tag), per-mbuf DMA maps (mbufs themselves must have
 * been freed by bce_stop() already), and the bookkeeping arrays.
 */
1873 bce_destroy_tx_ring(struct bce_tx_ring *txr)
1877 /* Destroy the TX buffer descriptor DMA stuffs. */
1878 if (txr->tx_bd_chain_tag != NULL) {
1879 for (i = 0; i < txr->tx_pages; i++) {
1880 if (txr->tx_bd_chain[i] != NULL) {
1881 bus_dmamap_unload(txr->tx_bd_chain_tag,
1882 txr->tx_bd_chain_map[i]);
1883 bus_dmamem_free(txr->tx_bd_chain_tag,
1884 txr->tx_bd_chain[i],
1885 txr->tx_bd_chain_map[i]);
1888 bus_dma_tag_destroy(txr->tx_bd_chain_tag);
1891 /* Destroy the TX mbuf DMA stuffs. */
1892 if (txr->tx_mbuf_tag != NULL) {
1893 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
1894 /* Must have been unloaded in bce_stop() */
1895 KKASSERT(txr->tx_mbuf_ptr[i] == NULL);
1896 bus_dmamap_destroy(txr->tx_mbuf_tag,
1897 txr->tx_mbuf_map[i]);
1899 bus_dma_tag_destroy(txr->tx_mbuf_tag);
/* Finally release the pointer/map/paddr arrays themselves. */
1902 if (txr->tx_bd_chain_map != NULL)
1903 kfree(txr->tx_bd_chain_map, M_DEVBUF);
1904 if (txr->tx_bd_chain != NULL)
1905 kfree(txr->tx_bd_chain, M_DEVBUF);
1906 if (txr->tx_bd_chain_paddr != NULL)
1907 kfree(txr->tx_bd_chain_paddr, M_DEVBUF);
1909 if (txr->tx_mbuf_map != NULL)
1910 kfree(txr->tx_mbuf_map, M_DEVBUF);
1911 if (txr->tx_mbuf_ptr != NULL)
1912 kfree(txr->tx_mbuf_ptr, M_DEVBUF);
/*
 * Free all DMA resources owned by an RX ring; the RX counterpart of
 * bce_destroy_tx_ring(), plus the temporary mbuf map and the per-mbuf
 * physical-address array.
 */
1917 bce_destroy_rx_ring(struct bce_rx_ring *rxr)
1921 /* Destroy the RX buffer descriptor DMA stuffs. */
1922 if (rxr->rx_bd_chain_tag != NULL) {
1923 for (i = 0; i < rxr->rx_pages; i++) {
1924 if (rxr->rx_bd_chain[i] != NULL) {
1925 bus_dmamap_unload(rxr->rx_bd_chain_tag,
1926 rxr->rx_bd_chain_map[i]);
1927 bus_dmamem_free(rxr->rx_bd_chain_tag,
1928 rxr->rx_bd_chain[i],
1929 rxr->rx_bd_chain_map[i]);
1932 bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
1935 /* Destroy the RX mbuf DMA stuffs. */
1936 if (rxr->rx_mbuf_tag != NULL) {
1937 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
1938 /* Must have been unloaded in bce_stop() */
1939 KKASSERT(rxr->rx_mbuf_ptr[i] == NULL);
1940 bus_dmamap_destroy(rxr->rx_mbuf_tag,
1941 rxr->rx_mbuf_map[i]);
/* The spare map used while swapping mbufs is destroyed too. */
1943 bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
1944 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
/* Finally release the pointer/map/paddr arrays themselves. */
1947 if (rxr->rx_bd_chain_map != NULL)
1948 kfree(rxr->rx_bd_chain_map, M_DEVBUF);
1949 if (rxr->rx_bd_chain != NULL)
1950 kfree(rxr->rx_bd_chain, M_DEVBUF);
1951 if (rxr->rx_bd_chain_paddr != NULL)
1952 kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);
1954 if (rxr->rx_mbuf_map != NULL)
1955 kfree(rxr->rx_mbuf_map, M_DEVBUF);
1956 if (rxr->rx_mbuf_ptr != NULL)
1957 kfree(rxr->rx_mbuf_ptr, M_DEVBUF);
1958 if (rxr->rx_mbuf_paddr != NULL)
1959 kfree(rxr->rx_mbuf_paddr, M_DEVBUF);
1963 /****************************************************************************/
1964 /* Free any DMA memory owned by the driver. */
1966 /* Scans through each data structre that requires DMA memory and frees */
1967 /* the memory if allocated. */
1971 /****************************************************************************/
/*
 * Free every DMA object owned by the softc: status block, statistics
 * block, context pages (5709/5716), all TX/RX rings, and finally the
 * parent DMA tag.  Each free is guarded so partial allocations from a
 * failed attach can be unwound safely.
 */
1973 bce_dma_free(struct bce_softc *sc)
1977 /* Destroy the status block. */
1978 if (sc->status_tag != NULL) {
1979 if (sc->status_block != NULL) {
1980 bus_dmamap_unload(sc->status_tag, sc->status_map);
1981 bus_dmamem_free(sc->status_tag, sc->status_block,
1984 bus_dma_tag_destroy(sc->status_tag);
1987 /* Destroy the statistics block. */
1988 if (sc->stats_tag != NULL) {
1989 if (sc->stats_block != NULL) {
1990 bus_dmamap_unload(sc->stats_tag, sc->stats_map);
1991 bus_dmamem_free(sc->stats_tag, sc->stats_block,
1994 bus_dma_tag_destroy(sc->stats_tag);
1997 /* Destroy the CTX DMA stuffs. */
1998 if (sc->ctx_tag != NULL) {
1999 for (i = 0; i < sc->ctx_pages; i++) {
2000 if (sc->ctx_block[i] != NULL) {
2001 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
2002 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2006 bus_dma_tag_destroy(sc->ctx_tag);
/* Tear down each TX ring, then the ring array. */
2010 if (sc->tx_rings != NULL) {
2011 for (i = 0; i < sc->ring_cnt; ++i)
2012 bce_destroy_tx_ring(&sc->tx_rings[i]);
2013 kfree(sc->tx_rings, M_DEVBUF);
/* Tear down each RX ring, then the ring array. */
2017 if (sc->rx_rings != NULL) {
2018 for (i = 0; i < sc->ring_cnt; ++i)
2019 bce_destroy_rx_ring(&sc->rx_rings[i]);
2020 kfree(sc->rx_rings, M_DEVBUF);
2023 /* Destroy the parent tag */
2024 if (sc->parent_tag != NULL)
2025 bus_dma_tag_destroy(sc->parent_tag);
2029 /****************************************************************************/
2030 /* Get DMA memory from the OS. */
2032 /* Validates that the OS has provided DMA buffers in response to a */
2033 /* bus_dmamap_load() call and saves the physical address of those buffers. */
2034 /* When the callback is used the OS will return 0 for the mapping function */
2035 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */
2036 /* failures back to the caller. */
2040 /****************************************************************************/
/*
 * bus_dmamap_load() callback: stores the single segment's physical
 * address into the caller-provided bus_addr_t.  Exactly one segment is
 * expected (enforced by the KASSERT).
 */
2042 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2044 bus_addr_t *busaddr = arg;
2046 /* Check for an error and signal the caller that an error occurred. */
2050 KASSERT(nseg == 1, ("only one segment is allowed"));
2051 *busaddr = segs->ds_addr;
/*
 * Allocate all DMA resources for a TX ring: validate the tunable page
 * count, allocate the bookkeeping arrays, create/load the descriptor-
 * chain pages, then create the TX mbuf tag and one DMA map per buffer
 * descriptor.  On mbuf-map creation failure, previously created maps
 * and the tag are released before returning the error.
 */
2056 bce_create_tx_ring(struct bce_tx_ring *txr)
2060 txr->tx_wreg = bce_tx_wreg;
/* Page count tunable must be a power of two within [1, TX_PAGES_MAX]. */
2062 pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
2063 if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
2064 device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
2065 pages = TX_PAGES_DEFAULT;
2067 txr->tx_pages = pages;
2069 txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
2070 M_DEVBUF, M_WAITOK | M_ZERO);
2071 txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
2072 M_DEVBUF, M_WAITOK | M_ZERO);
2073 txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
2074 M_DEVBUF, M_WAITOK | M_ZERO);
2076 txr->tx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_TX_BD(txr),
2077 M_DEVBUF, M_WAITOK | M_ZERO);
2078 txr->tx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_TX_BD(txr),
2079 M_DEVBUF, M_WAITOK | M_ZERO);
2082 * Create a DMA tag for the TX buffer descriptor chain,
2083 * allocate and clear the memory, and fetch the
2084 * physical address of the block.
2086 rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2087 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2088 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2089 0, &txr->tx_bd_chain_tag);
2091 device_printf(txr->sc->bce_dev, "Could not allocate "
2092 "TX descriptor chain DMA tag!\n");
2096 for (i = 0; i < txr->tx_pages; i++) {
2099 rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
2100 (void **)&txr->tx_bd_chain[i],
2101 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2102 &txr->tx_bd_chain_map[i]);
2104 device_printf(txr->sc->bce_dev,
2105 "Could not allocate %dth TX descriptor "
2106 "chain DMA memory!\n", i);
2110 rc = bus_dmamap_load(txr->tx_bd_chain_tag,
2111 txr->tx_bd_chain_map[i],
2112 txr->tx_bd_chain[i],
2113 BCE_TX_CHAIN_PAGE_SZ,
2114 bce_dma_map_addr, &busaddr,
/* Coherent memory must load synchronously; EINPROGRESS is fatal. */
2117 if (rc == EINPROGRESS) {
2118 panic("%s coherent memory loading "
2119 "is still in progress!",
2120 txr->sc->arpcom.ac_if.if_xname);
2122 device_printf(txr->sc->bce_dev, "Could not map %dth "
2123 "TX descriptor chain DMA memory!\n", i);
2124 bus_dmamem_free(txr->tx_bd_chain_tag,
2125 txr->tx_bd_chain[i],
2126 txr->tx_bd_chain_map[i]);
2127 txr->tx_bd_chain[i] = NULL;
2131 txr->tx_bd_chain_paddr[i] = busaddr;
2134 /* Create a DMA tag for TX mbufs. */
2135 rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
2136 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2137 IP_MAXPACKET + sizeof(struct ether_vlan_header),
2138 BCE_MAX_SEGMENTS, PAGE_SIZE,
2139 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2142 device_printf(txr->sc->bce_dev,
2143 "Could not allocate TX mbuf DMA tag!\n");
2147 /* Create DMA maps for the TX mbufs clusters. */
2148 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
2149 rc = bus_dmamap_create(txr->tx_mbuf_tag,
2150 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2151 &txr->tx_mbuf_map[i]);
/*
 * Unwind the maps created so far.  BUGFIX: destroy map[j],
 * not map[i] — the old code destroyed the just-failed index
 * repeatedly and leaked maps 0..i-1 (the RX ring cleanup
 * already does this correctly).
 */
2155 for (j = 0; j < i; ++j) {
2156 bus_dmamap_destroy(txr->tx_mbuf_tag,
2157 txr->tx_mbuf_map[j]);
2159 bus_dma_tag_destroy(txr->tx_mbuf_tag);
2160 txr->tx_mbuf_tag = NULL;
2162 device_printf(txr->sc->bce_dev, "Unable to create "
2163 "%dth TX mbuf DMA map!\n", i);
/*
 * Allocate all DMA resources for an RX ring: validate the tunable page
 * count, allocate the bookkeeping arrays (including per-mbuf physical
 * addresses), create/load the descriptor-chain pages, then create the
 * RX mbuf tag, a temporary map, and one DMA map per buffer descriptor.
 */
2172 bce_create_rx_ring(struct bce_rx_ring *rxr)
/* Page count tunable must be a power of two within [1, RX_PAGES_MAX]. */
2176 pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
2177 if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
2178 device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
2179 pages = RX_PAGES_DEFAULT;
2181 rxr->rx_pages = pages;
2183 rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
2184 M_DEVBUF, M_WAITOK | M_ZERO);
2185 rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
2186 M_DEVBUF, M_WAITOK | M_ZERO);
2187 rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
2188 M_DEVBUF, M_WAITOK | M_ZERO);
2190 rxr->rx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_RX_BD(rxr),
2191 M_DEVBUF, M_WAITOK | M_ZERO);
2192 rxr->rx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_RX_BD(rxr),
2193 M_DEVBUF, M_WAITOK | M_ZERO);
2194 rxr->rx_mbuf_paddr = kmalloc(sizeof(bus_addr_t) * TOTAL_RX_BD(rxr),
2195 M_DEVBUF, M_WAITOK | M_ZERO);
2198 * Create a DMA tag for the RX buffer descriptor chain,
2199 * allocate and clear the memory, and fetch the physical
2200 * address of the blocks.
2202 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2203 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2204 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2205 0, &rxr->rx_bd_chain_tag);
2207 device_printf(rxr->sc->bce_dev, "Could not allocate "
2208 "RX descriptor chain DMA tag!\n");
2212 for (i = 0; i < rxr->rx_pages; i++) {
2215 rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
2216 (void **)&rxr->rx_bd_chain[i],
2217 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2218 &rxr->rx_bd_chain_map[i]);
2220 device_printf(rxr->sc->bce_dev,
2221 "Could not allocate %dth RX descriptor "
2222 "chain DMA memory!\n", i);
2226 rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
2227 rxr->rx_bd_chain_map[i],
2228 rxr->rx_bd_chain[i],
2229 BCE_RX_CHAIN_PAGE_SZ,
2230 bce_dma_map_addr, &busaddr,
/* Coherent memory must load synchronously; EINPROGRESS is fatal. */
2233 if (rc == EINPROGRESS) {
2234 panic("%s coherent memory loading "
2235 "is still in progress!",
2236 rxr->sc->arpcom.ac_if.if_xname);
2238 device_printf(rxr->sc->bce_dev,
2239 "Could not map %dth RX descriptor "
2240 "chain DMA memory!\n", i);
2241 bus_dmamem_free(rxr->rx_bd_chain_tag,
2242 rxr->rx_bd_chain[i],
2243 rxr->rx_bd_chain_map[i]);
2244 rxr->rx_bd_chain[i] = NULL;
2248 rxr->rx_bd_chain_paddr[i] = busaddr;
2251 /* Create a DMA tag for RX mbufs. */
2252 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
2253 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2254 MCLBYTES, 1, MCLBYTES,
2255 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
2258 device_printf(rxr->sc->bce_dev,
2259 "Could not allocate RX mbuf DMA tag!\n");
2263 /* Create tmp DMA map for RX mbuf clusters. */
2264 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2265 &rxr->rx_mbuf_tmpmap);
2267 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2268 rxr->rx_mbuf_tag = NULL;
2270 device_printf(rxr->sc->bce_dev,
2271 "Could not create RX mbuf tmp DMA map!\n");
2275 /* Create DMA maps for the RX mbuf clusters. */
2276 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2277 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2278 &rxr->rx_mbuf_map[i]);
/* On failure, unwind every map created so far, then the tag. */
2282 for (j = 0; j < i; ++j) {
2283 bus_dmamap_destroy(rxr->rx_mbuf_tag,
2284 rxr->rx_mbuf_map[j]);
2286 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2287 rxr->rx_mbuf_tag = NULL;
2289 device_printf(rxr->sc->bce_dev, "Unable to create "
2290 "%dth RX mbuf DMA map!\n", i);
2298 /****************************************************************************/
2299 /* Allocate any DMA memory needed by the driver. */
2301 /* Allocates DMA memory needed for the various global structures needed by */
2304 /* Memory alignment requirements: */
2305 /* -----------------+----------+----------+----------+----------+ */
2306 /* Data Structure | 5706 | 5708 | 5709 | 5716 | */
2307 /* -----------------+----------+----------+----------+----------+ */
2308 /* Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2309 /* Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2310 /* RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */
2311 /* PG Buffers | none | none | none | none | */
2312 /* TX Buffers | none | none | none | none | */
2313 /* Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */
2314 /* Context Pages(1) | N/A | N/A | 4KiB | 4KiB | */
2315 /* -----------------+----------+----------+----------+----------+ */
2317 /* (1) Must align with CPU page size (BCM_PAGE_SIZE). */
2320 /* 0 for success, positive value for failure. */
2321 /****************************************************************************/
2323 bce_dma_alloc(struct bce_softc *sc)
	/*
	 * Allocate all global DMA memory for the controller: the parent DMA
	 * tag, the status and statistics blocks, the context block (5709/5716
	 * only), and the per-ring TX/RX structures.  Returns 0 for success,
	 * positive value for failure (see the alignment table above).
	 */
2325 struct ifnet *ifp = &sc->arpcom.ac_if;
2327 bus_addr_t busaddr, max_busaddr;
2328 bus_size_t status_align, stats_align;
2331 * The embedded PCIe to PCI-X bridge (EPB)
2332 * in the 5708 cannot address memory above
2333 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
2335 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
2336 max_busaddr = BCE_BUS_SPACE_MAXADDR;
2338 max_busaddr = BUS_SPACE_MAXADDR;
2341 * BCM5709 and BCM5716 use host memory as cache for context memory.
2343 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2344 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2345 sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
2346 if (sc->ctx_pages == 0)
2348 if (sc->ctx_pages > BCE_CTX_PAGES) {
2349 device_printf(sc->bce_dev, "excessive ctx pages %d\n",
2361 * Allocate the parent bus DMA tag appropriate for PCI.
2363 rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2364 max_busaddr, BUS_SPACE_MAXADDR,
2366 BUS_SPACE_MAXSIZE_32BIT, 0,
2367 BUS_SPACE_MAXSIZE_32BIT,
2368 0, &sc->parent_tag);
2370 if_printf(ifp, "Could not allocate parent DMA tag!\n");
2375 * Allocate status block.
2377 sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2378 status_align, BCE_STATUS_BLK_SZ,
2379 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2380 &sc->status_tag, &sc->status_map,
2381 &sc->status_block_paddr);
2382 if (sc->status_block == NULL) {
2383 if_printf(ifp, "Could not allocate status block!\n");
2388 * Allocate statistics block.
2390 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2391 stats_align, BCE_STATS_BLK_SZ,
2392 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2393 &sc->stats_tag, &sc->stats_map,
2394 &sc->stats_block_paddr);
2395 if (sc->stats_block == NULL) {
2396 if_printf(ifp, "Could not allocate statistics block!\n");
2401 * Allocate context block, if needed
2403 if (sc->ctx_pages != 0) {
2404 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2405 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2407 BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
2410 if_printf(ifp, "Could not allocate "
2411 "context block DMA tag!\n");
2415 for (i = 0; i < sc->ctx_pages; i++) {
2416 rc = bus_dmamem_alloc(sc->ctx_tag,
2417 (void **)&sc->ctx_block[i],
2418 BUS_DMA_WAITOK | BUS_DMA_ZERO |
2422 if_printf(ifp, "Could not allocate %dth context "
2423 "DMA memory!\n", i);
2427 rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
2428 sc->ctx_block[i], BCM_PAGE_SIZE,
2429 bce_dma_map_addr, &busaddr,
	/* Coherent memory loads are synchronous; EINPROGRESS is fatal. */
2432 if (rc == EINPROGRESS) {
2433 panic("%s coherent memory loading "
2434 "is still in progress!", ifp->if_xname);
2436 if_printf(ifp, "Could not map %dth context "
2437 "DMA memory!\n", i);
2438 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2440 sc->ctx_block[i] = NULL;
2443 sc->ctx_paddr[i] = busaddr;
	/* Allocate and initialize the per-ring TX structures. */
2447 sc->tx_rings = kmalloc_cachealign(
2448 sizeof(struct bce_tx_ring) * sc->ring_cnt, M_DEVBUF,
2450 for (i = 0; i < sc->ring_cnt; ++i) {
2451 sc->tx_rings[i].sc = sc;
2453 rc = bce_create_tx_ring(&sc->tx_rings[i]);
2455 device_printf(sc->bce_dev,
2456 "can't create %dth tx ring\n", i);
	/* Allocate and initialize the per-ring RX structures. */
2461 sc->rx_rings = kmalloc_cachealign(
2462 sizeof(struct bce_rx_ring) * sc->ring_cnt, M_DEVBUF,
2464 for (i = 0; i < sc->ring_cnt; ++i) {
2465 sc->rx_rings[i].sc = sc;
2467 rc = bce_create_rx_ring(&sc->rx_rings[i]);
2469 device_printf(sc->bce_dev,
2470 "can't create %dth rx ring\n", i);
2479 /****************************************************************************/
2480 /* Firmware synchronization. */
2482 /* Before performing certain events such as a chip reset, synchronize with */
2483 /* the firmware first. */
2486 /* 0 for success, positive value for failure. */
2487 /****************************************************************************/
2489 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
	/*
	 * Synchronize with the bootcode before chip-altering events: post
	 * msg_data (tagged with a sequence number) to the driver mailbox and
	 * poll the firmware mailbox for a matching acknowledgement.
	 * On timeout, notify the bootcode and latch sc->bce_fw_timed_out so
	 * later calls fail fast.  Returns 0 for success, positive on failure.
	 */
2494 /* Don't waste any time if we've timed out before. */
2495 if (sc->bce_fw_timed_out)
2498 /* Increment the message sequence number. */
2499 sc->bce_fw_wr_seq++;
2500 msg_data |= sc->bce_fw_wr_seq;
2502 /* Send the message to the bootcode driver mailbox. */
2503 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2505 /* Wait for the bootcode to acknowledge the message. */
2506 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2507 /* Check for a response in the bootcode firmware mailbox. */
2508 val = bce_shmem_rd(sc, BCE_FW_MB);
2509 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2514 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2515 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2516 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2517 if_printf(&sc->arpcom.ac_if,
2518 "Firmware synchronization timeout! "
2519 "msg_data = 0x%08X\n", msg_data);
	/* Replace the message code with the FW_TIMEOUT notification. */
2521 msg_data &= ~BCE_DRV_MSG_CODE;
2522 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2524 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2526 sc->bce_fw_timed_out = 1;
2533 /****************************************************************************/
2534 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2538 /****************************************************************************/
2540 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2541 uint32_t rv2p_code_len, uint32_t rv2p_proc)
	/*
	 * Load an RV2P processor firmware image.  Each 8-byte step writes one
	 * instruction pair (high/low words) and then commits it to the
	 * selected processor's instruction slot (i / 8).
	 */
2546 for (i = 0; i < rv2p_code_len; i += 8) {
2547 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2549 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2552 if (rv2p_proc == RV2P_PROC1) {
2553 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2554 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2556 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2557 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2561 /* Reset the processor, un-stall is done later. */
2562 if (rv2p_proc == RV2P_PROC1)
2563 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2565 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2569 /****************************************************************************/
2570 /* Load RISC processor firmware. */
2572 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */
2573 /* associated with a particular processor. */
2577 /****************************************************************************/
2579 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
	/*
	 * Copy a firmware image (text/data/sbss/bss/rodata sections) into the
	 * scratchpad memory of one on-chip RISC processor via indirect
	 * register writes.  The CPU is halted first; the caller restarts it.
	 * Section addresses are translated from the MIPS view into the
	 * scratchpad address space.
	 */
2585 bce_halt_cpu(sc, cpu_reg);
2587 /* Load the Text area. */
2588 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2590 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2591 REG_WR_IND(sc, offset, fw->text[j]);
2594 /* Load the Data area. */
2595 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2597 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2598 REG_WR_IND(sc, offset, fw->data[j]);
2601 /* Load the SBSS area. */
2602 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2604 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2605 REG_WR_IND(sc, offset, fw->sbss[j]);
2608 /* Load the BSS area. */
2609 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2611 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2612 REG_WR_IND(sc, offset, fw->bss[j]);
2615 /* Load the Read-Only area. */
2616 offset = cpu_reg->spad_base +
2617 (fw->rodata_addr - cpu_reg->mips_view_base);
2619 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2620 REG_WR_IND(sc, offset, fw->rodata[j]);
2623 /* Clear the pre-fetch instruction and set the FW start address. */
2624 REG_WR_IND(sc, cpu_reg->inst, 0);
2625 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2629 /****************************************************************************/
2630 /* Starts the RISC processor. */
2632 /* Assumes the CPU starting address has already been set. */
2636 /****************************************************************************/
2638 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
	/*
	 * Un-halt a RISC processor: clear the halt bit in its mode register
	 * and clear any pending state bits.  Assumes the start address (PC)
	 * has already been set by bce_load_cpu_fw().
	 */
2642 /* Start the CPU. */
2643 val = REG_RD_IND(sc, cpu_reg->mode);
2644 val &= ~cpu_reg->mode_value_halt;
2645 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2646 REG_WR_IND(sc, cpu_reg->mode, val);
2650 /****************************************************************************/
2651 /* Halts the RISC processor. */
2655 /****************************************************************************/
2657 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
	/*
	 * Halt a RISC processor: set the halt bit in its mode register and
	 * clear any pending state bits (mirror image of bce_start_cpu()).
	 */
2662 val = REG_RD_IND(sc, cpu_reg->mode);
2663 val |= cpu_reg->mode_value_halt;
2664 REG_WR_IND(sc, cpu_reg->mode, val);
2665 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2669 /****************************************************************************/
2670 /* Start the RX CPU. */
2674 /****************************************************************************/
2676 bce_start_rxp_cpu(struct bce_softc *sc)
	/*
	 * Start the RX processor.  Builds the RXP register description on the
	 * stack and delegates to bce_start_cpu().  Used when RXP startup was
	 * deliberately deferred by bce_init_rxp_cpu().
	 */
2678 struct cpu_reg cpu_reg;
2680 cpu_reg.mode = BCE_RXP_CPU_MODE;
2681 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2682 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2683 cpu_reg.state = BCE_RXP_CPU_STATE;
2684 cpu_reg.state_value_clear = 0xffffff;
2685 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2686 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2687 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2688 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2689 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2690 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2691 cpu_reg.mips_view_base = 0x8000000;
2693 bce_start_cpu(sc, &cpu_reg);
2697 /****************************************************************************/
2698 /* Initialize the RX CPU. */
2702 /****************************************************************************/
2704 bce_init_rxp_cpu(struct bce_softc *sc)
	/*
	 * Load the RX processor firmware: select the 5709/5716 (b09) or
	 * 5706/5708 (b06) image by chip number, then load it.  Unlike the
	 * other CPU init routines, the RXP is NOT started here.
	 */
2706 struct cpu_reg cpu_reg;
2709 cpu_reg.mode = BCE_RXP_CPU_MODE;
2710 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2711 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2712 cpu_reg.state = BCE_RXP_CPU_STATE;
2713 cpu_reg.state_value_clear = 0xffffff;
2714 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2715 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2716 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2717 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2718 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2719 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2720 cpu_reg.mips_view_base = 0x8000000;
	/* Pick the firmware image matching the controller generation. */
2722 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2723 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2724 fw.ver_major = bce_RXP_b09FwReleaseMajor;
2725 fw.ver_minor = bce_RXP_b09FwReleaseMinor;
2726 fw.ver_fix = bce_RXP_b09FwReleaseFix;
2727 fw.start_addr = bce_RXP_b09FwStartAddr;
2729 fw.text_addr = bce_RXP_b09FwTextAddr;
2730 fw.text_len = bce_RXP_b09FwTextLen;
2732 fw.text = bce_RXP_b09FwText;
2734 fw.data_addr = bce_RXP_b09FwDataAddr;
2735 fw.data_len = bce_RXP_b09FwDataLen;
2737 fw.data = bce_RXP_b09FwData;
2739 fw.sbss_addr = bce_RXP_b09FwSbssAddr;
2740 fw.sbss_len = bce_RXP_b09FwSbssLen;
2742 fw.sbss = bce_RXP_b09FwSbss;
2744 fw.bss_addr = bce_RXP_b09FwBssAddr;
2745 fw.bss_len = bce_RXP_b09FwBssLen;
2747 fw.bss = bce_RXP_b09FwBss;
2749 fw.rodata_addr = bce_RXP_b09FwRodataAddr;
2750 fw.rodata_len = bce_RXP_b09FwRodataLen;
2751 fw.rodata_index = 0;
2752 fw.rodata = bce_RXP_b09FwRodata;
2754 fw.ver_major = bce_RXP_b06FwReleaseMajor;
2755 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2756 fw.ver_fix = bce_RXP_b06FwReleaseFix;
2757 fw.start_addr = bce_RXP_b06FwStartAddr;
2759 fw.text_addr = bce_RXP_b06FwTextAddr;
2760 fw.text_len = bce_RXP_b06FwTextLen;
2762 fw.text = bce_RXP_b06FwText;
2764 fw.data_addr = bce_RXP_b06FwDataAddr;
2765 fw.data_len = bce_RXP_b06FwDataLen;
2767 fw.data = bce_RXP_b06FwData;
2769 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2770 fw.sbss_len = bce_RXP_b06FwSbssLen;
2772 fw.sbss = bce_RXP_b06FwSbss;
2774 fw.bss_addr = bce_RXP_b06FwBssAddr;
2775 fw.bss_len = bce_RXP_b06FwBssLen;
2777 fw.bss = bce_RXP_b06FwBss;
2779 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2780 fw.rodata_len = bce_RXP_b06FwRodataLen;
2781 fw.rodata_index = 0;
2782 fw.rodata = bce_RXP_b06FwRodata;
2785 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2786 /* Delay RXP start until initialization is complete. */
2790 /****************************************************************************/
2791 /* Initialize the TX CPU. */
2795 /****************************************************************************/
2797 bce_init_txp_cpu(struct bce_softc *sc)
	/*
	 * Load and start the TX processor firmware.  Selects the b09 image
	 * for 5709/5716, otherwise the b06 image.
	 */
2799 struct cpu_reg cpu_reg;
2802 cpu_reg.mode = BCE_TXP_CPU_MODE;
2803 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2804 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2805 cpu_reg.state = BCE_TXP_CPU_STATE;
2806 cpu_reg.state_value_clear = 0xffffff;
2807 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2808 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2809 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2810 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2811 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2812 cpu_reg.spad_base = BCE_TXP_SCRATCH;
2813 cpu_reg.mips_view_base = 0x8000000;
	/* Pick the firmware image matching the controller generation. */
2815 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2816 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2817 fw.ver_major = bce_TXP_b09FwReleaseMajor;
2818 fw.ver_minor = bce_TXP_b09FwReleaseMinor;
2819 fw.ver_fix = bce_TXP_b09FwReleaseFix;
2820 fw.start_addr = bce_TXP_b09FwStartAddr;
2822 fw.text_addr = bce_TXP_b09FwTextAddr;
2823 fw.text_len = bce_TXP_b09FwTextLen;
2825 fw.text = bce_TXP_b09FwText;
2827 fw.data_addr = bce_TXP_b09FwDataAddr;
2828 fw.data_len = bce_TXP_b09FwDataLen;
2830 fw.data = bce_TXP_b09FwData;
2832 fw.sbss_addr = bce_TXP_b09FwSbssAddr;
2833 fw.sbss_len = bce_TXP_b09FwSbssLen;
2835 fw.sbss = bce_TXP_b09FwSbss;
2837 fw.bss_addr = bce_TXP_b09FwBssAddr;
2838 fw.bss_len = bce_TXP_b09FwBssLen;
2840 fw.bss = bce_TXP_b09FwBss;
2842 fw.rodata_addr = bce_TXP_b09FwRodataAddr;
2843 fw.rodata_len = bce_TXP_b09FwRodataLen;
2844 fw.rodata_index = 0;
2845 fw.rodata = bce_TXP_b09FwRodata;
2847 fw.ver_major = bce_TXP_b06FwReleaseMajor;
2848 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2849 fw.ver_fix = bce_TXP_b06FwReleaseFix;
2850 fw.start_addr = bce_TXP_b06FwStartAddr;
2852 fw.text_addr = bce_TXP_b06FwTextAddr;
2853 fw.text_len = bce_TXP_b06FwTextLen;
2855 fw.text = bce_TXP_b06FwText;
2857 fw.data_addr = bce_TXP_b06FwDataAddr;
2858 fw.data_len = bce_TXP_b06FwDataLen;
2860 fw.data = bce_TXP_b06FwData;
2862 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2863 fw.sbss_len = bce_TXP_b06FwSbssLen;
2865 fw.sbss = bce_TXP_b06FwSbss;
2867 fw.bss_addr = bce_TXP_b06FwBssAddr;
2868 fw.bss_len = bce_TXP_b06FwBssLen;
2870 fw.bss = bce_TXP_b06FwBss;
2872 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2873 fw.rodata_len = bce_TXP_b06FwRodataLen;
2874 fw.rodata_index = 0;
2875 fw.rodata = bce_TXP_b06FwRodata;
2878 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2879 bce_start_cpu(sc, &cpu_reg);
2883 /****************************************************************************/
2884 /* Initialize the TPAT CPU. */
2888 /****************************************************************************/
2890 bce_init_tpat_cpu(struct bce_softc *sc)
	/*
	 * Load and start the TX Patch-up (TPAT) processor firmware.  Selects
	 * the b09 image for 5709/5716, otherwise the b06 image.
	 */
2892 struct cpu_reg cpu_reg;
2895 cpu_reg.mode = BCE_TPAT_CPU_MODE;
2896 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2897 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2898 cpu_reg.state = BCE_TPAT_CPU_STATE;
2899 cpu_reg.state_value_clear = 0xffffff;
2900 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2901 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2902 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2903 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2904 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2905 cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2906 cpu_reg.mips_view_base = 0x8000000;
	/* Pick the firmware image matching the controller generation. */
2908 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2909 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2910 fw.ver_major = bce_TPAT_b09FwReleaseMajor;
2911 fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
2912 fw.ver_fix = bce_TPAT_b09FwReleaseFix;
2913 fw.start_addr = bce_TPAT_b09FwStartAddr;
2915 fw.text_addr = bce_TPAT_b09FwTextAddr;
2916 fw.text_len = bce_TPAT_b09FwTextLen;
2918 fw.text = bce_TPAT_b09FwText;
2920 fw.data_addr = bce_TPAT_b09FwDataAddr;
2921 fw.data_len = bce_TPAT_b09FwDataLen;
2923 fw.data = bce_TPAT_b09FwData;
2925 fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
2926 fw.sbss_len = bce_TPAT_b09FwSbssLen;
2928 fw.sbss = bce_TPAT_b09FwSbss;
2930 fw.bss_addr = bce_TPAT_b09FwBssAddr;
2931 fw.bss_len = bce_TPAT_b09FwBssLen;
2933 fw.bss = bce_TPAT_b09FwBss;
2935 fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
2936 fw.rodata_len = bce_TPAT_b09FwRodataLen;
2937 fw.rodata_index = 0;
2938 fw.rodata = bce_TPAT_b09FwRodata;
2940 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2941 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2942 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2943 fw.start_addr = bce_TPAT_b06FwStartAddr;
2945 fw.text_addr = bce_TPAT_b06FwTextAddr;
2946 fw.text_len = bce_TPAT_b06FwTextLen;
2948 fw.text = bce_TPAT_b06FwText;
2950 fw.data_addr = bce_TPAT_b06FwDataAddr;
2951 fw.data_len = bce_TPAT_b06FwDataLen;
2953 fw.data = bce_TPAT_b06FwData;
2955 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2956 fw.sbss_len = bce_TPAT_b06FwSbssLen;
2958 fw.sbss = bce_TPAT_b06FwSbss;
2960 fw.bss_addr = bce_TPAT_b06FwBssAddr;
2961 fw.bss_len = bce_TPAT_b06FwBssLen;
2963 fw.bss = bce_TPAT_b06FwBss;
2965 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2966 fw.rodata_len = bce_TPAT_b06FwRodataLen;
2967 fw.rodata_index = 0;
2968 fw.rodata = bce_TPAT_b06FwRodata;
2971 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2972 bce_start_cpu(sc, &cpu_reg);
2976 /****************************************************************************/
2977 /* Initialize the CP CPU. */
2981 /****************************************************************************/
2983 bce_init_cp_cpu(struct bce_softc *sc)
	/*
	 * Load and start the Command Processor (CP) firmware.  Selects the
	 * b09 image for 5709/5716, otherwise the b06 image.
	 */
2985 struct cpu_reg cpu_reg;
2988 cpu_reg.mode = BCE_CP_CPU_MODE;
2989 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
2990 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
2991 cpu_reg.state = BCE_CP_CPU_STATE;
2992 cpu_reg.state_value_clear = 0xffffff;
2993 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
2994 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
2995 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
2996 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
2997 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
2998 cpu_reg.spad_base = BCE_CP_SCRATCH;
2999 cpu_reg.mips_view_base = 0x8000000;
	/* Pick the firmware image matching the controller generation. */
3001 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3002 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3003 fw.ver_major = bce_CP_b09FwReleaseMajor;
3004 fw.ver_minor = bce_CP_b09FwReleaseMinor;
3005 fw.ver_fix = bce_CP_b09FwReleaseFix;
3006 fw.start_addr = bce_CP_b09FwStartAddr;
3008 fw.text_addr = bce_CP_b09FwTextAddr;
3009 fw.text_len = bce_CP_b09FwTextLen;
3011 fw.text = bce_CP_b09FwText;
3013 fw.data_addr = bce_CP_b09FwDataAddr;
3014 fw.data_len = bce_CP_b09FwDataLen;
3016 fw.data = bce_CP_b09FwData;
3018 fw.sbss_addr = bce_CP_b09FwSbssAddr;
3019 fw.sbss_len = bce_CP_b09FwSbssLen;
3021 fw.sbss = bce_CP_b09FwSbss;
3023 fw.bss_addr = bce_CP_b09FwBssAddr;
3024 fw.bss_len = bce_CP_b09FwBssLen;
3026 fw.bss = bce_CP_b09FwBss;
3028 fw.rodata_addr = bce_CP_b09FwRodataAddr;
3029 fw.rodata_len = bce_CP_b09FwRodataLen;
3030 fw.rodata_index = 0;
3031 fw.rodata = bce_CP_b09FwRodata;
3033 fw.ver_major = bce_CP_b06FwReleaseMajor;
3034 fw.ver_minor = bce_CP_b06FwReleaseMinor;
3035 fw.ver_fix = bce_CP_b06FwReleaseFix;
3036 fw.start_addr = bce_CP_b06FwStartAddr;
3038 fw.text_addr = bce_CP_b06FwTextAddr;
3039 fw.text_len = bce_CP_b06FwTextLen;
3041 fw.text = bce_CP_b06FwText;
3043 fw.data_addr = bce_CP_b06FwDataAddr;
3044 fw.data_len = bce_CP_b06FwDataLen;
3046 fw.data = bce_CP_b06FwData;
3048 fw.sbss_addr = bce_CP_b06FwSbssAddr;
3049 fw.sbss_len = bce_CP_b06FwSbssLen;
3051 fw.sbss = bce_CP_b06FwSbss;
3053 fw.bss_addr = bce_CP_b06FwBssAddr;
3054 fw.bss_len = bce_CP_b06FwBssLen;
3056 fw.bss = bce_CP_b06FwBss;
3058 fw.rodata_addr = bce_CP_b06FwRodataAddr;
3059 fw.rodata_len = bce_CP_b06FwRodataLen;
3060 fw.rodata_index = 0;
3061 fw.rodata = bce_CP_b06FwRodata;
3064 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3065 bce_start_cpu(sc, &cpu_reg);
3069 /****************************************************************************/
3070 /* Initialize the COM CPU. */
3074 /****************************************************************************/
3076 bce_init_com_cpu(struct bce_softc *sc)
	/*
	 * Load and start the Completion (COM) processor firmware.  Selects
	 * the b09 image for 5709/5716, otherwise the b06 image.
	 */
3078 struct cpu_reg cpu_reg;
3081 cpu_reg.mode = BCE_COM_CPU_MODE;
3082 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3083 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3084 cpu_reg.state = BCE_COM_CPU_STATE;
3085 cpu_reg.state_value_clear = 0xffffff;
3086 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3087 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3088 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3089 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3090 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3091 cpu_reg.spad_base = BCE_COM_SCRATCH;
3092 cpu_reg.mips_view_base = 0x8000000;
	/* Pick the firmware image matching the controller generation. */
3094 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3095 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3096 fw.ver_major = bce_COM_b09FwReleaseMajor;
3097 fw.ver_minor = bce_COM_b09FwReleaseMinor;
3098 fw.ver_fix = bce_COM_b09FwReleaseFix;
3099 fw.start_addr = bce_COM_b09FwStartAddr;
3101 fw.text_addr = bce_COM_b09FwTextAddr;
3102 fw.text_len = bce_COM_b09FwTextLen;
3104 fw.text = bce_COM_b09FwText;
3106 fw.data_addr = bce_COM_b09FwDataAddr;
3107 fw.data_len = bce_COM_b09FwDataLen;
3109 fw.data = bce_COM_b09FwData;
3111 fw.sbss_addr = bce_COM_b09FwSbssAddr;
3112 fw.sbss_len = bce_COM_b09FwSbssLen;
3114 fw.sbss = bce_COM_b09FwSbss;
3116 fw.bss_addr = bce_COM_b09FwBssAddr;
3117 fw.bss_len = bce_COM_b09FwBssLen;
3119 fw.bss = bce_COM_b09FwBss;
3121 fw.rodata_addr = bce_COM_b09FwRodataAddr;
3122 fw.rodata_len = bce_COM_b09FwRodataLen;
3123 fw.rodata_index = 0;
3124 fw.rodata = bce_COM_b09FwRodata;
3126 fw.ver_major = bce_COM_b06FwReleaseMajor;
3127 fw.ver_minor = bce_COM_b06FwReleaseMinor;
3128 fw.ver_fix = bce_COM_b06FwReleaseFix;
3129 fw.start_addr = bce_COM_b06FwStartAddr;
3131 fw.text_addr = bce_COM_b06FwTextAddr;
3132 fw.text_len = bce_COM_b06FwTextLen;
3134 fw.text = bce_COM_b06FwText;
3136 fw.data_addr = bce_COM_b06FwDataAddr;
3137 fw.data_len = bce_COM_b06FwDataLen;
3139 fw.data = bce_COM_b06FwData;
3141 fw.sbss_addr = bce_COM_b06FwSbssAddr;
3142 fw.sbss_len = bce_COM_b06FwSbssLen;
3144 fw.sbss = bce_COM_b06FwSbss;
3146 fw.bss_addr = bce_COM_b06FwBssAddr;
3147 fw.bss_len = bce_COM_b06FwBssLen;
3149 fw.bss = bce_COM_b06FwBss;
3151 fw.rodata_addr = bce_COM_b06FwRodataAddr;
3152 fw.rodata_len = bce_COM_b06FwRodataLen;
3153 fw.rodata_index = 0;
3154 fw.rodata = bce_COM_b06FwRodata;
3157 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3158 bce_start_cpu(sc, &cpu_reg);
3162 /****************************************************************************/
3163 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */
3165 /* Loads the firmware for each CPU and starts the CPU. */
3169 /****************************************************************************/
3171 bce_init_cpus(struct bce_softc *sc)
	/*
	 * Load firmware into all on-chip processors.  The RV2P image is
	 * chosen by chip generation and revision: xi90 for 5709/5716 Ax,
	 * xi for other 5709/5716 revisions, and the plain image for
	 * 5706/5708.  Then each RISC processor is loaded (and, except for
	 * RXP, started) by its init routine.
	 */
3173 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3174 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3175 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3176 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3177 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3178 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3179 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3181 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3182 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3183 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3184 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3187 bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3188 sizeof(bce_rv2p_proc1), RV2P_PROC1);
3189 bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3190 sizeof(bce_rv2p_proc2), RV2P_PROC2);
3193 bce_init_rxp_cpu(sc);
3194 bce_init_txp_cpu(sc);
3195 bce_init_tpat_cpu(sc);
3196 bce_init_com_cpu(sc);
3197 bce_init_cp_cpu(sc);
3201 /****************************************************************************/
3202 /* Initialize context memory. */
3204 /* Clears the memory associated with each Context ID (CID). */
3208 /****************************************************************************/
3210 bce_init_ctx(struct bce_softc *sc)
	/*
	 * Initialize context memory.  On 5709/5716 the context lives in host
	 * memory: trigger the MEM_INIT command and then program the host
	 * page table with each context page's physical address, verifying
	 * each write.  On 5706/5708 the context is on-chip and is cleared
	 * directly through the context window registers.
	 */
3212 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3213 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3214 /* DRC: Replace this constant value with a #define. */
3215 int i, retry_cnt = 10;
3219 * BCM5709 context memory may be cached
3220 * in host memory so prepare the host memory
3223 val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
	/* Encode the host page size (log2 - 8) into bits 16+. */
3225 val |= (BCM_PAGE_BITS - 8) << 16;
3226 REG_WR(sc, BCE_CTX_COMMAND, val);
3228 /* Wait for mem init command to complete. */
3229 for (i = 0; i < retry_cnt; i++) {
3230 val = REG_RD(sc, BCE_CTX_COMMAND);
3231 if (!(val & BCE_CTX_COMMAND_MEM_INIT))
3235 if (i == retry_cnt) {
3236 device_printf(sc->bce_dev,
3237 "Context memory initialization failed!\n");
3241 for (i = 0; i < sc->ctx_pages; i++) {
3245 * Set the physical address of the context
3248 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
3249 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
3250 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
3251 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
3252 BCE_ADDR_HI(sc->ctx_paddr[i]));
3253 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
3254 i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3257 * Verify that the context memory write was successful.
	/* The WRITE_REQ bit self-clears when the entry is committed. */
3259 for (j = 0; j < retry_cnt; j++) {
3260 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
3262 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3266 if (j == retry_cnt) {
3267 device_printf(sc->bce_dev,
3268 "Failed to initialize context page!\n");
3273 uint32_t vcid_addr, offset;
3276 * For the 5706/5708, context memory is local to
3277 * the controller, so initialize the controller
	/* Walk CID addresses downward, zeroing one PHY_CTX_SIZE block each. */
3281 vcid_addr = GET_CID_ADDR(96);
3283 vcid_addr -= PHY_CTX_SIZE;
3285 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
3286 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3288 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3289 CTX_WR(sc, 0x00, offset, 0);
3291 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3292 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3299 /****************************************************************************/
3300 /* Fetch the permanent MAC address of the controller. */
3304 /****************************************************************************/
3306 bce_get_mac_addr(struct bce_softc *sc)
	/*
	 * Fetch the factory-programmed MAC address from the bootcode shared
	 * memory area and store it in sc->eaddr.  The upper 16 bits come
	 * from MAC_UPPER, the lower 32 bits from MAC_LOWER.  An all-zero
	 * value is rejected as invalid.
	 */
3308 uint32_t mac_lo = 0, mac_hi = 0;
3311 * The NetXtreme II bootcode populates various NIC
3312 * power-on and runtime configuration items in a
3313 * shared memory area. The factory configured MAC
3314 * address is available from both NVRAM and the
3315 * shared memory area so we'll read the value from
3316 * shared memory for speed.
3319 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
3320 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
3322 if (mac_lo == 0 && mac_hi == 0) {
3323 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
	/* Unpack the 48-bit address, most significant byte first. */
3325 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3326 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3327 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3328 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3329 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3330 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3335 /****************************************************************************/
3336 /* Program the MAC address. */
3340 /****************************************************************************/
3342 bce_set_mac_addr(struct bce_softc *sc)
	/*
	 * Program sc->eaddr into the EMAC perfect-match registers:
	 * bytes 0-1 into MATCH0, bytes 2-5 into MATCH1.
	 */
3344 const uint8_t *mac_addr = sc->eaddr;
3347 val = (mac_addr[0] << 8) | mac_addr[1];
3348 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3350 val = (mac_addr[2] << 24) |
3351 (mac_addr[3] << 16) |
3352 (mac_addr[4] << 8) |
3354 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3358 /****************************************************************************/
3359 /* Stop the controller. */
3363 /****************************************************************************/
3365 bce_stop(struct bce_softc *sc)
	/*
	 * Stop the controller: cancel the tick callout, disable the TX/RX
	 * blocks and interrupts, release all queued RX/TX buffers, and mark
	 * the interface down.  Caller must hold the interface serializer.
	 */
3367 struct ifnet *ifp = &sc->arpcom.ac_if;
3370 ASSERT_SERIALIZED(ifp->if_serializer);
3372 callout_stop(&sc->bce_tick_callout);
3374 /* Disable the transmit/receive blocks. */
3375 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
	/* Read back to flush the register write before proceeding. */
3376 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3379 bce_disable_intr(sc);
3381 /* Free the RX lists. */
3382 for (i = 0; i < sc->ring_cnt; ++i)
3383 bce_free_rx_chain(&sc->rx_rings[i]);
3385 /* Free TX buffers. */
3386 for (i = 0; i < sc->ring_cnt; ++i)
3387 bce_free_tx_chain(&sc->tx_rings[i]);
3390 sc->bce_coalchg_mask = 0;
3392 ifp->if_flags &= ~IFF_RUNNING;
3393 ifq_clr_oactive(&ifp->if_snd);
3399 bce_reset(struct bce_softc *sc, uint32_t reset_code)
	/*
	 * Perform a chip (core) reset: quiesce DMA, synchronize with the
	 * bootcode (WAIT0 before, WAIT1 after), issue the reset via
	 * MISC_COMMAND (5709/5716) or PCICFG_MISC_CONFIG (5706/5708), poll
	 * for completion, and verify the byte-swap diagnostic register.
	 * reset_code is the BCE_DRV_MSG_CODE_* reason passed to firmware.
	 */
3404 /* Wait for pending PCI transactions to complete. */
3405 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3406 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3407 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3408 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3409 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3410 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	/* 5709/5716: additionally disable DMA in the new core control. */
3414 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3415 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3416 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3417 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3418 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3421 /* Assume bootcode is running. */
3422 sc->bce_fw_timed_out = 0;
3423 sc->bce_drv_cardiac_arrest = 0;
3425 /* Give the firmware a chance to prepare for the reset. */
3426 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3428 if_printf(&sc->arpcom.ac_if,
3429 "Firmware is not ready for reset\n");
3433 /* Set a firmware reminder that this is a soft reset. */
3434 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
3435 BCE_DRV_RESET_SIGNATURE_MAGIC);
3437 /* Dummy read to force the chip to complete all current transactions. */
3438 val = REG_RD(sc, BCE_MISC_ID);
	/* Issue the actual core reset, chip-generation specific. */
3441 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3442 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3443 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
3444 REG_RD(sc, BCE_MISC_COMMAND);
3447 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3448 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	/* Restore config via PCI config space, then request the core reset. */
3450 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
3452 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3453 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3454 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3455 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3457 /* Allow up to 30us for reset to complete. */
3458 for (i = 0; i < 10; i++) {
3459 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3460 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3461 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3466 /* Check that reset completed successfully. */
3467 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3468 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3469 if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
3474 /* Make sure byte swapping is properly configured. */
3475 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3476 if (val != 0x01020304) {
3477 if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
3481 /* Just completed a reset, assume that firmware is running again. */
3482 sc->bce_fw_timed_out = 0;
3483 sc->bce_drv_cardiac_arrest = 0;
3485 /* Wait for the firmware to finish its initialization. */
3486 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3488 if_printf(&sc->arpcom.ac_if,
3489 "Firmware did not complete initialization!\n");
/* bce_chipinit: first-stage hardware initialization after reset.  Masks    */
/* interrupts, programs the DMA byte/word-swap configuration, enables the   */
/* RX_V2P/context state machines, initializes context memory, the on-board  */
/* CPUs and NVRAM access, and configures the MQ / RV2P / TBDR blocks.       */
/* Returns 0 on success, non-zero on failure (rc paths partially elided).   */
3496 bce_chipinit(struct bce_softc *sc)
3501 /* Make sure the interrupt is not active. */
3502 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3503 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
3506 * Initialize DMA byte/word swapping, configure the number of DMA
3507 * channels and PCI clock compensation delay.
3509 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3510 BCE_DMA_CONFIG_DATA_WORD_SWAP |
3511 #if BYTE_ORDER == BIG_ENDIAN
3512 BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3514 BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3515 DMA_READ_CHANS << 12 |
3516 DMA_WRITE_CHANS << 16;
3518 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
/* Fast clock compensation only applies on a 133MHz PCI-X bus. */
3520 if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3521 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3524 * This setting resolves a problem observed on certain Intel PCI
3525 * chipsets that cannot handle multiple outstanding DMA operations.
3526 * See errata E9_5706A1_65.
3528 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3529 BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3530 !(sc->bce_flags & BCE_PCIX_FLAG))
3531 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3533 REG_WR(sc, BCE_DMA_CONFIG, val);
3535 /* Enable the RX_V2P and Context state machines before access. */
3536 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3537 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3538 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3539 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3541 /* Initialize context mapping and zero out the quick contexts. */
3542 rc = bce_init_ctx(sc);
3546 /* Initialize the on-boards CPUs */
3549 /* Enable management frames (NC-SI) to flow to the MCP. */
3550 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3551 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
3552 BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3553 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3556 /* Prepare NVRAM for access. */
3557 rc = bce_init_nvram(sc);
3561 /* Set the kernel bypass block size */
3562 val = REG_RD(sc, BCE_MQ_CONFIG);
3563 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3564 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3566 /* Enable bins used on the 5709/5716. */
3567 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3568 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3569 val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
/* 5709 A1 needs the halt-disable workaround bit as well. */
3570 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
3571 val |= BCE_MQ_CONFIG_HALT_DIS;
3574 REG_WR(sc, BCE_MQ_CONFIG, val);
/* Kernel bypass window covers the kernel context area. */
3576 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3577 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3578 REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3580 /* Set the page size and clear the RV2P processor stall bits. */
3581 val = (BCM_PAGE_BITS - 8) << 24;
3582 REG_WR(sc, BCE_RV2P_CONFIG, val);
3584 /* Configure page size. */
3585 val = REG_RD(sc, BCE_TBDR_CONFIG);
3586 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3587 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3588 REG_WR(sc, BCE_TBDR_CONFIG, val);
3590 /* Set the perfect match control register to default. */
3591 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
3597 /****************************************************************************/
3598 /* Initialize the controller in preparation to send/receive traffic. */
3601 /* 0 for success, positive value for failure. */
3602 /****************************************************************************/
/* bce_blockinit: second-stage initialization — programs the MAC address,   */
/* backoff seed, status/statistics block addresses, host coalescing         */
/* parameters, verifies the bootcode signature, and enables the remaining   */
/* MAC blocks.  Runs after bce_reset()/bce_chipinit().                      */
3604 bce_blockinit(struct bce_softc *sc)
3608 /* Load the hardware default MAC address. */
3609 bce_set_mac_addr(sc);
3611 /* Set the Ethernet backoff seed value */
3612 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3613 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3614 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3616 sc->last_status_idx = 0;
3617 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3619 /* Set up link change interrupt generation. */
3620 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3622 /* Program the physical address of the status block. */
3623 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3624 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3626 /* Program the physical address of the statistics block. */
3627 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3628 BCE_ADDR_LO(sc->stats_block_paddr));
3629 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3630 BCE_ADDR_HI(sc->stats_block_paddr));
3632 /* Program various host coalescing parameters. */
3633 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3634 (sc->bce_tx_quick_cons_trip_int << 16) |
3635 sc->bce_tx_quick_cons_trip);
3636 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3637 (sc->bce_rx_quick_cons_trip_int << 16) |
3638 sc->bce_rx_quick_cons_trip);
3639 REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3640 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3641 REG_WR(sc, BCE_HC_TX_TICKS,
3642 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3643 REG_WR(sc, BCE_HC_RX_TICKS,
3644 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3645 REG_WR(sc, BCE_HC_COM_TICKS,
3646 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3647 REG_WR(sc, BCE_HC_CMD_TICKS,
3648 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3649 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3650 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3652 val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
/* One-shot MSI mode: interrupt rearm is explicit rather than automatic. */
3653 if (sc->bce_flags & BCE_ONESHOT_MSI_FLAG) {
3655 if_printf(&sc->arpcom.ac_if, "oneshot MSI\n");
3656 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
3658 REG_WR(sc, BCE_HC_CONFIG, val);
3660 /* Clear the internal statistics counters. */
3661 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3663 /* Verify that bootcode is running. */
3664 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
3666 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3667 BCE_DEV_INFO_SIGNATURE_MAGIC) {
3668 if_printf(&sc->arpcom.ac_if,
3669 "Bootcode not running! Found: 0x%08X, "
/* Fixed diagnostic format: "08%08X" -> "0x%08X" (hex prefix typo). */
3670 "Expected: 0x%08X\n",
3671 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3672 BCE_DEV_INFO_SIGNATURE_MAGIC);
/* 5709/5716: re-enable DMA now that initialization is complete. */
3677 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3678 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3679 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3680 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3681 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3684 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3685 bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3687 /* Enable link state change interrupt generation. */
3688 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3690 /* Enable the RXP. */
3691 bce_start_rxp_cpu(sc);
3693 /* Disable management frames (NC-SI) from flowing to the MCP. */
3694 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3695 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
3696 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3697 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3700 /* Enable all remaining blocks in the MAC. */
3701 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3702 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3703 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3704 BCE_MISC_ENABLE_DEFAULT_XI);
3706 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
/* Read back to flush the enable write. */
3708 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3711 /* Save the current host coalescing block settings. */
3712 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
3718 /****************************************************************************/
3719 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3721 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3722 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3726 /* 0 for success, positive value for failure. */
3727 /****************************************************************************/
/* bce_newbuf_std: allocate and DMA-map a fresh mbuf cluster for the rx_bd  */
/* slot at *chain_prod.  'init' selects a blocking allocation (ring fill)   */
/* vs. a non-blocking one (interrupt path).                                 */
3729 bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t *chain_prod,
3730 uint32_t *prod_bseq, int init)
3733 bus_dma_segment_t seg;
3737 /* This is a new mbuf allocation. */
3738 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3742 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3744 /* Map the mbuf cluster into device memory. */
/* Load into the temporary map first so failure leaves the slot intact. */
3745 error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
3746 rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
3750 if_printf(&rxr->sc->arpcom.ac_if,
3751 "Error mapping mbuf into RX chain!\n");
/* If the slot still holds an old mbuf, unload its DMA map first. */
3756 if (rxr->rx_mbuf_ptr[*chain_prod] != NULL) {
3757 bus_dmamap_unload(rxr->rx_mbuf_tag,
3758 rxr->rx_mbuf_map[*chain_prod]);
/* Swap the loaded tmpmap into the slot; the old map becomes the tmpmap. */
3761 map = rxr->rx_mbuf_map[*chain_prod];
3762 rxr->rx_mbuf_map[*chain_prod] = rxr->rx_mbuf_tmpmap;
3763 rxr->rx_mbuf_tmpmap = map;
3765 /* Save the mbuf and update our counter. */
3766 rxr->rx_mbuf_ptr[*chain_prod] = m_new;
3767 rxr->rx_mbuf_paddr[*chain_prod] = seg.ds_addr;
3770 bce_setup_rxdesc_std(rxr, *chain_prod, prod_bseq);
/* bce_setup_rxdesc_std: write the rx_bd descriptor for the mbuf already    */
/* stored at chain_prod (address, length, START/END flags) and update       */
/* *prod_bseq (update elided from this view).                               */
3777 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
3778 uint32_t *prod_bseq)
3784 paddr = rxr->rx_mbuf_paddr[chain_prod];
3785 len = rxr->rx_mbuf_ptr[chain_prod]->m_len;
3787 /* Setup the rx_bd for the first segment. */
3788 rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
/* Descriptor fields are little-endian on the wire. */
3790 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3791 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3792 rxbd->rx_bd_len = htole32(len);
3793 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3796 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3800 /****************************************************************************/
3801 /* Initialize the TX context memory. */
3805 /****************************************************************************/
/* bce_init_tx_context: program the L2 TX connection context (CID type and  */
/* first chain-page address).  5709/5716 use the *_XI register offsets;     */
/* older chips use the legacy offsets.                                      */
3807 bce_init_tx_context(struct bce_tx_ring *txr)
3811 /* Initialize the context ID for an L2 TX chain. */
3812 if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
3813 BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
3814 /* Set the CID type to support an L2 connection. */
3815 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3816 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3817 BCE_L2CTX_TX_TYPE_XI, val);
3818 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3819 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3820 BCE_L2CTX_TX_CMD_TYPE_XI, val);
3822 /* Point the hardware to the first page in the chain. */
3823 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3824 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3825 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
3826 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3827 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3828 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
/* Legacy (pre-5709) context layout below. */
3830 /* Set the CID type to support an L2 connection. */
3831 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3832 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
3833 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3834 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3835 BCE_L2CTX_TX_CMD_TYPE, val);
3837 /* Point the hardware to the first page in the chain. */
3838 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3839 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3840 BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
3841 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3842 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3843 BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
3848 /****************************************************************************/
3849 /* Allocate memory and initialize the TX data structures. */
3852 /* 0 for success, positive value for failure. */
3853 /****************************************************************************/
/* bce_init_tx_chain: reset TX ring indices/counters, link the chain pages  */
/* into a circular list via their last BD entry, and program the TX         */
/* context (bce_init_tx_context).                                           */
3855 bce_init_tx_chain(struct bce_tx_ring *txr)
3860 /* Set the initial TX producer/consumer indices. */
3863 txr->tx_prod_bseq = 0;
3864 txr->used_tx_bd = 0;
3865 txr->max_tx_bd = USABLE_TX_BD(txr);
3868 * The NetXtreme II supports a linked-list structre called
3869 * a Buffer Descriptor Chain (or BD chain). A BD chain
3870 * consists of a series of 1 or more chain pages, each of which
3871 * consists of a fixed number of BD entries.
3872 * The last BD entry on each page is a pointer to the next page
3873 * in the chain, and the last pointer in the BD chain
3874 * points back to the beginning of the chain.
3877 /* Set the TX next pointer chain entries. */
3878 for (i = 0; i < txr->tx_pages; i++) {
3881 txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3883 /* Check if we've reached the last page. */
/* Last page wraps back to page 0 (index selection elided from view). */
3884 if (i == (txr->tx_pages - 1))
3889 txbd->tx_bd_haddr_hi =
3890 htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
3891 txbd->tx_bd_haddr_lo =
3892 htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
3894 bce_init_tx_context(txr);
3900 /****************************************************************************/
3901 /* Free memory and clear the TX data structures. */
3905 /****************************************************************************/
/* bce_free_tx_chain: release every mbuf still queued on the TX ring,       */
/* zero all chain pages, and reset the used-BD counter.                     */
3907 bce_free_tx_chain(struct bce_tx_ring *txr)
3911 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3912 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
3913 if (txr->tx_mbuf_ptr[i] != NULL) {
3914 bus_dmamap_unload(txr->tx_mbuf_tag,
3915 txr->tx_mbuf_map[i]);
3916 m_freem(txr->tx_mbuf_ptr[i]);
/* NULL the slot so a later free pass does not double-free. */
3917 txr->tx_mbuf_ptr[i] = NULL;
3921 /* Clear each TX chain page. */
3922 for (i = 0; i < txr->tx_pages; i++)
3923 bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3924 txr->used_tx_bd = 0;
3928 /****************************************************************************/
3929 /* Initialize the RX context memory. */
3933 /****************************************************************************/
/* bce_init_rx_context: program the L2 RX connection context — CID type,    */
/* flow-control watermarks (5709/5716 only), MQ bin mapping, and the        */
/* physical address of the first rx_bd chain page.                          */
3935 bce_init_rx_context(struct bce_rx_ring *rxr)
3939 /* Initialize the context ID for an L2 RX chain. */
3940 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
3941 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
3944 * Set the level for generating pause frames
3945 * when the number of available rx_bd's gets
3946 * too low (the low watermark) and the level
3947 * when pause frames can be stopped (the high
3950 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
3951 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
3952 uint32_t lo_water, hi_water;
3954 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
3955 hi_water = USABLE_RX_BD(rxr) / 4;
/* Scale raw BD counts into the units the context expects. */
3957 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
3958 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
3962 else if (hi_water == 0)
3965 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
3968 CTX_WR(rxr->sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);
3970 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
3971 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
3972 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
3973 val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
3974 REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
3977 /* Point the hardware to the first page in the chain. */
3978 val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
3979 CTX_WR(rxr->sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
3980 val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
3981 CTX_WR(rxr->sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
3985 /****************************************************************************/
3986 /* Allocate memory and initialize the RX data structures. */
3989 /* 0 for success, positive value for failure. */
3990 /****************************************************************************/
/* bce_init_rx_chain: reset RX indices, link the rx_bd chain pages into a   */
/* circular list, fill every usable BD with a fresh mbuf cluster, publish   */
/* the producer index/bseq to the chip, and program the RX context.         */
3992 bce_init_rx_chain(struct bce_rx_ring *rxr)
3996 uint16_t prod, chain_prod;
3999 /* Initialize the RX producer and consumer indices. */
4002 rxr->rx_prod_bseq = 0;
4003 rxr->free_rx_bd = USABLE_RX_BD(rxr);
4004 rxr->max_rx_bd = USABLE_RX_BD(rxr);
4006 /* Initialize the RX next pointer chain entries. */
4007 for (i = 0; i < rxr->rx_pages; i++) {
4010 rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4012 /* Check if we've reached the last page. */
/* Last page wraps back to page 0 (index selection elided from view). */
4013 if (i == (rxr->rx_pages - 1))
4018 /* Setup the chain page pointers. */
4019 rxbd->rx_bd_haddr_hi =
4020 htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
4021 rxbd->rx_bd_haddr_lo =
4022 htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
4025 /* Allocate mbuf clusters for the rx_bd chain. */
4026 prod = prod_bseq = 0;
4027 while (prod < TOTAL_RX_BD(rxr)) {
4028 chain_prod = RX_CHAIN_IDX(rxr, prod);
/* init=1: blocking allocation is acceptable during ring setup. */
4029 if (bce_newbuf_std(rxr, &prod, &chain_prod, &prod_bseq, 1)) {
4030 if_printf(&rxr->sc->arpcom.ac_if,
4031 "Error filling RX chain: rx_bd[0x%04X]!\n",
4036 prod = NEXT_RX_BD(prod);
4039 /* Save the RX chain producer index. */
4040 rxr->rx_prod = prod;
4041 rxr->rx_prod_bseq = prod_bseq;
4043 /* Tell the chip about the waiting rx_bd's. */
4044 REG_WR16(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
4046 REG_WR(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
4049 bce_init_rx_context(rxr);
4055 /****************************************************************************/
4056 /* Free memory and clear the RX data structures. */
4060 /****************************************************************************/
/* bce_free_rx_chain: release every mbuf still held by the RX ring and      */
/* zero all rx_bd chain pages.                                              */
4062 bce_free_rx_chain(struct bce_rx_ring *rxr)
4066 /* Free any mbufs still in the RX mbuf chain. */
4067 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
4068 if (rxr->rx_mbuf_ptr[i] != NULL) {
4069 bus_dmamap_unload(rxr->rx_mbuf_tag,
4070 rxr->rx_mbuf_map[i]);
4071 m_freem(rxr->rx_mbuf_ptr[i]);
/* NULL the slot so a later free pass does not double-free. */
4072 rxr->rx_mbuf_ptr[i] = NULL;
4076 /* Clear each RX chain page. */
4077 for (i = 0; i < rxr->rx_pages; i++)
4078 bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4082 /****************************************************************************/
4083 /* Set media options. */
4086 /* 0 for success, positive value for failure. */
4087 /****************************************************************************/
/* bce_ifmedia_upd: ifmedia "change" callback — resets the PHY(s) when      */
/* multiple instances exist, then re-applies the selected media via MII.    */
4089 bce_ifmedia_upd(struct ifnet *ifp)
4091 struct bce_softc *sc = ifp->if_softc;
4092 struct mii_data *mii = device_get_softc(sc->bce_miibus);
4096 * 'mii' will be NULL, when this function is called on following
4097 * code path: bce_attach() -> bce_mgmt_init()
4100 /* Make sure the MII bus has been enumerated. */
4102 if (mii->mii_instance) {
4103 struct mii_softc *miisc;
4105 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4106 mii_phy_reset(miisc);
4108 error = mii_mediachg(mii);
4114 /****************************************************************************/
4115 /* Reports current media status. */
4119 /****************************************************************************/
/* bce_ifmedia_sts: ifmedia "status" callback — copy the MII layer's        */
/* current active media and link status into the ifmediareq.                */
4121 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4123 struct bce_softc *sc = ifp->if_softc;
4124 struct mii_data *mii = device_get_softc(sc->bce_miibus);
4127 ifmr->ifm_active = mii->mii_media_active;
4128 ifmr->ifm_status = mii->mii_media_status;
4132 /****************************************************************************/
4133 /* Handles PHY generated interrupt events. */
4137 /****************************************************************************/
/* bce_phy_intr: process a link-state attention.  Compares the attn bits    */
/* against their ack'd copy in the status block, acknowledges the change    */
/* via PCICFG set/clear commands, and lets the tick routine resolve the     */
/* actual media state.  Caller must hold the interface serializer.          */
4139 bce_phy_intr(struct bce_softc *sc)
4141 uint32_t new_link_state, old_link_state;
4142 struct ifnet *ifp = &sc->arpcom.ac_if;
4144 ASSERT_SERIALIZED(ifp->if_serializer);
4146 new_link_state = sc->status_block->status_attn_bits &
4147 STATUS_ATTN_BITS_LINK_STATE;
4148 old_link_state = sc->status_block->status_attn_bits_ack &
4149 STATUS_ATTN_BITS_LINK_STATE;
4151 /* Handle any changes if the link state has changed. */
4152 if (new_link_state != old_link_state) { /* XXX redundant? */
4153 /* Update the status_attn_bits_ack field in the status block. */
4154 if (new_link_state) {
4155 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4156 STATUS_ATTN_BITS_LINK_STATE);
4158 if_printf(ifp, "Link is now UP.\n");
4160 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4161 STATUS_ATTN_BITS_LINK_STATE);
4163 if_printf(ifp, "Link is now DOWN.\n");
4167 * Assume link is down and allow tick routine to
4168 * update the state based on the actual media state.
/* Run the tick immediately rather than waiting for the next callout. */
4171 callout_stop(&sc->bce_tick_callout);
4172 bce_tick_serialized(sc);
4175 /* Acknowledge the link change interrupt. */
4176 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4180 /****************************************************************************/
4181 /* Reads the receive consumer value from the status block (skipping over */
4182 /* chain page pointer if necessary). */
4186 /****************************************************************************/
4187 static __inline uint16_t
4188 bce_get_hw_rx_cons(struct bce_softc *sc)
4190 uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0;
/* If the index lands on a page's next-pointer BD, step past it. */
4192 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4198 /****************************************************************************/
4199 /* Handles received frame interrupt events. */
4203 /****************************************************************************/
/* bce_rx_intr: drain completed RX descriptors up to hw_cons.  For each     */
/* frame: validate the l2_fhdr status, replenish the BD with a new mbuf     */
/* (reusing the old one on failure), apply checksum/VLAN offload results,   */
/* and hand the packet to the stack.  'count' bounds the work when polling  */
/* (negative disables the bound).  Serializer must be held.                 */
4205 bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
4207 struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
4208 uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4209 uint32_t sw_prod_bseq;
4211 ASSERT_SERIALIZED(ifp->if_serializer);
4213 /* Get working copies of the driver's view of the RX indices. */
4214 sw_cons = rxr->rx_cons;
4215 sw_prod = rxr->rx_prod;
4216 sw_prod_bseq = rxr->rx_prod_bseq;
4218 /* Scan through the receive chain as long as there is work to do. */
4219 while (sw_cons != hw_cons) {
4220 struct mbuf *m = NULL;
4221 struct l2_fhdr *l2fhdr = NULL;
4223 uint32_t status = 0;
4225 #ifdef IFPOLL_ENABLE
/* Polling budget: stop once 'count' frames have been processed. */
4226 if (count >= 0 && count-- == 0)
4231 * Convert the producer/consumer indices
4232 * to an actual rx_bd index.
4234 sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
4235 sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
4239 /* The mbuf is stored with the last rx_bd entry of a packet. */
4240 if (rxr->rx_mbuf_ptr[sw_chain_cons] != NULL) {
/* Sanity: with single-BD frames cons and prod must coincide. */
4241 if (sw_chain_cons != sw_chain_prod) {
4242 if_printf(ifp, "RX cons(%d) != prod(%d), "
4243 "drop!\n", sw_chain_cons, sw_chain_prod);
4244 IFNET_STAT_INC(ifp, ierrors, 1);
4246 bce_setup_rxdesc_std(rxr, sw_chain_cons,
4249 goto bce_rx_int_next_rx;
4252 /* Unmap the mbuf from DMA space. */
4253 bus_dmamap_sync(rxr->rx_mbuf_tag,
4254 rxr->rx_mbuf_map[sw_chain_cons],
4255 BUS_DMASYNC_POSTREAD);
4257 /* Save the mbuf from the driver's chain. */
4258 m = rxr->rx_mbuf_ptr[sw_chain_cons];
4261 * Frames received on the NetXteme II are prepended
4262 * with an l2_fhdr structure which provides status
4263 * information about the received frame (including
4264 * VLAN tags and checksum info). The frames are also
4265 * automatically adjusted to align the IP header
4266 * (i.e. two null bytes are inserted before the
4267 * Ethernet header). As a result the data DMA'd by
4268 * the controller into the mbuf is as follows:
4270 * +---------+-----+---------------------+-----+
4271 * | l2_fhdr | pad | packet data | FCS |
4272 * +---------+-----+---------------------+-----+
4274 * The l2_fhdr needs to be checked and skipped and the
4275 * FCS needs to be stripped before sending the packet
4278 l2fhdr = mtod(m, struct l2_fhdr *);
4280 len = l2fhdr->l2_fhdr_pkt_len;
4281 status = l2fhdr->l2_fhdr_status;
/* Strip the trailing Ethernet FCS from the reported length. */
4283 len -= ETHER_CRC_LEN;
4285 /* Check the received frame for errors. */
4286 if (status & (L2_FHDR_ERRORS_BAD_CRC |
4287 L2_FHDR_ERRORS_PHY_DECODE |
4288 L2_FHDR_ERRORS_ALIGNMENT |
4289 L2_FHDR_ERRORS_TOO_SHORT |
4290 L2_FHDR_ERRORS_GIANT_FRAME)) {
4291 IFNET_STAT_INC(ifp, ierrors, 1);
4293 /* Reuse the mbuf for a new frame. */
4294 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4297 goto bce_rx_int_next_rx;
4301 * Get a new mbuf for the rx_bd. If no new
4302 * mbufs are available then reuse the current mbuf,
4303 * log an ierror on the interface, and generate
4304 * an error in the system log.
4306 if (bce_newbuf_std(rxr, &sw_prod, &sw_chain_prod,
4307 &sw_prod_bseq, 0)) {
4308 IFNET_STAT_INC(ifp, ierrors, 1);
4310 /* Try and reuse the exisitng mbuf. */
4311 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4314 goto bce_rx_int_next_rx;
4318 * Skip over the l2_fhdr when passing
4319 * the data up the stack.
4321 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4323 m->m_pkthdr.len = m->m_len = len;
4324 m->m_pkthdr.rcvif = ifp;
4326 /* Validate the checksum if offload enabled. */
4327 if (ifp->if_capenable & IFCAP_RXCSUM) {
4328 /* Check for an IP datagram. */
4329 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4330 m->m_pkthdr.csum_flags |=
4333 /* Check if the IP checksum is valid. */
4334 if ((l2fhdr->l2_fhdr_ip_xsum ^
4336 m->m_pkthdr.csum_flags |=
4341 /* Check for a valid TCP/UDP frame. */
4342 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4343 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4345 /* Check for a good TCP/UDP checksum. */
4347 (L2_FHDR_ERRORS_TCP_XSUM |
4348 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4349 m->m_pkthdr.csum_data =
4350 l2fhdr->l2_fhdr_tcp_udp_xsum;
4351 m->m_pkthdr.csum_flags |=
4358 IFNET_STAT_INC(ifp, ipackets, 1);
4360 sw_prod = NEXT_RX_BD(sw_prod);
4363 sw_cons = NEXT_RX_BD(sw_cons);
4365 /* If we have a packet, pass it up the stack */
4367 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4368 m->m_flags |= M_VLANTAG;
4369 m->m_pkthdr.ether_vlantag =
4370 l2fhdr->l2_fhdr_vlan_tag;
4372 ifp->if_input(ifp, m);
/* Publish the updated software indices back to the ring... */
4376 rxr->rx_cons = sw_cons;
4377 rxr->rx_prod = sw_prod;
4378 rxr->rx_prod_bseq = sw_prod_bseq;
/* ...and tell the chip about the newly replenished rx_bd's. */
4380 REG_WR16(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
4382 REG_WR(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
4387 /****************************************************************************/
4388 /* Reads the transmit consumer value from the status block (skipping over */
4389 /* chain page pointer if necessary). */
4393 /****************************************************************************/
4394 static __inline uint16_t
4395 bce_get_hw_tx_cons(struct bce_softc *sc)
4397 uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0;
/* If the index lands on a page's next-pointer BD, step past it. */
4399 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4405 /****************************************************************************/
4406 /* Handles transmit completion interrupt events. */
4410 /****************************************************************************/
/* bce_tx_intr: reclaim completed TX descriptors up to hw_tx_cons, freeing  */
/* the mbuf/DMA map stored on each packet's last tx_bd, then clear the      */
/* oactive state once enough spare BDs are available.  Serializer held.     */
4412 bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
4414 struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4415 uint16_t sw_tx_cons, sw_tx_chain_cons;
4417 ASSERT_SERIALIZED(ifp->if_serializer);
4419 /* Get the hardware's view of the TX consumer index. */
4420 sw_tx_cons = txr->tx_cons;
4422 /* Cycle through any completed TX chain page entries. */
4423 while (sw_tx_cons != hw_tx_cons) {
4424 sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
4427 * Free the associated mbuf. Remember
4428 * that only the last tx_bd of a packet
4429 * has an mbuf pointer and DMA map.
4431 if (txr->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4432 /* Unmap the mbuf. */
4433 bus_dmamap_unload(txr->tx_mbuf_tag,
4434 txr->tx_mbuf_map[sw_tx_chain_cons]);
4436 /* Free the mbuf. */
4437 m_freem(txr->tx_mbuf_ptr[sw_tx_chain_cons]);
4438 txr->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4440 IFNET_STAT_INC(ifp, opackets, 1);
4444 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4447 if (txr->used_tx_bd == 0) {
4448 /* Clear the TX timeout timer. */
4452 /* Clear the tx hardware queue full flag. */
/* Only resume transmit once a full spare-BD slot is available. */
4453 if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
4454 ifq_clr_oactive(&ifp->if_snd);
4455 txr->tx_cons = sw_tx_cons;
4459 /****************************************************************************/
4460 /* Disables interrupt generation. */
4464 /****************************************************************************/
/* bce_disable_intr: mask chip interrupts, stop the MSI-check callout and   */
/* reset its bookkeeping, and disable the serializer interrupt handler.     */
4466 bce_disable_intr(struct bce_softc *sc)
4468 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
/* Read back to flush the mask write. */
4469 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
/* Reset the lost-MSI detection state. */
4471 callout_stop(&sc->bce_ckmsi_callout);
4472 sc->bce_msi_maylose = FALSE;
4473 sc->bce_check_rx_cons = 0;
4474 sc->bce_check_tx_cons = 0;
4475 sc->bce_check_status_idx = 0xffff;
4477 sc->bce_npoll.ifpc_stcount = 0;
4479 lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
4483 /****************************************************************************/
4484 /* Enables interrupt generation. */
4488 /****************************************************************************/
/* bce_enable_intr: re-enable the serializer handler, unmask interrupts     */
/* via the INT_ACK command mailbox, force a coalescing pass, and (re)start  */
/* the lost-MSI watchdog when the chip needs it.                            */
4490 bce_enable_intr(struct bce_softc *sc)
4492 lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);
/* Two-step ack: first with MASK_INT set, then without, at the last     */
/* status index the driver has seen.                                    */
4494 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4495 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4496 BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4497 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4498 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
/* Kick host coalescing so a pending event raises an interrupt now. */
4500 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
4502 if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
4503 sc->bce_msi_maylose = FALSE;
4504 sc->bce_check_rx_cons = 0;
4505 sc->bce_check_tx_cons = 0;
4506 sc->bce_check_status_idx = 0xffff;
4509 if_printf(&sc->arpcom.ac_if, "check msi\n");
4511 callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
4512 bce_check_msi, sc, sc->bce_intr_cpuid);
4517 /****************************************************************************/
4518 /* Reenables interrupt generation during interrupt handling. */
4522 /****************************************************************************/
/* bce_reenable_intr: ack the current status index.  Legacy INTx needs the  */
/* extra MASK_INT write before the unmasking write; MSI paths do not.       */
4524 bce_reenable_intr(struct bce_softc *sc)
4526 if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
4527 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4528 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4529 BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4531 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4532 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4536 /****************************************************************************/
4537 /* Handles controller initialization. */
4541 /****************************************************************************/
/* bce_init (signature line elided from this view; 'xsc' is the softc):    */
/* full bring-up path — reset, chipinit, blockinit, MAC address and MTU     */
/* programming, RX/TX ring initialization, interrupt enable, media update,  */
/* and starting the periodic tick.  Serializer must be held.                */
4545 struct bce_softc *sc = xsc;
4546 struct ifnet *ifp = &sc->arpcom.ac_if;
4550 ASSERT_SERIALIZED(ifp->if_serializer);
4552 /* Check if the driver is still running and bail out if it is. */
4553 if (ifp->if_flags & IFF_RUNNING)
4558 error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4560 if_printf(ifp, "Controller reset failed!\n");
4564 error = bce_chipinit(sc);
4566 if_printf(ifp, "Controller initialization failed!\n");
4570 error = bce_blockinit(sc);
4572 if_printf(ifp, "Block initialization failed!\n");
4576 /* Load our MAC address. */
4577 bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4578 bce_set_mac_addr(sc);
4580 /* Calculate and program the Ethernet MTU size. */
4581 ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4584 * Program the mtu, enabling jumbo frame
4585 * support if necessary. Also set the mbuf
4586 * allocation count for RX frames.
4588 if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4590 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4591 min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4592 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
/* Jumbo RX buffers are not implemented in this driver yet. */
4594 panic("jumbo buffer is not supported yet");
4597 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4600 /* Program appropriate promiscuous/multicast filtering. */
4601 bce_set_rx_mode(sc);
4603 /* Init RX buffer descriptor chain. */
4604 for (i = 0; i < sc->ring_cnt; ++i)
4605 bce_init_rx_chain(&sc->rx_rings[i]); /* XXX return value */
4607 /* Init TX buffer descriptor chain. */
4608 for (i = 0; i < sc->ring_cnt; ++i)
4609 bce_init_tx_chain(&sc->tx_rings[i]);
4611 #ifdef IFPOLL_ENABLE
4612 /* Disable interrupts if we are polling. */
4613 if (ifp->if_flags & IFF_NPOLLING) {
4614 bce_disable_intr(sc);
/* Polling mode: force quick-consumer trip counts of 1 so the status  */
/* block stays current without interrupt-driven coalescing.           */
4616 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4617 (1 << 16) | sc->bce_rx_quick_cons_trip);
4618 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4619 (1 << 16) | sc->bce_tx_quick_cons_trip);
4622 /* Enable host interrupts. */
4623 bce_enable_intr(sc);
4625 bce_ifmedia_upd(ifp);
/* Mark the interface up and schedule the first tick on the IRQ CPU. */
4627 ifp->if_flags |= IFF_RUNNING;
4628 ifq_clr_oactive(&ifp->if_snd);
4630 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
4631 sc->bce_intr_cpuid);
4638 /****************************************************************************/
4639 /* Initialize the controller just enough so that any management firmware */
4640 /* running on the device will continue to operate correctly. */
4644 /****************************************************************************/
/*
 * Minimal bring-up so management firmware (ASF/IPMI/UMP) keeps working
 * while the driver itself is not fully initialized.
 */
4646 bce_mgmt_init(struct bce_softc *sc)
4648 struct ifnet *ifp = &sc->arpcom.ac_if;
4650 /* Bail out if management firmware is not running. */
4651 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4654 /* Enable all critical blocks in the MAC. */
/* 5709/5716 use the XI variant of the enable mask; others use the default. */
4655 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
4656 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
4657 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4658 BCE_MISC_ENABLE_DEFAULT_XI);
4660 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
/* Read back to flush the posted write. */
4662 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4665 bce_ifmedia_upd(ifp);
4669 /****************************************************************************/
4670 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4671 /* memory visible to the controller. */
4674 /* 0 for success, positive value for failure. */
4675 /****************************************************************************/
/*
 * Map one outgoing mbuf chain into the TX descriptor ring.
 * On success the frame's segments occupy tx_bd entries, *nsegs_used is
 * advanced, and txr->tx_prod / tx_prod_bseq are updated.  *m_head may be
 * replaced by bus_dmamap_load_mbuf_defrag() if defragmentation occurs.
 * Returns 0 on success, errno on failure (per the elided error paths).
 */
4677 bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
4679 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4680 bus_dmamap_t map, tmp_map;
4681 struct mbuf *m0 = *m_head;
4682 struct tx_bd *txbd = NULL;
4683 uint16_t vlan_tag = 0, flags = 0, mss = 0;
4684 uint16_t chain_prod, chain_prod_start, prod;
4686 int i, error, maxsegs, nsegs;
4688 /* Transfer any checksum offload flags to the bd. */
4689 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
/* TSO setup fills in flags and mss for the descriptors below. */
4690 error = bce_tso_setup(txr, m_head, &flags, &mss);
4694 } else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
4695 if (m0->m_pkthdr.csum_flags & CSUM_IP)
4696 flags |= TX_BD_FLAGS_IP_CKSUM;
4697 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4698 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4701 /* Transfer any VLAN tags to the bd. */
4702 if (m0->m_flags & M_VLANTAG) {
4703 flags |= TX_BD_FLAGS_VLAN_TAG;
4704 vlan_tag = m0->m_pkthdr.ether_vlantag;
4707 prod = txr->tx_prod;
4708 chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);
4710 /* Map the mbuf into DMAable memory. */
4711 map = txr->tx_mbuf_map[chain_prod_start];
4713 maxsegs = txr->max_tx_bd - txr->used_tx_bd;
/* bce_start() guarantees at least BCE_TX_SPARE_SPACE free descriptors. */
4714 KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4715 ("not enough segments %d", maxsegs));
4716 if (maxsegs > BCE_MAX_SEGMENTS)
4717 maxsegs = BCE_MAX_SEGMENTS;
4719 /* Map the mbuf into our DMA address space. */
4720 error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
4721 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
4724 bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4726 *nsegs_used += nsegs;
4731 /* prod points to an empty tx_bd at this point. */
4732 prod_bseq = txr->tx_prod_bseq;
4735 * Cycle through each mbuf segment that makes up
4736 * the outgoing frame, gathering the mapping info
4737 * for that segment and creating a tx_bd to for
4740 for (i = 0; i < nsegs; i++) {
4741 chain_prod = TX_CHAIN_IDX(txr, prod);
4743 &txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
/* Descriptor fields are little-endian on the wire. */
4745 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4746 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4747 txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
4748 htole16(segs[i].ds_len);
4749 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4750 txbd->tx_bd_flags = htole16(flags);
/* Byte sequence advances by each segment's length. */
4752 prod_bseq += segs[i].ds_len;
/* START flag is set on the first descriptor (guarded by elided i == 0). */
4754 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4755 prod = NEXT_TX_BD(prod);
4758 /* Set the END flag on the last TX buffer descriptor. */
4759 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4762 * Ensure that the mbuf pointer for this transmission
4763 * is placed at the array index of the last
4764 * descriptor in this chain. This is done
4765 * because a single map is used for all
4766 * segments of the mbuf and we don't want to
4767 * unload the map before all of the segments
4770 txr->tx_mbuf_ptr[chain_prod] = m0;
/* Swap maps so the loaded map travels with the last descriptor slot. */
4772 tmp_map = txr->tx_mbuf_map[chain_prod];
4773 txr->tx_mbuf_map[chain_prod] = map;
4774 txr->tx_mbuf_map[chain_prod_start] = tmp_map;
4776 txr->used_tx_bd += nsegs;
4778 /* prod points to the next free tx_bd at this point. */
4779 txr->tx_prod = prod;
4780 txr->tx_prod_bseq = prod_bseq;
/*
 * Kick the hardware: publish the ring's producer index and byte sequence
 * to the chip's mailbox registers so it begins transmitting.
 */
4791 bce_xmit(struct bce_tx_ring *txr)
4793 /* Start the transmit. */
4794 REG_WR16(txr->sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BIDX,
4796 REG_WR(txr->sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BSEQ,
4801 /****************************************************************************/
4802 /* Main transmit routine when called from another routine with a lock. */
4806 /****************************************************************************/
/*
 * if_start handler: drain the send queue into TX ring 0, encapsulating
 * each frame with bce_encap() and kicking the hardware via bce_xmit()
 * every tx_wreg segments.  Runs under the interface serializer.
 */
4808 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4810 struct bce_softc *sc = ifp->if_softc;
4811 struct bce_tx_ring *txr = &sc->tx_rings[0];
4814 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
4815 ASSERT_SERIALIZED(ifp->if_serializer);
4817 /* If there's no link or the transmit queue is empty then just exit. */
4818 if (!sc->bce_link) {
/* No link: drop everything queued rather than letting it rot. */
4819 ifq_purge(&ifp->if_snd);
4823 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
4827 struct mbuf *m_head;
4830 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4833 if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
4834 ifq_set_oactive(&ifp->if_snd);
4838 /* Check for any frames to send. */
4839 m_head = ifq_dequeue(&ifp->if_snd, NULL);
4844 * Pack the data into the transmit ring. If we
4845 * don't have room, place the mbuf back at the
4846 * head of the queue and set the OACTIVE flag
4847 * to wait for the NIC to drain the chain.
4849 if (bce_encap(txr, &m_head, &count)) {
4850 IFNET_STAT_INC(ifp, oerrors, 1);
4851 if (txr->used_tx_bd == 0) {
4854 ifq_set_oactive(&ifp->if_snd);
/* Batch doorbell writes: only kick the chip every tx_wreg segments. */
4859 if (count >= txr->tx_wreg) {
4864 /* Send a copy of the frame to any BPF listeners. */
4865 ETHER_BPF_MTAP(ifp, m_head);
4867 /* Set the tx timeout. */
4868 ifp->if_timer = BCE_TX_TIMEOUT;
4875 /****************************************************************************/
4876 /* Handles any IOCTL calls from the operating system. */
4879 /* 0 for success, positive value for failure. */
4880 /****************************************************************************/
/*
 * ioctl handler: MTU changes, interface flags, multicast list updates,
 * media selection and capability toggles (checksum offload, TSO).
 * Runs under the interface serializer; returns 0 or errno.
 */
4882 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
4884 struct bce_softc *sc = ifp->if_softc;
4885 struct ifreq *ifr = (struct ifreq *)data;
4886 struct mii_data *mii;
4887 int mask, error = 0;
4889 ASSERT_SERIALIZED(ifp->if_serializer);
4893 /* Check that the MTU setting is supported. */
/* Upper bound depends on jumbo support (elided #ifdef selects which). */
4894 if (ifr->ifr_mtu < BCE_MIN_MTU ||
4896 ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
4898 ifr->ifr_mtu > ETHERMTU
4905 ifp->if_mtu = ifr->ifr_mtu;
4906 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */
4911 if (ifp->if_flags & IFF_UP) {
4912 if (ifp->if_flags & IFF_RUNNING) {
/* Already running: only reprogram RX filters if those bits changed. */
4913 mask = ifp->if_flags ^ sc->bce_if_flags;
4915 if (mask & (IFF_PROMISC | IFF_ALLMULTI))
4916 bce_set_rx_mode(sc);
4920 } else if (ifp->if_flags & IFF_RUNNING) {
4923 /* If MFW is running, restart the controller a bit. */
4924 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
4925 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
/* Remember the flags we acted on for the next delta computation. */
4930 sc->bce_if_flags = ifp->if_flags;
4935 if (ifp->if_flags & IFF_RUNNING)
4936 bce_set_rx_mode(sc);
/* Media ioctls are delegated to the MII layer. */
4941 mii = device_get_softc(sc->bce_miibus);
4942 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4946 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4947 if (mask & IFCAP_HWCSUM) {
4948 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
4949 if (ifp->if_capenable & IFCAP_TXCSUM)
4950 ifp->if_hwassist |= BCE_CSUM_FEATURES;
4952 ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
4954 if (mask & IFCAP_TSO) {
4955 ifp->if_capenable ^= IFCAP_TSO;
4956 if (ifp->if_capenable & IFCAP_TSO)
4957 ifp->if_hwassist |= CSUM_TSO;
4959 ifp->if_hwassist &= ~CSUM_TSO;
/* Everything else falls through to the generic Ethernet handler. */
4964 error = ether_ioctl(ifp, command, data);
4971 /****************************************************************************/
4972 /* Transmit timeout handler. */
4976 /****************************************************************************/
/*
 * TX watchdog: fires when a transmit has not completed within
 * BCE_TX_TIMEOUT.  Skips the reset if the stall is due to received
 * pause frames, otherwise forces a full reinitialization.
 */
4978 bce_watchdog(struct ifnet *ifp)
4980 struct bce_softc *sc = ifp->if_softc;
4982 ASSERT_SERIALIZED(ifp->if_serializer);
4985 * If we are in this routine because of pause frames, then
4986 * don't reset the hardware.
4988 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
4991 if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
/* Clearing IFF_RUNNING makes the (elided) bce_init call reinitialize. */
4993 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */
4996 IFNET_STAT_INC(ifp, oerrors, 1);
4998 if (!ifq_is_empty(&ifp->if_snd))
5003 #ifdef IFPOLL_ENABLE
/*
 * Polling-mode work loop (IFPOLL): mirrors bce_intr() but is driven by
 * the polling framework.  Status/attention bits are only inspected every
 * ifpc_stfrac invocations to cut register traffic.
 */
5006 bce_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
5008 struct bce_softc *sc = ifp->if_softc;
5009 struct status_block *sblk = sc->status_block;
5010 struct bce_tx_ring *txr = &sc->tx_rings[0];
5011 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5012 uint16_t hw_tx_cons, hw_rx_cons;
5014 ASSERT_SERIALIZED(ifp->if_serializer);
5017 * Save the status block index value for use when enabling
5020 sc->last_status_idx = sblk->status_idx;
5022 /* Make sure status index is extracted before rx/tx cons */
/* Attention/link processing is rate-limited by the stfrac countdown. */
5025 if (sc->bce_npoll.ifpc_stcount-- == 0) {
5026 uint32_t status_attn_bits;
5028 sc->bce_npoll.ifpc_stcount = sc->bce_npoll.ifpc_stfrac;
5030 status_attn_bits = sblk->status_attn_bits;
5032 /* Was it a link change interrupt? */
5033 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5034 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
5038 * Clear any transient status updates during link state change.
5040 REG_WR(sc, BCE_HC_COMMAND,
5041 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5042 REG_RD(sc, BCE_HC_COMMAND);
5045 * If any other attention is asserted then the chip is toast.
5047 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5048 (sblk->status_attn_bits_ack &
5049 ~STATUS_ATTN_BITS_LINK_STATE)) {
5050 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5051 sblk->status_attn_bits);
5057 hw_rx_cons = bce_get_hw_rx_cons(sc);
5058 hw_tx_cons = bce_get_hw_tx_cons(sc);
5060 /* Check for any completed RX frames. */
/* count bounds how many RX frames one poll iteration may process. */
5061 if (hw_rx_cons != rxr->rx_cons)
5062 bce_rx_intr(rxr, count, hw_rx_cons);
5064 /* Check for any completed TX frames. */
5065 if (hw_tx_cons != txr->tx_cons)
5066 bce_tx_intr(txr, hw_tx_cons);
/* Apply any deferred coalescing parameter changes. */
5068 if (sc->bce_coalchg_mask)
5069 bce_coal_change(sc);
5071 /* Check for new frames to transmit. */
5072 if (!ifq_is_empty(&ifp->if_snd))
/*
 * IFPOLL registration/deregistration hook.  When info != NULL (elided
 * branch) polling is enabled: register the poll handler, disable chip
 * interrupts and set the "during interrupt" trip fields to 1; when
 * disabling, restore interrupt-driven coalescing parameters.
 */
5077 bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
5079 struct bce_softc *sc = ifp->if_softc;
5081 ASSERT_SERIALIZED(ifp->if_serializer);
5084 int cpuid = sc->bce_npoll.ifpc_cpuid;
5086 info->ifpi_rx[cpuid].poll_func = bce_npoll_compat;
5087 info->ifpi_rx[cpuid].arg = NULL;
5088 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
5090 if (ifp->if_flags & IFF_RUNNING) {
5091 bce_disable_intr(sc);
5093 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5094 (1 << 16) | sc->bce_rx_quick_cons_trip);
5095 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5096 (1 << 16) | sc->bce_tx_quick_cons_trip);
/* Transmit completions are steered to the polling CPU. */
5098 ifq_set_cpuid(&ifp->if_snd, cpuid);
5100 if (ifp->if_flags & IFF_RUNNING) {
5101 bce_enable_intr(sc);
/* Restore the full interrupt-mode coalescing trip values. */
5103 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5104 (sc->bce_tx_quick_cons_trip_int << 16) |
5105 sc->bce_tx_quick_cons_trip);
5106 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5107 (sc->bce_rx_quick_cons_trip_int << 16) |
5108 sc->bce_rx_quick_cons_trip);
5110 ifq_set_cpuid(&ifp->if_snd, sc->bce_intr_cpuid);
5114 #endif /* IFPOLL_ENABLE */
5118 * Interrupt handler.
5120 /****************************************************************************/
5121 /* Main interrupt entry point. Verifies that the controller generated the */
5122 /* interrupt and then calls a separate routine to handle the various */
5123 /* interrupt causes (PHY, TX, RX). */
5126 /* 0 for success, positive value for failure. */
5127 /****************************************************************************/
/*
 * Common interrupt worker shared by the legacy/MSI handlers: records the
 * status block index, handles link/attention events, services completed
 * RX/TX descriptors, then re-enables interrupts.
 */
5129 bce_intr(struct bce_softc *sc)
5131 struct ifnet *ifp = &sc->arpcom.ac_if;
5132 struct status_block *sblk;
5133 uint16_t hw_rx_cons, hw_tx_cons;
5134 uint32_t status_attn_bits;
5135 struct bce_tx_ring *txr = &sc->tx_rings[0];
5136 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5138 ASSERT_SERIALIZED(ifp->if_serializer);
5140 sblk = sc->status_block;
5143 * Save the status block index value for use during
5144 * the next interrupt.
5146 sc->last_status_idx = sblk->status_idx;
5148 /* Make sure status index is extracted before rx/tx cons */
5151 /* Check if the hardware has finished any work. */
5152 hw_rx_cons = bce_get_hw_rx_cons(sc);
5153 hw_tx_cons = bce_get_hw_tx_cons(sc);
5155 status_attn_bits = sblk->status_attn_bits;
5157 /* Was it a link change interrupt? */
/* Link state changed when attn bit and its ack copy disagree. */
5158 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5159 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5163 * Clear any transient status updates during link state
5166 REG_WR(sc, BCE_HC_COMMAND,
5167 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5168 REG_RD(sc, BCE_HC_COMMAND);
5172 * If any other attention is asserted then
5173 * the chip is toast.
5175 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5176 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5177 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5178 sblk->status_attn_bits);
5183 /* Check for any completed RX frames. */
/* -1: no bound on the number of RX frames processed per interrupt. */
5184 if (hw_rx_cons != rxr->rx_cons)
5185 bce_rx_intr(rxr, -1, hw_rx_cons);
5187 /* Check for any completed TX frames. */
5188 if (hw_tx_cons != txr->tx_cons)
5189 bce_tx_intr(txr, hw_tx_cons);
5191 /* Re-enable interrupts. */
5192 bce_reenable_intr(sc);
5194 if (sc->bce_coalchg_mask)
5195 bce_coal_change(sc);
5197 /* Handle any frames that arrived while handling the interrupt. */
5198 if (!ifq_is_empty(&ifp->if_snd))
/*
 * Legacy INTx handler: filters out shared-line interrupts that are not
 * ours, masks and acks the chip, then (elided) runs bce_intr().
 */
5203 bce_intr_legacy(void *xsc)
5205 struct bce_softc *sc = xsc;
5206 struct status_block *sblk;
5208 sblk = sc->status_block;
5211 * If the hardware status block index matches the last value
5212 * read by the driver and we haven't asserted our interrupt
5213 * then there's nothing to do.
5215 if (sblk->status_idx == sc->last_status_idx &&
5216 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
5217 BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5220 /* Ack the interrupt and stop others from occurring. */
5221 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5222 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5223 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5226 * Read back to deassert IRQ immediately to avoid too
5227 * many spurious interrupts.
5229 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
/*
 * MSI handler: no sharing, so no spurious-interrupt filter is needed —
 * just mask/ack the chip and (elided) run bce_intr().
 */
5235 bce_intr_msi(void *xsc)
5237 struct bce_softc *sc = xsc;
5239 /* Ack the interrupt and stop others from occurring. */
5240 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5241 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5242 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
/*
 * One-shot MSI handler; body elided from this view — presumably it calls
 * bce_intr() directly without the mask/ack write (one-shot mode masks in
 * hardware).  TODO(review): confirm against the full source.
 */
5248 bce_intr_msi_oneshot(void *xsc)
5254 /****************************************************************************/
5255 /* Programs the various packet receive modes (broadcast and multicast). */
5259 /****************************************************************************/
/*
 * Program the receive filters: promiscuous mode, all-multicast, or a
 * 256-bit multicast hash built from the interface's multicast list,
 * plus VLAN tag stripping policy and the RPM sort-user register.
 */
5261 bce_set_rx_mode(struct bce_softc *sc)
5263 struct ifnet *ifp = &sc->arpcom.ac_if;
5264 struct ifmultiaddr *ifma;
5265 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5266 uint32_t rx_mode, sort_mode;
5269 ASSERT_SERIALIZED(ifp->if_serializer);
5271 /* Initialize receive mode default settings. */
5272 rx_mode = sc->rx_mode &
5273 ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5274 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5275 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5278 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5281 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5282 !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5283 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5286 * Check for promiscuous, all multicast, or selected
5287 * multicast address filtering.
5289 if (ifp->if_flags & IFF_PROMISC) {
5290 /* Enable promiscuous mode. */
5291 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5292 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5293 } else if (ifp->if_flags & IFF_ALLMULTI) {
5294 /* Enable all multicast addresses. */
/* All-ones hash registers accept every multicast address. */
5295 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5296 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5299 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5301 /* Accept one or more multicast(s). */
5302 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5303 if (ifma->ifma_addr->sa_family != AF_LINK)
5306 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5307 ETHER_ADDR_LEN) & 0xFF;
/* Top 3 hash bits select the register, low 5 select the bit. */
5308 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5311 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5312 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5315 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5318 /* Only make changes if the receive mode has actually changed. */
5319 if (rx_mode != sc->rx_mode) {
5320 sc->rx_mode = rx_mode;
5321 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5324 /* Disable and clear the existing sort before enabling a new sort. */
5325 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5326 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5327 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5331 /****************************************************************************/
5332 /* Called periodically to update statistics from the controller's */
5333 /* statistics block. */
5337 /****************************************************************************/
/*
 * Copy the DMA'd hardware statistics block into the softc's sysctl
 * shadow counters and derive the ifnet collision/ierror/oerror totals.
 * 64-bit counters are assembled from their _hi/_lo register halves.
 */
5339 bce_stats_update(struct bce_softc *sc)
5341 struct ifnet *ifp = &sc->arpcom.ac_if;
5342 struct statistics_block *stats = sc->stats_block;
5344 ASSERT_SERIALIZED(ifp->if_serializer);
5347 * Certain controllers don't report carrier sense errors correctly.
5348 * See errata E11_5708CA0_1165.
5350 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5351 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5352 IFNET_STAT_INC(ifp, oerrors,
5353 (u_long)stats->stat_Dot3StatsCarrierSenseErrors);
5357 * Update the sysctl statistics from the hardware statistics.
5359 sc->stat_IfHCInOctets =
5360 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5361 (uint64_t)stats->stat_IfHCInOctets_lo;
5363 sc->stat_IfHCInBadOctets =
5364 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5365 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5367 sc->stat_IfHCOutOctets =
5368 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5369 (uint64_t)stats->stat_IfHCOutOctets_lo;
5371 sc->stat_IfHCOutBadOctets =
5372 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5373 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5375 sc->stat_IfHCInUcastPkts =
5376 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5377 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5379 sc->stat_IfHCInMulticastPkts =
5380 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5381 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5383 sc->stat_IfHCInBroadcastPkts =
5384 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5385 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5387 sc->stat_IfHCOutUcastPkts =
5388 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5389 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5391 sc->stat_IfHCOutMulticastPkts =
5392 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5393 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5395 sc->stat_IfHCOutBroadcastPkts =
5396 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5397 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
/* The remaining hardware counters are 32-bit; copy them straight over. */
5399 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5400 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5402 sc->stat_Dot3StatsCarrierSenseErrors =
5403 stats->stat_Dot3StatsCarrierSenseErrors;
5405 sc->stat_Dot3StatsFCSErrors =
5406 stats->stat_Dot3StatsFCSErrors;
5408 sc->stat_Dot3StatsAlignmentErrors =
5409 stats->stat_Dot3StatsAlignmentErrors;
5411 sc->stat_Dot3StatsSingleCollisionFrames =
5412 stats->stat_Dot3StatsSingleCollisionFrames;
5414 sc->stat_Dot3StatsMultipleCollisionFrames =
5415 stats->stat_Dot3StatsMultipleCollisionFrames;
5417 sc->stat_Dot3StatsDeferredTransmissions =
5418 stats->stat_Dot3StatsDeferredTransmissions;
5420 sc->stat_Dot3StatsExcessiveCollisions =
5421 stats->stat_Dot3StatsExcessiveCollisions;
5423 sc->stat_Dot3StatsLateCollisions =
5424 stats->stat_Dot3StatsLateCollisions;
5426 sc->stat_EtherStatsCollisions =
5427 stats->stat_EtherStatsCollisions;
5429 sc->stat_EtherStatsFragments =
5430 stats->stat_EtherStatsFragments;
5432 sc->stat_EtherStatsJabbers =
5433 stats->stat_EtherStatsJabbers;
5435 sc->stat_EtherStatsUndersizePkts =
5436 stats->stat_EtherStatsUndersizePkts;
5438 sc->stat_EtherStatsOverrsizePkts =
5439 stats->stat_EtherStatsOverrsizePkts;
5441 sc->stat_EtherStatsPktsRx64Octets =
5442 stats->stat_EtherStatsPktsRx64Octets;
5444 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5445 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5447 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5448 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5450 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5451 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5453 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5454 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5456 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5457 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5459 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5460 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5462 sc->stat_EtherStatsPktsTx64Octets =
5463 stats->stat_EtherStatsPktsTx64Octets;
5465 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5466 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5468 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5469 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5471 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5472 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5474 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5475 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5477 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5478 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5480 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5481 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5483 sc->stat_XonPauseFramesReceived =
5484 stats->stat_XonPauseFramesReceived;
5486 sc->stat_XoffPauseFramesReceived =
5487 stats->stat_XoffPauseFramesReceived;
5489 sc->stat_OutXonSent =
5490 stats->stat_OutXonSent;
5492 sc->stat_OutXoffSent =
5493 stats->stat_OutXoffSent;
5495 sc->stat_FlowControlDone =
5496 stats->stat_FlowControlDone;
5498 sc->stat_MacControlFramesReceived =
5499 stats->stat_MacControlFramesReceived;
5501 sc->stat_XoffStateEntered =
5502 stats->stat_XoffStateEntered;
5504 sc->stat_IfInFramesL2FilterDiscards =
5505 stats->stat_IfInFramesL2FilterDiscards;
5507 sc->stat_IfInRuleCheckerDiscards =
5508 stats->stat_IfInRuleCheckerDiscards;
5510 sc->stat_IfInFTQDiscards =
5511 stats->stat_IfInFTQDiscards;
5513 sc->stat_IfInMBUFDiscards =
5514 stats->stat_IfInMBUFDiscards;
5516 sc->stat_IfInRuleCheckerP4Hit =
5517 stats->stat_IfInRuleCheckerP4Hit;
5519 sc->stat_CatchupInRuleCheckerDiscards =
5520 stats->stat_CatchupInRuleCheckerDiscards;
5522 sc->stat_CatchupInFTQDiscards =
5523 stats->stat_CatchupInFTQDiscards;
5525 sc->stat_CatchupInMBUFDiscards =
5526 stats->stat_CatchupInMBUFDiscards;
5528 sc->stat_CatchupInRuleCheckerP4Hit =
5529 stats->stat_CatchupInRuleCheckerP4Hit;
/* "No buffer" drop count lives in indirect register space. */
5531 sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5534 * Update the interface statistics from the
5535 * hardware statistics.
5537 IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions);
5539 IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts +
5540 (u_long)sc->stat_EtherStatsOverrsizePkts +
5541 (u_long)sc->stat_IfInMBUFDiscards +
5542 (u_long)sc->stat_Dot3StatsAlignmentErrors +
5543 (u_long)sc->stat_Dot3StatsFCSErrors +
5544 (u_long)sc->stat_IfInRuleCheckerDiscards +
5545 (u_long)sc->stat_IfInFTQDiscards +
5546 (u_long)sc->com_no_buffers);
5548 IFNET_STAT_SET(ifp, oerrors,
5549 (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5550 (u_long)sc->stat_Dot3StatsExcessiveCollisions +
5551 (u_long)sc->stat_Dot3StatsLateCollisions);
5555 /****************************************************************************/
5556 /* Periodic function to notify the bootcode that the driver is still */
5561 /****************************************************************************/
/*
 * Once-per-second heartbeat to the bootcode: writes an incrementing
 * sequence number to the shared-memory pulse mailbox and tracks whether
 * the bootcode still sees the driver as present.
 */
5563 bce_pulse(void *xsc)
5565 struct bce_softc *sc = xsc;
5566 struct ifnet *ifp = &sc->arpcom.ac_if;
5569 lwkt_serialize_enter(ifp->if_serializer);
5571 /* Tell the firmware that the driver is still running. */
5572 msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5573 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
5575 /* Update the bootcode condition. */
5576 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
5578 /* Report whether the bootcode still knows the driver is running. */
5579 if (!sc->bce_drv_cardiac_arrest) {
5580 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
5581 sc->bce_drv_cardiac_arrest = 1;
5582 if_printf(ifp, "Bootcode lost the driver pulse! "
5583 "(bc_state = 0x%08X)\n", sc->bc_state);
5587 * Not supported by all bootcode versions.
5588 * (v5.0.11+ and v5.2.1+) Older bootcode
5589 * will require the driver to reset the
5590 * controller to clear this condition.
5592 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
5593 sc->bce_drv_cardiac_arrest = 0;
5594 if_printf(ifp, "Bootcode found the driver pulse! "
5595 "(bc_state = 0x%08X)\n", sc->bc_state);
5599 /* Schedule the next pulse. */
5600 callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
5601 sc->bce_intr_cpuid);
5603 lwkt_serialize_exit(ifp->if_serializer);
5607 /****************************************************************************/
5608 /* Periodic function to check whether MSI is lost */
5612 /****************************************************************************/
/*
 * Periodic MSI-loss workaround: if hardware has pending work but the
 * driver's consumer indices have not moved since the last check, the MSI
 * may have been lost — toggle MSI enable in PCI config space to recover.
 */
5614 bce_check_msi(void *xsc)
5616 struct bce_softc *sc = xsc;
5617 struct ifnet *ifp = &sc->arpcom.ac_if;
5618 struct status_block *sblk = sc->status_block;
5619 struct bce_tx_ring *txr = &sc->tx_rings[0];
5620 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5622 lwkt_serialize_enter(ifp->if_serializer);
5624 KKASSERT(mycpuid == sc->bce_intr_cpuid);
/* Stop checking when not running or when polling has taken over. */
5626 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
5627 lwkt_serialize_exit(ifp->if_serializer);
/* Hardware has work pending (RX, TX, or link attention)... */
5631 if (bce_get_hw_rx_cons(sc) != rxr->rx_cons ||
5632 bce_get_hw_tx_cons(sc) != txr->tx_cons ||
5633 (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5634 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
/* ...yet nothing progressed since the previous check interval. */
5635 if (sc->bce_check_rx_cons == rxr->rx_cons &&
5636 sc->bce_check_tx_cons == txr->tx_cons &&
5637 sc->bce_check_status_idx == sc->last_status_idx) {
5640 if (!sc->bce_msi_maylose) {
/* First strike: flag it; a second stalled interval triggers recovery. */
5641 sc->bce_msi_maylose = TRUE;
5645 msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
5646 if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) {
5648 if_printf(ifp, "lost MSI\n");
/* Disable then re-enable MSI to re-arm message delivery. */
5650 REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
5651 msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
5652 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);
5655 } else if (bootverbose) {
5656 if_printf(ifp, "MSI may be lost\n");
/* Record current positions for the next interval's comparison. */
5660 sc->bce_msi_maylose = FALSE;
5661 sc->bce_check_rx_cons = rxr->rx_cons;
5662 sc->bce_check_tx_cons = txr->tx_cons;
5663 sc->bce_check_status_idx = sc->last_status_idx;
5666 callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
5668 lwkt_serialize_exit(ifp->if_serializer);
5672 /****************************************************************************/
5673 /* Periodic function to perform maintenance tasks. */
5677 /****************************************************************************/
/*
 * Once-per-second maintenance: refresh statistics, reschedule the tick,
 * and poll the MII for link transitions (kicking TX when link comes up).
 * Caller must hold the interface serializer.
 */
5679 bce_tick_serialized(struct bce_softc *sc)
5681 struct ifnet *ifp = &sc->arpcom.ac_if;
5682 struct mii_data *mii;
5684 ASSERT_SERIALIZED(ifp->if_serializer);
5686 /* Update the statistics from the hardware statistics block. */
5687 bce_stats_update(sc);
5689 /* Schedule the next tick. */
5690 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
5691 sc->bce_intr_cpuid);
5693 /* If link is up already up then we're done. */
5697 mii = device_get_softc(sc->bce_miibus);
5700 /* Check if the link has come up. */
5701 if ((mii->mii_media_status & IFM_ACTIVE) &&
5702 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5704 /* Now that link is up, handle any outstanding TX traffic. */
5705 if (!ifq_is_empty(&ifp->if_snd))
/* Callout entry point: take the serializer and run the real tick work. */
5714 struct bce_softc *sc = xsc;
5715 struct ifnet *ifp = &sc->arpcom.ac_if;
5717 lwkt_serialize_enter(ifp->if_serializer);
5718 bce_tick_serialized(sc);
5719 lwkt_serialize_exit(ifp->if_serializer);
5723 /****************************************************************************/
5724 /* Adds any sysctl parameters for tuning or debugging purposes. */
5727 /* 0 for success, positive value for failure. */
5728 /****************************************************************************/
5730 bce_add_sysctls(struct bce_softc *sc)
5732 struct sysctl_ctx_list *ctx;
5733 struct sysctl_oid_list *children;
5735 sysctl_ctx_init(&sc->bce_sysctl_ctx);
5736 sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
5737 SYSCTL_STATIC_CHILDREN(_hw),
5739 device_get_nameunit(sc->bce_dev),
5741 if (sc->bce_sysctl_tree == NULL) {
5742 device_printf(sc->bce_dev, "can't add sysctl node\n");
5746 ctx = &sc->bce_sysctl_ctx;
5747 children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);
5749 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
5750 CTLTYPE_INT | CTLFLAG_RW,
5751 sc, 0, bce_sysctl_tx_bds_int, "I",
5752 "Send max coalesced BD count during interrupt");
5753 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
5754 CTLTYPE_INT | CTLFLAG_RW,
5755 sc, 0, bce_sysctl_tx_bds, "I",
5756 "Send max coalesced BD count");
5757 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
5758 CTLTYPE_INT | CTLFLAG_RW,
5759 sc, 0, bce_sysctl_tx_ticks_int, "I",
5760 "Send coalescing ticks during interrupt");
5761 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
5762 CTLTYPE_INT | CTLFLAG_RW,
5763 sc, 0, bce_sysctl_tx_ticks, "I",
5764 "Send coalescing ticks");
5766 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
5767 CTLTYPE_INT | CTLFLAG_RW,
5768 sc, 0, bce_sysctl_rx_bds_int, "I",
5769 "Receive max coalesced BD count during interrupt");
5770 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
5771 CTLTYPE_INT | CTLFLAG_RW,
5772 sc, 0, bce_sysctl_rx_bds, "I",
5773 "Receive max coalesced BD count");
5774 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
5775 CTLTYPE_INT | CTLFLAG_RW,
5776 sc, 0, bce_sysctl_rx_ticks_int, "I",
5777 "Receive coalescing ticks during interrupt");
5778 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
5779 CTLTYPE_INT | CTLFLAG_RW,
5780 sc, 0, bce_sysctl_rx_ticks, "I",
5781 "Receive coalescing ticks");
5783 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
5784 CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages");
5785 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
5786 CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages");
5788 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
5789 CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0,
5790 "# segments before write to hardware registers");
5792 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5793 "stat_IfHCInOctets",
5794 CTLFLAG_RD, &sc->stat_IfHCInOctets,
5797 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5798 "stat_IfHCInBadOctets",
5799 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5800 "Bad bytes received");
5802 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5803 "stat_IfHCOutOctets",
5804 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5807 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5808 "stat_IfHCOutBadOctets",
5809 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5812 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5813 "stat_IfHCInUcastPkts",
5814 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5815 "Unicast packets received");
5817 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5818 "stat_IfHCInMulticastPkts",
5819 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5820 "Multicast packets received");
5822 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5823 "stat_IfHCInBroadcastPkts",
5824 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5825 "Broadcast packets received");
5827 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5828 "stat_IfHCOutUcastPkts",
5829 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5830 "Unicast packets sent");
5832 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5833 "stat_IfHCOutMulticastPkts",
5834 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5835 "Multicast packets sent");
5837 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
5838 "stat_IfHCOutBroadcastPkts",
5839 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5840 "Broadcast packets sent");
5842 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5843 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5844 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5845 0, "Internal MAC transmit errors");
5847 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5848 "stat_Dot3StatsCarrierSenseErrors",
5849 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5850 0, "Carrier sense errors");
5852 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5853 "stat_Dot3StatsFCSErrors",
5854 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5855 0, "Frame check sequence errors");
5857 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5858 "stat_Dot3StatsAlignmentErrors",
5859 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5860 0, "Alignment errors");
5862 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5863 "stat_Dot3StatsSingleCollisionFrames",
5864 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5865 0, "Single Collision Frames");
5867 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5868 "stat_Dot3StatsMultipleCollisionFrames",
5869 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5870 0, "Multiple Collision Frames");
5872 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5873 "stat_Dot3StatsDeferredTransmissions",
5874 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5875 0, "Deferred Transmissions");
5877 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5878 "stat_Dot3StatsExcessiveCollisions",
5879 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5880 0, "Excessive Collisions");
5882 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5883 "stat_Dot3StatsLateCollisions",
5884 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5885 0, "Late Collisions");
5887 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5888 "stat_EtherStatsCollisions",
5889 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5892 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5893 "stat_EtherStatsFragments",
5894 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5897 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5898 "stat_EtherStatsJabbers",
5899 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5902 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5903 "stat_EtherStatsUndersizePkts",
5904 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5905 0, "Undersize packets");
5907 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5908 "stat_EtherStatsOverrsizePkts",
5909 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5910 0, "stat_EtherStatsOverrsizePkts");
5912 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5913 "stat_EtherStatsPktsRx64Octets",
5914 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5915 0, "Bytes received in 64 byte packets");
5917 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5918 "stat_EtherStatsPktsRx65Octetsto127Octets",
5919 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5920 0, "Bytes received in 65 to 127 byte packets");
5922 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5923 "stat_EtherStatsPktsRx128Octetsto255Octets",
5924 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5925 0, "Bytes received in 128 to 255 byte packets");
5927 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5928 "stat_EtherStatsPktsRx256Octetsto511Octets",
5929 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5930 0, "Bytes received in 256 to 511 byte packets");
5932 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5933 "stat_EtherStatsPktsRx512Octetsto1023Octets",
5934 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5935 0, "Bytes received in 512 to 1023 byte packets");
5937 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5938 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
5939 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5940 0, "Bytes received in 1024 t0 1522 byte packets");
5942 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5943 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
5944 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5945 0, "Bytes received in 1523 to 9022 byte packets");
5947 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5948 "stat_EtherStatsPktsTx64Octets",
5949 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5950 0, "Bytes sent in 64 byte packets");
5952 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5953 "stat_EtherStatsPktsTx65Octetsto127Octets",
5954 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5955 0, "Bytes sent in 65 to 127 byte packets");
5957 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5958 "stat_EtherStatsPktsTx128Octetsto255Octets",
5959 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5960 0, "Bytes sent in 128 to 255 byte packets");
5962 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5963 "stat_EtherStatsPktsTx256Octetsto511Octets",
5964 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5965 0, "Bytes sent in 256 to 511 byte packets");
5967 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5968 "stat_EtherStatsPktsTx512Octetsto1023Octets",
5969 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5970 0, "Bytes sent in 512 to 1023 byte packets");
5972 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5973 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
5974 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5975 0, "Bytes sent in 1024 to 1522 byte packets");
5977 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5978 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
5979 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5980 0, "Bytes sent in 1523 to 9022 byte packets");
5982 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5983 "stat_XonPauseFramesReceived",
5984 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5985 0, "XON pause frames receved");
5987 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5988 "stat_XoffPauseFramesReceived",
5989 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5990 0, "XOFF pause frames received");
5992 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5994 CTLFLAG_RD, &sc->stat_OutXonSent,
5995 0, "XON pause frames sent");
5997 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
5999 CTLFLAG_RD, &sc->stat_OutXoffSent,
6000 0, "XOFF pause frames sent");
6002 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6003 "stat_FlowControlDone",
6004 CTLFLAG_RD, &sc->stat_FlowControlDone,
6005 0, "Flow control done");
6007 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6008 "stat_MacControlFramesReceived",
6009 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6010 0, "MAC control frames received");
6012 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6013 "stat_XoffStateEntered",
6014 CTLFLAG_RD, &sc->stat_XoffStateEntered,
6015 0, "XOFF state entered");
6017 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6018 "stat_IfInFramesL2FilterDiscards",
6019 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6020 0, "Received L2 packets discarded");
6022 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6023 "stat_IfInRuleCheckerDiscards",
6024 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6025 0, "Received packets discarded by rule");
6027 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6028 "stat_IfInFTQDiscards",
6029 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6030 0, "Received packet FTQ discards");
6032 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6033 "stat_IfInMBUFDiscards",
6034 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6035 0, "Received packets discarded due to lack of controller buffer memory");
6037 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6038 "stat_IfInRuleCheckerP4Hit",
6039 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6040 0, "Received packets rule checker hits");
6042 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6043 "stat_CatchupInRuleCheckerDiscards",
6044 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6045 0, "Received packets discarded in Catchup path");
6047 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6048 "stat_CatchupInFTQDiscards",
6049 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6050 0, "Received packets discarded in FTQ in Catchup path");
6052 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6053 "stat_CatchupInMBUFDiscards",
6054 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6055 0, "Received packets discarded in controller buffer memory in Catchup path");
6057 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6058 "stat_CatchupInRuleCheckerP4Hit",
6059 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6060 0, "Received packets rule checker hits in Catchup path");
6062 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6064 CTLFLAG_RD, &sc->com_no_buffers,
6065 0, "Valid packets received but no RX buffers available");
6069 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
6071 struct bce_softc *sc = arg1;
6073 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6074 &sc->bce_tx_quick_cons_trip_int,
6075 BCE_COALMASK_TX_BDS_INT);
6079 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
6081 struct bce_softc *sc = arg1;
6083 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6084 &sc->bce_tx_quick_cons_trip,
6085 BCE_COALMASK_TX_BDS);
6089 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
6091 struct bce_softc *sc = arg1;
6093 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6094 &sc->bce_tx_ticks_int,
6095 BCE_COALMASK_TX_TICKS_INT);
6099 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
6101 struct bce_softc *sc = arg1;
6103 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6105 BCE_COALMASK_TX_TICKS);
6109 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
6111 struct bce_softc *sc = arg1;
6113 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6114 &sc->bce_rx_quick_cons_trip_int,
6115 BCE_COALMASK_RX_BDS_INT);
6119 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
6121 struct bce_softc *sc = arg1;
6123 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6124 &sc->bce_rx_quick_cons_trip,
6125 BCE_COALMASK_RX_BDS);
6129 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
6131 struct bce_softc *sc = arg1;
6133 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6134 &sc->bce_rx_ticks_int,
6135 BCE_COALMASK_RX_TICKS_INT);
6139 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
6141 struct bce_softc *sc = arg1;
6143 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6145 BCE_COALMASK_RX_TICKS);
6149 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
6150 uint32_t coalchg_mask)
6152 struct bce_softc *sc = arg1;
6153 struct ifnet *ifp = &sc->arpcom.ac_if;
6156 lwkt_serialize_enter(ifp->if_serializer);
6159 error = sysctl_handle_int(oidp, &v, 0, req);
6160 if (!error && req->newptr != NULL) {
6165 sc->bce_coalchg_mask |= coalchg_mask;
6169 lwkt_serialize_exit(ifp->if_serializer);
6174 bce_coal_change(struct bce_softc *sc)
6176 struct ifnet *ifp = &sc->arpcom.ac_if;
6178 ASSERT_SERIALIZED(ifp->if_serializer);
6180 if ((ifp->if_flags & IFF_RUNNING) == 0) {
6181 sc->bce_coalchg_mask = 0;
6185 if (sc->bce_coalchg_mask &
6186 (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
6187 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
6188 (sc->bce_tx_quick_cons_trip_int << 16) |
6189 sc->bce_tx_quick_cons_trip);
6191 if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
6192 sc->bce_tx_quick_cons_trip,
6193 sc->bce_tx_quick_cons_trip_int);
6197 if (sc->bce_coalchg_mask &
6198 (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
6199 REG_WR(sc, BCE_HC_TX_TICKS,
6200 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6202 if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
6203 sc->bce_tx_ticks, sc->bce_tx_ticks_int);
6207 if (sc->bce_coalchg_mask &
6208 (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
6209 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
6210 (sc->bce_rx_quick_cons_trip_int << 16) |
6211 sc->bce_rx_quick_cons_trip);
6213 if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
6214 sc->bce_rx_quick_cons_trip,
6215 sc->bce_rx_quick_cons_trip_int);
6219 if (sc->bce_coalchg_mask &
6220 (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
6221 REG_WR(sc, BCE_HC_RX_TICKS,
6222 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6224 if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
6225 sc->bce_rx_ticks, sc->bce_rx_ticks_int);
6229 sc->bce_coalchg_mask = 0;
6233 bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
6234 uint16_t *flags0, uint16_t *mss0)
6238 int thoff, iphlen, hoff;
6241 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
6243 hoff = m->m_pkthdr.csum_lhlen;
6244 iphlen = m->m_pkthdr.csum_iphlen;
6245 thoff = m->m_pkthdr.csum_thlen;
6247 KASSERT(hoff >= sizeof(struct ether_header),
6248 ("invalid ether header len %d", hoff));
6249 KASSERT(iphlen >= sizeof(struct ip),
6250 ("invalid ip header len %d", iphlen));
6251 KASSERT(thoff >= sizeof(struct tcphdr),
6252 ("invalid tcp header len %d", thoff));
6254 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
6255 m = m_pullup(m, hoff + iphlen + thoff);
6263 /* Set the LSO flag in the TX BD */
6264 flags = TX_BD_FLAGS_SW_LSO;
6266 /* Set the length of IP + TCP options (in 32 bit words) */
6267 flags |= (((iphlen + thoff -
6268 sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);
6270 *mss0 = htole16(m->m_pkthdr.tso_segsz);