/*
 * Copyright (c) 2006-2007 Broadcom Corporation
 *	David Christensen <davidch@broadcom.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written consent.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
 */
/*
 * The following controllers are supported by this driver:
 *
 * The following controllers are not supported by this driver:
 *
 *   BCM5709S A0, A1, B0, B1, B2, C0
 *
 * Note about MSI-X on 5709/5716:
 * - 9 MSI-X vectors are supported.
 * - MSI-X vectors, RX/TX rings and status blocks' association
 *   is fixed:
 *   o  The first RX ring and the first TX ring use the first
 *      status block.
 *   o  The first MSI-X vector is associated with the first
 *      status block.
 *   o  The second RX ring and the second TX ring use the second
 *      status block.
 *   o  The second MSI-X vector is associated with the second
 *      status block.
66 * and each status block consumes 128bytes. In addition to
67 * this, the memory for the status blocks is aligned on 128bytes
68 * in this driver. (see bce_dma_alloc() and HC_CONFIG)
69 * - Each status block has its own coalesce parameters, which also
70 * serve as the related MSI-X vector's interrupt moderation
71 * parameters. (see bce_coal_change())
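/*
 * Illustratively (restating the scheme above): RX ring N and TX ring N
 * post their completions into status block N, and MSI-X vector N
 * services status block N, so each vector moderates exactly one
 * RX/TX ring pair.
 */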
#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/if_types.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <dev/netif/mii_layer/mii.h>
#include <dev/netif/mii_layer/miivar.h>
#include <dev/netif/mii_layer/brgphyreg.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "miibus_if.h"

#include <dev/netif/bce/if_bcereg.h>
#include <dev/netif/bce/if_bcefw.h>
#define BCE_MSI_CKINTVL		((10 * hz) / 1000)	/* 10ms */
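/* E.g. at hz = 100 this evaluates to 1 tick; at hz = 1000, 10 ticks. */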
#ifdef BCE_RSS_DEBUG
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !BCE_RSS_DEBUG */
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* BCE_RSS_DEBUG */
/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bce_probe() to identify the devices supported by this driver.   */
/****************************************************************************/
#define BCE_DEVDESC_MAX		64

static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
		"HP NC371i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
		"HP NC373T PCIe Multifunction Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
		"HP NC374m PCIe Multifunction Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
		"HP NC373m Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
		"HP NC373F PCIe Multifunc Giga Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
176 "Broadcom NetXtreme II BCM5708S 1000Base-T" },
	/* BCM5709C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-T" },

	/* BCM5709S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-SX" },

	/* BCM5716 controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5716 1000Base-T" },

	{ 0, 0, 0, 0, NULL }
};
/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/****************************************************************************/
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BCE_NV_WREN)
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash) */
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Atmel expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
/*
 * The BCM5709 controllers transparently handle the
 * differences between Atmel 264 byte pages and all
 * flash devices which use 256 byte pages, so no
 * logical-to-physical mapping is required in the
 * driver.
 */
static struct flash_spec flash_5709 = {
	.flags		= BCE_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709/5716 buffered flash (256kB)",
};
/****************************************************************************/
/* DragonFly device entry points.                                           */
/****************************************************************************/
static int	bce_probe(device_t);
static int	bce_attach(device_t);
static int	bce_detach(device_t);
static void	bce_shutdown(device_t);
static int	bce_miibus_read_reg(device_t, int, int);
static int	bce_miibus_write_reg(device_t, int, int, int);
static void	bce_miibus_statchg(device_t);
/****************************************************************************/
/* BCE Register/Memory Access Routines                                      */
/****************************************************************************/
static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
static void	bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
static uint32_t	bce_shmem_rd(struct bce_softc *, uint32_t);
static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);

/****************************************************************************/
/* BCE NVRAM Access Routines                                                */
/****************************************************************************/
static int	bce_acquire_nvram_lock(struct bce_softc *);
static int	bce_release_nvram_lock(struct bce_softc *);
static void	bce_enable_nvram_access(struct bce_softc *);
static void	bce_disable_nvram_access(struct bce_softc *);
static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
		    uint32_t);
static int	bce_init_nvram(struct bce_softc *);
static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
static int	bce_nvram_test(struct bce_softc *);
/****************************************************************************/
/* BCE DMA Allocate/Free Routines                                           */
/****************************************************************************/
static int	bce_dma_alloc(struct bce_softc *);
static void	bce_dma_free(struct bce_softc *);
static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);

/****************************************************************************/
/* BCE Firmware Synchronization and Load                                    */
/****************************************************************************/
static int	bce_fw_sync(struct bce_softc *, uint32_t);
static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
		    uint32_t, uint32_t);
static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
		    struct fw_info *);
static void	bce_start_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
static void	bce_start_rxp_cpu(struct bce_softc *);
static void	bce_init_rxp_cpu(struct bce_softc *);
static void	bce_init_txp_cpu(struct bce_softc *);
static void	bce_init_tpat_cpu(struct bce_softc *);
static void	bce_init_cp_cpu(struct bce_softc *);
static void	bce_init_com_cpu(struct bce_softc *);
static void	bce_init_cpus(struct bce_softc *);
static void	bce_setup_msix_table(struct bce_softc *);
static void	bce_init_rss(struct bce_softc *);

static void	bce_stop(struct bce_softc *);
static int	bce_reset(struct bce_softc *, uint32_t);
static int	bce_chipinit(struct bce_softc *);
static int	bce_blockinit(struct bce_softc *);
static void	bce_probe_pci_caps(struct bce_softc *);
static void	bce_print_adapter_info(struct bce_softc *);
static void	bce_get_media(struct bce_softc *);
static void	bce_mgmt_init(struct bce_softc *);
static int	bce_init_ctx(struct bce_softc *);
static void	bce_get_mac_addr(struct bce_softc *);
static void	bce_set_mac_addr(struct bce_softc *);
static void	bce_set_rx_mode(struct bce_softc *);
static void	bce_coal_change(struct bce_softc *);
static void	bce_npoll_coal_change(struct bce_softc *);
static void	bce_setup_serialize(struct bce_softc *);
static void	bce_serialize_skipmain(struct bce_softc *);
static void	bce_deserialize_skipmain(struct bce_softc *);
static void	bce_set_timer_cpuid(struct bce_softc *, boolean_t);
static int	bce_alloc_intr(struct bce_softc *);
static void	bce_free_intr(struct bce_softc *);
static void	bce_try_alloc_msix(struct bce_softc *);
static void	bce_free_msix(struct bce_softc *, boolean_t);
static void	bce_setup_ring_cnt(struct bce_softc *);
static int	bce_setup_intr(struct bce_softc *);
static void	bce_teardown_intr(struct bce_softc *);
static int	bce_setup_msix(struct bce_softc *);
static void	bce_teardown_msix(struct bce_softc *, int);

static int	bce_create_tx_ring(struct bce_tx_ring *);
static void	bce_destroy_tx_ring(struct bce_tx_ring *);
static void	bce_init_tx_context(struct bce_tx_ring *);
static int	bce_init_tx_chain(struct bce_tx_ring *);
static void	bce_free_tx_chain(struct bce_tx_ring *);
static void	bce_xmit(struct bce_tx_ring *);
static int	bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
static int	bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
		    uint16_t *, uint16_t *);
static int	bce_create_rx_ring(struct bce_rx_ring *);
static void	bce_destroy_rx_ring(struct bce_rx_ring *);
static void	bce_init_rx_context(struct bce_rx_ring *);
static int	bce_init_rx_chain(struct bce_rx_ring *);
static void	bce_free_rx_chain(struct bce_rx_ring *);
static int	bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t,
static void	bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
static struct pktinfo *bce_rss_pktinfo(struct pktinfo *, uint32_t,
		    const struct l2_fhdr *);
static void	bce_start(struct ifnet *, struct ifaltq_subque *);
static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bce_watchdog(struct ifaltq_subque *);
static int	bce_ifmedia_upd(struct ifnet *);
static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bce_init(void *);

#ifdef IFPOLL_ENABLE
static void	bce_npoll(struct ifnet *, struct ifpoll_info *);
static void	bce_npoll_rx(struct ifnet *, void *, int);
static void	bce_npoll_tx(struct ifnet *, void *, int);
static void	bce_npoll_status(struct ifnet *);
static void	bce_npoll_rx_pack(struct ifnet *, void *, int);
#endif	/* IFPOLL_ENABLE */

static void	bce_serialize(struct ifnet *, enum ifnet_serialize);
static void	bce_deserialize(struct ifnet *, enum ifnet_serialize);
static int	bce_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	bce_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif
static void	bce_intr(struct bce_softc *);
static void	bce_intr_legacy(void *);
static void	bce_intr_msi(void *);
static void	bce_intr_msi_oneshot(void *);
static void	bce_intr_msix_rxtx(void *);
static void	bce_intr_msix_rx(void *);
static void	bce_tx_intr(struct bce_tx_ring *, uint16_t);
static void	bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
static void	bce_phy_intr(struct bce_softc *);
static void	bce_disable_intr(struct bce_softc *);
static void	bce_enable_intr(struct bce_softc *);
static void	bce_reenable_intr(struct bce_rx_ring *);
static void	bce_check_msi(void *);

static void	bce_stats_update(struct bce_softc *);
static void	bce_tick(void *);
static void	bce_tick_serialized(struct bce_softc *);
static void	bce_pulse(void *);
static void	bce_add_sysctls(struct bce_softc *);
static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);

#ifdef IFPOLL_ENABLE
static int	bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
#endif

static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
		    uint32_t *, uint32_t);
/*
 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
 * takes 1023 as the TX ticks limit.  However, using 1023 will
 * cause 5708(B2) to generate extra interrupts (~2000/s) even when
 * there is _no_ network activity on the NIC.
 */
static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
static uint32_t	bce_rx_bds = 0;			/* bcm: 6 */
static uint32_t	bce_rx_ticks_int = 150;		/* bcm: 18 */
static uint32_t	bce_rx_ticks = 150;		/* bcm: 18 */

static int	bce_tx_wreg = 8;

static int	bce_msi_enable = 1;
static int	bce_msix_enable = 1;

static int	bce_rx_pages = RX_PAGES_DEFAULT;
static int	bce_tx_pages = TX_PAGES_DEFAULT;

static int	bce_rx_rings = 0;	/* auto */
static int	bce_tx_rings = 0;	/* auto */

TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
TUNABLE_INT("hw.bce.msix.enable", &bce_msix_enable);
TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
TUNABLE_INT("hw.bce.tx_rings", &bce_tx_rings);
TUNABLE_INT("hw.bce.rx_rings", &bce_rx_rings);
/****************************************************************************/
/* DragonFly device dispatch table.                                         */
/****************************************************************************/
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),
	/* Bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
	/* MII interface */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	DEVMETHOD_END
};
static driver_t bce_driver = {
	"bce",
	bce_methods,
	sizeof(struct bce_softc)
};

static devclass_t bce_devclass;
DECLARE_DUMMY_MODULE(if_bce);
MODULE_DEPEND(bce, miibus, 1, 1, 1);
DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
/****************************************************************************/
/* Device probe function.                                                   */
/*                                                                          */
/* Compares the device to the driver's list of supported devices and       */
/* reports back to the OS whether this is the right driver for the device. */
/*                                                                          */
/* Returns:                                                                 */
/*   BUS_PROBE_DEFAULT on success, positive value on failure.              */
/****************************************************************************/
static int
bce_probe(device_t dev)
{
	struct bce_type *t;
	uint16_t vid, did, svid, sdid;

	/* Get the data for the device to be probed. */
	vid  = pci_get_vendor(dev);
	did  = pci_get_device(dev);
	svid = pci_get_subvendor(dev);
	sdid = pci_get_subdevice(dev);
	/* Look through the list of known devices for a match. */
	for (t = bce_devs; t->bce_name != NULL; ++t) {
		if (vid == t->bce_vid && did == t->bce_did &&
		    (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
		    (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
			uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
			char *descbuf;

			descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);

			/* Print out the device identity. */
			ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
			    t->bce_name,
			    ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
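			/*
			 * Example: revid 0x11 decodes to 'A' + 1 = 'B'
			 * and minor 1, so "(B1)" is appended to the
			 * device description.
			 */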
			device_set_desc_copy(dev, descbuf);
			kfree(descbuf, M_TEMP);
/****************************************************************************/
/* Adapter Information Print Function.                                      */
/*                                                                          */
/* Prints the chip revision, bus type/speed, firmware versions and device  */
/* features of the adapter.                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_print_adapter_info(struct bce_softc *sc)
{
	device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);

	kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
	    ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
	if (sc->bce_flags & BCE_PCIE_FLAG) {
		kprintf("Bus (PCIe x%d, ", sc->link_width);
		switch (sc->link_speed) {
		case 1:
			kprintf("2.5Gbps); ");
			break;
		default:
			kprintf("Unknown link speed); ");
			break;
		}
	} else {
		kprintf("Bus (PCI%s, %s, %dMHz); ",
		    ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
		    ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		    sc->bus_speed_mhz);
	}
	/* Firmware version and device features. */
	kprintf("B/C (%s)", sc->bce_bc_ver);

	if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
	    (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
		if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
			kprintf("MFW[%s]", sc->bce_mfw_ver);
		if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
			kprintf("2.5G");
	}
	kprintf("\n");
}
/****************************************************************************/
/* PCI Capabilities Probe Function.                                         */
/*                                                                          */
/* Walks the PCI capabilities list for the device to find what features are */
/* supported.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
static void
bce_probe_pci_caps(struct bce_softc *sc)
{
	device_t dev = sc->bce_dev;
	uint8_t ptr;

	if (pci_is_pcix(dev))
		sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
	ptr = pci_get_pciecap_ptr(dev);
	if (ptr != 0) {
		uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);

		sc->link_speed = link_status & 0xf;
		sc->link_width = (link_status >> 4) & 0x3f;
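		/*
		 * Per the PCIe spec, Link Status bits 3:0 encode the
		 * link speed (1 = 2.5GT/s) and bits 9:4 the negotiated
		 * width; e.g. link_status 0x0041 decodes to a x4 link
		 * at 2.5GT/s.
		 */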
		sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
		sc->bce_flags |= BCE_PCIE_FLAG;
	}
}
/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.                                                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_attach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int rid, rc = 0, i, j;
	struct mii_probe_args mii_args;
	uintptr_t mii_priv = 0;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < BCE_MSIX_MAX; ++i) {
		struct bce_msix_data *msix = &sc->bce_msix[i];

		msix->msix_cpuid = -1;
		msix->msix_rid = -1;
	}
	pci_enable_busmaster(dev);

	bce_probe_pci_caps(sc);

	/* Allocate PCI memory resources. */
	rid = PCIR_BAR(0);
	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE | PCI_RF_DENSE);
	if (sc->bce_res_mem == NULL) {
		device_printf(dev, "PCI memory allocation failed\n");
		return ENXIO;
	}
	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space is not
	 * valid until this is done.
	 */
	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
	    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
	/* Save ASIC revision info. */
	sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch (BCE_CHIP_ID(sc)) {
	case BCE_CHIP_ID_5706_A0:
	case BCE_CHIP_ID_5706_A1:
	case BCE_CHIP_ID_5708_A0:
	case BCE_CHIP_ID_5708_B0:
	case BCE_CHIP_ID_5709_A0:
	case BCE_CHIP_ID_5709_B0:
	case BCE_CHIP_ID_5709_B1:
#if 0
	/* 5709C B2 seems to work fine */
	case BCE_CHIP_ID_5709_B2:
#endif
		device_printf(dev, "Unsupported chip id 0x%08x!\n",
		    BCE_CHIP_ID(sc));
		rc = ENODEV;
		goto fail;
	}
	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
		    BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
			mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
	} else {
		mii_priv |= BRGPHY_FLAG_BER_BUG;
	}
	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BCE_SHM_HDR_SIGNATURE_SIG) {
		/* Multi-port devices use different offsets in shared memory. */
		sc->bce_shmem_base = REG_RD_IND(sc,
		    BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
	} else {
		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
	}
	/* Fetch the bootcode revision. */
	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		uint8_t num;
		int k, skip0;

		num = (uint8_t)(val >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				sc->bce_bc_ver[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			sc->bce_bc_ver[j++] = '.';
	}
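	/*
	 * Worked example: a BCE_DEV_INFO_BC_REV value of 0x01020300
	 * yields the string "1.2.3"; skip0 suppresses leading zeros
	 * within each of the three version bytes.
	 */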
	/* Check if any management firmware is running. */
	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;

		/* Allow time for firmware to enter the running state. */
		for (i = 0; i < 30; i++) {
			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
			if (val & BCE_CONDITION_MFW_RUN_MASK)
				break;
			DELAY(10000);
		}
	}
	/* Check the current bootcode state. */
	val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
	    BCE_CONDITION_MFW_RUN_MASK;
	if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
	    val != BCE_CONDITION_MFW_RUN_NONE) {
		uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);

		for (i = 0, j = 0; j < 3; j++) {
			val = bce_reg_rd_ind(sc, addr + j * 4);
			memcpy(&sc->bce_mfw_ver[i], &val, 4);
			i += 4;
		}
	}
	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bce_flags |= BCE_PCIX_FLAG;

		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
		    BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;
	}

	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bce_flags |= BCE_PCI_32BIT_FLAG;
	/* Reset the controller. */
	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (rc != 0)
		goto fail;

	/* Initialize the controller. */
	rc = bce_chipinit(sc);
	if (rc != 0) {
		device_printf(dev, "Controller initialization failed!\n");
		goto fail;
	}

	/* Perform NVRAM test. */
	rc = bce_nvram_test(sc);
	if (rc != 0) {
		device_printf(dev, "NVRAM test failed!\n");
		goto fail;
	}
	/* Fetch the permanent Ethernet MAC address. */
	bce_get_mac_addr(sc);
	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */
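	/*
	 * For example, with the tunable defaults above (bce_rx_bds_int =
	 * 128, bce_rx_ticks_int = 150), an RX interrupt fires once 128
	 * receive BDs are ready or 150 tick units have elapsed,
	 * whichever comes first.
	 */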
#ifdef BCE_DEBUG
	/* Force more frequent interrupts. */
	sc->bce_tx_quick_cons_trip_int = 1;
	sc->bce_tx_quick_cons_trip = 1;
	sc->bce_tx_ticks_int = 0;
	sc->bce_tx_ticks = 0;

	sc->bce_rx_quick_cons_trip_int = 1;
	sc->bce_rx_quick_cons_trip = 1;
	sc->bce_rx_ticks_int = 0;
	sc->bce_rx_ticks = 0;
#else
	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
	sc->bce_tx_quick_cons_trip = bce_tx_bds;
	sc->bce_tx_ticks_int = bce_tx_ticks_int;
	sc->bce_tx_ticks = bce_tx_ticks;

	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
	sc->bce_rx_quick_cons_trip = bce_rx_bds;
	sc->bce_rx_ticks_int = bce_rx_ticks_int;
	sc->bce_rx_ticks = bce_rx_ticks;
#endif
	/* Update statistics once every second. */
	sc->bce_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bce_get_media(sc);

	/* Find out RX/TX ring count */
	bce_setup_ring_cnt(sc);
	/* Allocate DMA memory resources. */
	rc = bce_dma_alloc(sc);
	if (rc != 0) {
		device_printf(dev, "DMA resource allocation failed!\n");
		goto fail;
	}
#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX/TX CPU offset
	 */
	if (sc->rx_ring_cnt2 == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt2 * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.offset", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt2 != 0) {
			device_printf(dev, "invalid npoll.offset %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->npoll_ofs = offset;
#endif
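	/*
	 * Example of the offset computation above: with rx_ring_cnt2 = 4
	 * on an ncpus2 = 8 system, unit 1 defaults to offset
	 * (4 * 1) % 8 = 4, i.e. its RX rings are polled starting at CPU4.
	 */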
	/* Allocate PCI IRQ resources. */
	rc = bce_alloc_intr(sc);
	if (rc != 0)
		goto fail;

	/* Setup serializer */
	bce_setup_serialize(sc);
	/* Initialize the ifnet interface. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_init = bce_init;
	ifp->if_serialize = bce_serialize;
	ifp->if_deserialize = bce_deserialize;
	ifp->if_tryserialize = bce_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = bce_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = bce_npoll;
#endif

	ifp->if_mtu = ETHERMTU;
	ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capabilities = BCE_IF_CAPABILITIES;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
		ifp->if_baudrate = IF_Gbps(2.5);
	else
		ifp->if_baudrate = IF_Gbps(1);

	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	if (sc->tx_ring_cnt > 1) {
		ifp->if_mapsubq = ifq_mapsubq_mask;
		ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_cnt - 1);
	}
	mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
	mii_args.mii_probemask = 1 << sc->bce_phy_addr;
	mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
	mii_args.mii_priv = mii_priv;

	rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
	if (rc != 0) {
		device_printf(dev, "PHY probe failed!\n");
		goto fail;
	}
	/* Attach to the Ethernet interface list. */
	ether_ifattach(ifp, sc->eaddr, NULL);

	/* Setup TX rings and subqueues */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct bce_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, sc->bce_msix[i].msix_cpuid);
		ifsq_set_priv(ifsq, txr);
		ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog);
	}
	callout_init_mp(&sc->bce_tick_callout);
	callout_init_mp(&sc->bce_pulse_callout);
	callout_init_mp(&sc->bce_ckmsi_callout);

	rc = bce_setup_intr(sc);
	if (rc != 0) {
		device_printf(dev, "Failed to setup IRQ!\n");
		ether_ifdetach(ifp);
		goto fail;
	}
	/* Set timer CPUID */
	bce_set_timer_cpuid(sc, FALSE);

	/* Add the supported sysctls to the kernel. */
	bce_add_sysctls(sc);

	/*
	 * The chip reset earlier notified the bootcode that
	 * a driver is present.  We now need to start our pulse
	 * routine so that the bootcode is reminded that we're
	 * still running.
	 */
	bce_pulse(sc);

	/* Get the firmware running so IPMI still works */
	bce_mgmt_init(sc);

	if (bootverbose)
		bce_print_adapter_info(sc);

	return 0;
fail:
	bce_detach(dev);
	return ENXIO;
}
/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		uint32_t msg;

		ifnet_serialize_all(ifp);

		/* Stop and reset the controller. */
		callout_stop(&sc->bce_pulse_callout);
		bce_stop(sc);
		if (sc->bce_flags & BCE_NO_WOL_FLAG)
			msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
		else
			msg = BCE_DRV_MSG_CODE_UNLOAD;
		bce_reset(sc, msg);

		bce_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_miibus != NULL)
		device_delete_child(dev, sc->bce_miibus);
	bus_generic_detach(dev);

	bce_free_intr(sc);

	if (sc->bce_res_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
		    sc->bce_res_mem);
	}
	if (sc->bce_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->bce_sysctl_ctx);

	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);

	bce_dma_free(sc);
	return 0;
}
/****************************************************************************/
/* Device shutdown function.                                                */
/*                                                                          */
/* Stops and resets the controller.                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_shutdown(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t msg;

	ifnet_serialize_all(ifp);

	bce_stop(sc);
	if (sc->bce_flags & BCE_NO_WOL_FLAG)
		msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else
		msg = BCE_DRV_MSG_CODE_UNLOAD;
	bce_reset(sc, msg);

	ifnet_deserialize_all(ifp);
}
/****************************************************************************/
/* Indirect register read.                                                  */
/*                                                                          */
/* Reads NetXtreme II registers using an index/data register pair in PCI   */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* reads but is much slower than memory-mapped I/O.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static uint32_t
bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
{
	device_t dev = sc->bce_dev;

	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
}
/****************************************************************************/
/* Indirect register write.                                                 */
/*                                                                          */
/* Writes NetXtreme II registers using an index/data register pair in PCI  */
/* configuration space.  Using this mechanism avoids issues with posted    */
/* writes but is much slower than memory-mapped I/O.                        */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	device_t dev = sc->bce_dev;

	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}
/****************************************************************************/
/* Shared memory write.                                                     */
/*                                                                          */
/* Writes NetXtreme II shared memory region.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
}
/****************************************************************************/
/* Shared memory read.                                                      */
/*                                                                          */
/* Reads NetXtreme II shared memory region.                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   The 32 bit value read.                                                 */
/****************************************************************************/
static uint32_t
bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
{
	return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
}
/****************************************************************************/
/* Context memory write.                                                    */
/*                                                                          */
/* The NetXtreme II controller uses context memory to track connection     */
/* information for L2 and higher network protocols.                         */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
    uint32_t ctx_val)
{
	uint32_t idx, offset = ctx_offset + cid_addr;
	uint32_t val, retry_cnt = 5;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
		REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));

		for (idx = 0; idx < retry_cnt; idx++) {
			val = REG_RD(sc, BCE_CTX_CTX_CTRL);
			if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			DELAY(5);
		}

		if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
			device_printf(sc->bce_dev,
			    "Unable to write CTX memory: "
			    "cid_addr = 0x%08X, offset = 0x%08X!\n",
			    cid_addr, ctx_offset);
		}
	} else {
		REG_WR(sc, BCE_CTX_DATA_ADR, offset);
		REG_WR(sc, BCE_CTX_DATA, ctx_val);
	}
}
/****************************************************************************/
/* PHY register read.                                                       */
/*                                                                          */
/* Implements register reads on the MII bus.                                */
/*                                                                          */
/* Returns:                                                                 */
/*   The value of the register.                                             */
/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
	    BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
	    BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		if_printf(&sc->arpcom.ac_if,
		    "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
		    phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return (val & 0xffff);
}
/****************************************************************************/
/* PHY register write.                                                      */
/*                                                                          */
/* Implements register writes on the MII bus.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on completion.                                                       */
/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
	    BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
	    BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");

	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	return 0;
}
1395 /* MII bus status change. */
1397 /* Called by the MII bus driver when the PHY establishes link to set the */
1398 /* MAC interface registers. */
1402 /****************************************************************************/
1404 bce_miibus_statchg(device_t dev)
1406 struct bce_softc *sc = device_get_softc(dev);
1407 struct mii_data *mii = device_get_softc(sc->bce_miibus);
1409 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1412 * Set MII or GMII interface based on the speed negotiated
1415 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1416 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1417 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1419 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1423 * Set half or full duplex based on the duplicity negotiated
1426 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1427 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1429 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
/****************************************************************************/
/* Acquire NVRAM lock.                                                      */
/*                                                                          */
/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is used by    */
/* the driver.                                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_acquire_nvram_lock(struct bce_softc *sc)
{
	uint32_t val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (val & BCE_NVM_SW_ARB_ARB_ARB2)
			break;
		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		return EBUSY;
	}
	return 0;
}
/****************************************************************************/
/* Release NVRAM lock.                                                      */
/*                                                                          */
/* When the caller is finished accessing NVRAM the lock must be released.  */
/* Lock 0 is reserved, lock 1 is used by firmware and lock 2 is used by    */
/* the driver.                                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_release_nvram_lock(struct bce_softc *sc)
{
	uint32_t val;
	int j;

	/*
	 * Relinquish nvram interface.
	 */
	REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(sc, BCE_NVM_SW_ARB);
		if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
			break;
		DELAY(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT) {
		return EBUSY;
	}
	return 0;
}
/****************************************************************************/
/* Enable NVRAM access.                                                     */
/*                                                                          */
/* Before accessing NVRAM for read or write operations the caller must     */
/* enable NVRAM access.                                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_enable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
	/* Enable both bits, even on read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
}
/****************************************************************************/
/* Disable NVRAM access.                                                    */
/*                                                                          */
/* When the caller is finished accessing NVRAM access must be disabled.    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_disable_nvram_access(struct bce_softc *sc)
{
	uint32_t val;

	val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
	    val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
}
/****************************************************************************/
/* Read a dword (32 bits) from NVRAM.                                       */
/*                                                                          */
/* Read a 32 bit word from NVRAM.  The caller is assumed to have already   */
/* obtained the NVRAM lock and enabled the controller for NVRAM access.    */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the 32 bit value read, positive value on failure.    */
/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
    uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
		    sc->bce_flash_info->page_bits) +
		    (offset % sc->bce_flash_info->page_size);
	}
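	/*
	 * Worked example: assuming 264-byte pages addressed with 9 page
	 * bits (the Atmel buffered parts), linear offset 1000 = page 3,
	 * byte 208 translates to (3 << 9) + 208 = 1744.
	 */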
	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
		    "Timeout error reading NVRAM at offset 0x%08X!\n",
		    offset);
		rc = EBUSY;
	}

	return rc;
}
/****************************************************************************/
/* Initialize NVRAM access.                                                 */
/*                                                                          */
/* Identify the NVRAM device in use and prepare the NVRAM interface to     */
/* access that device.                                                      */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_init_nvram(struct bce_softc *sc)
{
	uint32_t val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		sc->bce_flash_info = &flash_5709;
		goto bce_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BCE_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */
	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */
		for (j = 0, flash = flash_table; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bce_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		uint32_t mask;

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = flash_table; j < entry_count;
		     j++, flash++) {
			/* Check if the device matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bce_flash_info = flash;

				/* Request access to the flash interface. */
				rc = bce_acquire_nvram_lock(sc);
				if (rc != 0)
					return rc;

				/* Reconfigure the flash interface. */
				bce_enable_nvram_access(sc);
				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
				bce_disable_nvram_access(sc);
				bce_release_nvram_lock(sc);
				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bce_flash_info = NULL;
		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
		rc = ENODEV;
	}

bce_init_nvram_get_flash_size:
	/* Read the flash config data from the shared memory interface. */
	val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
	    BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bce_flash_size = val;
	else
		sc->bce_flash_size = sc->bce_flash_info->total_size;

	return rc;
}
/****************************************************************************/
/* Read an arbitrary range of data from NVRAM.                              */
/*                                                                          */
/* Prepares the NVRAM interface for access and reads the requested data    */
/* into the supplied buffer.                                                */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success and the data read, positive value on failure.             */
/****************************************************************************/
static int
bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
    int buf_size)
{
	uint32_t cmd_flags, offset32, len32, extra;
	int rc = 0;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	rc = bce_acquire_nvram_lock(sc);
	if (rc != 0)
		return rc;

	/* Enable access to flash interface */
	bce_enable_nvram_access(sc);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	/* XXX should we release nvram lock if read_dword() fails? */
	if ((offset32 & 3) || (len32 & 3)) {
		uint8_t buf[4];
		uint32_t pre_len;

		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
		} else {
			cmd_flags = BCE_NVM_COMMAND_FIRST;
		}

		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}
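	/*
	 * Example: a 6-byte tail yields extra = 4 - (6 & 3) = 2 and
	 * rounds len32 up to (6 + 4) & ~3 = 8, so two more dwords are
	 * read and the 2 surplus bytes are discarded below.
	 */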
	if (len32 == 4) {
		uint8_t buf[4];

		if (cmd_flags)
			cmd_flags = BCE_NVM_COMMAND_LAST;
		else
			cmd_flags = BCE_NVM_COMMAND_FIRST |
			    BCE_NVM_COMMAND_LAST;

		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	} else if (len32 > 0) {
		uint8_t buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BCE_NVM_COMMAND_FIRST;

		rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		while (len32 > 4 && rc == 0) {
			rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			goto bce_nvram_read_locked_exit;

		cmd_flags = BCE_NVM_COMMAND_LAST;
		rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

bce_nvram_read_locked_exit:
	/* Disable access to flash interface and release the lock. */
	bce_disable_nvram_access(sc);
	bce_release_nvram_lock(sc);

	return rc;
}
/****************************************************************************/
/* Verifies that NVRAM is accessible and contains valid data.               */
/*                                                                          */
/* Reads the configuration data from NVRAM and verifies that the CRC is    */
/* correct.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_nvram_test(struct bce_softc *sc)
{
	uint32_t buf[BCE_NVRAM_SIZE / 4];
	uint32_t magic, csum;
	uint8_t *data = (uint8_t *)buf;
	int rc = 0;

	/*
	 * Check that the device NVRAM is valid by reading
	 * the magic value at offset 0.
	 */
	rc = bce_nvram_read(sc, 0, data, 4);
	if (rc != 0)
		return rc;

	magic = be32toh(buf[0]);
	if (magic != BCE_NVRAM_MAGIC) {
		if_printf(&sc->arpcom.ac_if,
		    "Invalid NVRAM magic value! Expected: 0x%08X, "
		    "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
		return ENODEV;
	}
	/*
	 * Verify that the device NVRAM includes valid
	 * configuration data.
	 */
	rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
	if (rc != 0)
		return rc;

	csum = ether_crc32_le(data, 0x100);
	if (csum != BCE_CRC32_RESIDUAL) {
		if_printf(&sc->arpcom.ac_if,
		    "Invalid Manufacturing Information NVRAM CRC! "
		    "Expected: 0x%08X, Found: 0x%08X\n",
		    BCE_CRC32_RESIDUAL, csum);
		return ENODEV;
	}
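	/*
	 * Note: ether_crc32_le() over a region that includes its own
	 * trailing CRC yields a known constant, so comparing against
	 * BCE_CRC32_RESIDUAL validates each 0x100-byte block without
	 * parsing out the stored checksum.
	 */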
	csum = ether_crc32_le(data + 0x100, 0x100);
	if (csum != BCE_CRC32_RESIDUAL) {
		if_printf(&sc->arpcom.ac_if,
		    "Invalid Feature Configuration Information "
		    "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
		    BCE_CRC32_RESIDUAL, csum);
		return ENODEV;
	}

	return 0;
}
/****************************************************************************/
/* Identifies the current media type of the controller and sets the PHY    */
/* address.                                                                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_get_media(struct bce_softc *sc)
{
	uint32_t val;

	sc->bce_phy_addr = 1;

	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
		uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
		uint32_t strap;

		/*
		 * The BCM5709S is software configurable
		 * for Copper or SerDes operation.
		 */
		if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
			return;
		} else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
			sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
			return;
		}

		if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
			strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
		} else {
			strap =
			    (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
		}

		if (pci_get_function(sc->bce_dev) == 0) {
			switch (strap) {
			case 0x4:
			case 0x5:
			case 0x6:
				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
			}
		} else {
			switch (strap) {
			case 0x1:
			case 0x2:
			case 0x4:
				sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
				break;
			}
		}
	} else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
	}
	if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
		sc->bce_flags |= BCE_NO_WOL_FLAG;
		if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
			sc->bce_phy_addr = 2;
			val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
		}
	} else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
	    (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
		sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
	}
}
1951 bce_destroy_tx_ring(struct bce_tx_ring *txr)
1955 /* Destroy the TX buffer descriptor DMA resources. */
1956 if (txr->tx_bd_chain_tag != NULL) {
1957 for (i = 0; i < txr->tx_pages; i++) {
1958 if (txr->tx_bd_chain[i] != NULL) {
1959 bus_dmamap_unload(txr->tx_bd_chain_tag,
1960 txr->tx_bd_chain_map[i]);
1961 bus_dmamem_free(txr->tx_bd_chain_tag,
1962 txr->tx_bd_chain[i],
1963 txr->tx_bd_chain_map[i]);
1966 bus_dma_tag_destroy(txr->tx_bd_chain_tag);
1969 /* Destroy the TX mbuf DMA resources. */
1970 if (txr->tx_mbuf_tag != NULL) {
1971 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
1972 /* Must have been unloaded in bce_stop() */
1973 KKASSERT(txr->tx_bufs[i].tx_mbuf_ptr == NULL);
1974 bus_dmamap_destroy(txr->tx_mbuf_tag,
1975 txr->tx_bufs[i].tx_mbuf_map);
1977 bus_dma_tag_destroy(txr->tx_mbuf_tag);
1980 if (txr->tx_bd_chain_map != NULL)
1981 kfree(txr->tx_bd_chain_map, M_DEVBUF);
1982 if (txr->tx_bd_chain != NULL)
1983 kfree(txr->tx_bd_chain, M_DEVBUF);
1984 if (txr->tx_bd_chain_paddr != NULL)
1985 kfree(txr->tx_bd_chain_paddr, M_DEVBUF);
1987 if (txr->tx_bufs != NULL)
1988 kfree(txr->tx_bufs, M_DEVBUF);
1992 bce_destroy_rx_ring(struct bce_rx_ring *rxr)
1996 /* Destroy the RX buffer descriptor DMA resources. */
1997 if (rxr->rx_bd_chain_tag != NULL) {
1998 for (i = 0; i < rxr->rx_pages; i++) {
1999 if (rxr->rx_bd_chain[i] != NULL) {
2000 bus_dmamap_unload(rxr->rx_bd_chain_tag,
2001 rxr->rx_bd_chain_map[i]);
2002 bus_dmamem_free(rxr->rx_bd_chain_tag,
2003 rxr->rx_bd_chain[i],
2004 rxr->rx_bd_chain_map[i]);
2007 bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
2010 /* Destroy the RX mbuf DMA resources. */
2011 if (rxr->rx_mbuf_tag != NULL) {
2012 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2013 /* Must have been unloaded in bce_stop() */
2014 KKASSERT(rxr->rx_bufs[i].rx_mbuf_ptr == NULL);
2015 bus_dmamap_destroy(rxr->rx_mbuf_tag,
2016 rxr->rx_bufs[i].rx_mbuf_map);
2018 bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
2019 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2022 if (rxr->rx_bd_chain_map != NULL)
2023 kfree(rxr->rx_bd_chain_map, M_DEVBUF);
2024 if (rxr->rx_bd_chain != NULL)
2025 kfree(rxr->rx_bd_chain, M_DEVBUF);
2026 if (rxr->rx_bd_chain_paddr != NULL)
2027 kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);
2029 if (rxr->rx_bufs != NULL)
2030 kfree(rxr->rx_bufs, M_DEVBUF);
2033 /****************************************************************************/
2034 /* Free any DMA memory owned by the driver. */
2036 /* Scans through each data structure that requires DMA memory and frees */
2037 /* the memory if allocated. */
2041 /****************************************************************************/
2043 bce_dma_free(struct bce_softc *sc)
2047 /* Destroy the status block. */
2048 if (sc->status_tag != NULL) {
2049 if (sc->status_block != NULL) {
2050 bus_dmamap_unload(sc->status_tag, sc->status_map);
2051 bus_dmamem_free(sc->status_tag, sc->status_block,
2054 bus_dma_tag_destroy(sc->status_tag);
2057 /* Destroy the statistics block. */
2058 if (sc->stats_tag != NULL) {
2059 if (sc->stats_block != NULL) {
2060 bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2061 bus_dmamem_free(sc->stats_tag, sc->stats_block,
2064 bus_dma_tag_destroy(sc->stats_tag);
2067 /* Destroy the CTX DMA resources. */
2068 if (sc->ctx_tag != NULL) {
2069 for (i = 0; i < sc->ctx_pages; i++) {
2070 if (sc->ctx_block[i] != NULL) {
2071 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
2072 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2076 bus_dma_tag_destroy(sc->ctx_tag);
2080 if (sc->tx_rings != NULL) {
2081 for (i = 0; i < sc->tx_ring_cnt; ++i)
2082 bce_destroy_tx_ring(&sc->tx_rings[i]);
2083 kfree(sc->tx_rings, M_DEVBUF);
2087 if (sc->rx_rings != NULL) {
2088 for (i = 0; i < sc->rx_ring_cnt; ++i)
2089 bce_destroy_rx_ring(&sc->rx_rings[i]);
2090 kfree(sc->rx_rings, M_DEVBUF);
2093 /* Destroy the parent tag */
2094 if (sc->parent_tag != NULL)
2095 bus_dma_tag_destroy(sc->parent_tag);
2098 /****************************************************************************/
2099 /* Get DMA memory from the OS. */
2101 /* Validates that the OS has provided DMA buffers in response to a */
2102 /* bus_dmamap_load() call and saves the physical address of those buffers. */
2103 /* The callback expects a single segment: it asserts that nseg == 1 and */
2104 /* saves the segment's physical address through the bus_addr_t argument. */
2105 /* Load failures are reported via the callback's error argument. */
2109 /****************************************************************************/
2111 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2113 bus_addr_t *busaddr = arg;
2115 /* Check for an error and signal the caller that an error occurred. */
2119 KASSERT(nseg == 1, ("only one segment is allowed"));
2120 *busaddr = segs->ds_addr;
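
#if 0	/* Illustrative sketch only; not compiled into the driver. */
/*
 * Canonical use of the callback above, mirroring the loads done later
 * in this file: for coherent memory loaded with BUS_DMA_WAITOK,
 * bus_dmamap_load() invokes bce_dma_map_addr() synchronously, which
 * stores the single segment's physical address into 'busaddr'.
 * 'tag', 'map', 'vaddr' and 'size' are assumed to come from a prior
 * bus_dma_tag_create()/bus_dmamem_alloc(), as in bce_create_tx_ring().
 */
bus_addr_t busaddr;
int error;

error = bus_dmamap_load(tag, map, vaddr, size,
    bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
if (error == 0)
	paddr = busaddr;	/* physical address of the block */
#endif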
2124 bce_create_tx_ring(struct bce_tx_ring *txr)
2128 lwkt_serialize_init(&txr->tx_serialize);
2129 txr->tx_wreg = bce_tx_wreg;
2131 pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
2132 if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
2133 device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
2134 pages = TX_PAGES_DEFAULT;
2136 txr->tx_pages = pages;
2138 txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
2139 M_DEVBUF, M_WAITOK | M_ZERO);
2140 txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
2141 M_DEVBUF, M_WAITOK | M_ZERO);
2142 txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
2143 M_DEVBUF, M_WAITOK | M_ZERO);
2145 txr->tx_bufs = kmalloc_cachealign(
2146 sizeof(struct bce_tx_buf) * TOTAL_TX_BD(txr),
2147 M_DEVBUF, M_WAITOK | M_ZERO);
2150 * Create a DMA tag for the TX buffer descriptor chain,
2151 * allocate and clear the memory, and fetch the
2152 * physical address of the block.
2154 rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2155 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2156 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2157 0, &txr->tx_bd_chain_tag);
2159 device_printf(txr->sc->bce_dev, "Could not allocate "
2160 "TX descriptor chain DMA tag!\n");
2164 for (i = 0; i < txr->tx_pages; i++) {
2167 rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
2168 (void **)&txr->tx_bd_chain[i],
2169 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2170 &txr->tx_bd_chain_map[i]);
2172 device_printf(txr->sc->bce_dev,
2173 "Could not allocate %dth TX descriptor "
2174 "chain DMA memory!\n", i);
2178 rc = bus_dmamap_load(txr->tx_bd_chain_tag,
2179 txr->tx_bd_chain_map[i],
2180 txr->tx_bd_chain[i],
2181 BCE_TX_CHAIN_PAGE_SZ,
2182 bce_dma_map_addr, &busaddr,
2185 if (rc == EINPROGRESS) {
2186 panic("%s coherent memory loading "
2187 "is still in progress!",
2188 txr->sc->arpcom.ac_if.if_xname);
2190 device_printf(txr->sc->bce_dev, "Could not map %dth "
2191 "TX descriptor chain DMA memory!\n", i);
2192 bus_dmamem_free(txr->tx_bd_chain_tag,
2193 txr->tx_bd_chain[i],
2194 txr->tx_bd_chain_map[i]);
2195 txr->tx_bd_chain[i] = NULL;
2199 txr->tx_bd_chain_paddr[i] = busaddr;
2202 /* Create a DMA tag for TX mbufs. */
2203 rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
2204 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2205 IP_MAXPACKET + sizeof(struct ether_vlan_header),
2206 BCE_MAX_SEGMENTS, PAGE_SIZE,
2207 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2210 device_printf(txr->sc->bce_dev,
2211 "Could not allocate TX mbuf DMA tag!\n");
2215 /* Create DMA maps for the TX mbufs clusters. */
2216 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
2217 rc = bus_dmamap_create(txr->tx_mbuf_tag,
2218 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2219 &txr->tx_bufs[i].tx_mbuf_map);
2223 for (j = 0; j < i; ++j) {
2224 bus_dmamap_destroy(txr->tx_mbuf_tag,
2225 txr->tx_bufs[j].tx_mbuf_map);
2227 bus_dma_tag_destroy(txr->tx_mbuf_tag);
2228 txr->tx_mbuf_tag = NULL;
2230 device_printf(txr->sc->bce_dev, "Unable to create "
2231 "%dth TX mbuf DMA map!\n", i);
2239 bce_create_rx_ring(struct bce_rx_ring *rxr)
2243 lwkt_serialize_init(&rxr->rx_serialize);
2245 pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
2246 if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
2247 device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
2248 pages = RX_PAGES_DEFAULT;
2250 rxr->rx_pages = pages;
2252 rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
2253 M_DEVBUF, M_WAITOK | M_ZERO);
2254 rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
2255 M_DEVBUF, M_WAITOK | M_ZERO);
2256 rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
2257 M_DEVBUF, M_WAITOK | M_ZERO);
2259 rxr->rx_bufs = kmalloc_cachealign(
2260 sizeof(struct bce_rx_buf) * TOTAL_RX_BD(rxr),
2261 M_DEVBUF, M_WAITOK | M_ZERO);
2264 * Create a DMA tag for the RX buffer descriptor chain,
2265 * allocate and clear the memory, and fetch the physical
2266 * address of the blocks.
2268 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2269 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2270 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2271 0, &rxr->rx_bd_chain_tag);
2273 device_printf(rxr->sc->bce_dev, "Could not allocate "
2274 "RX descriptor chain DMA tag!\n");
2278 for (i = 0; i < rxr->rx_pages; i++) {
2281 rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
2282 (void **)&rxr->rx_bd_chain[i],
2283 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2284 &rxr->rx_bd_chain_map[i]);
2286 device_printf(rxr->sc->bce_dev,
2287 "Could not allocate %dth RX descriptor "
2288 "chain DMA memory!\n", i);
2292 rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
2293 rxr->rx_bd_chain_map[i],
2294 rxr->rx_bd_chain[i],
2295 BCE_RX_CHAIN_PAGE_SZ,
2296 bce_dma_map_addr, &busaddr,
2299 if (rc == EINPROGRESS) {
2300 panic("%s coherent memory loading "
2301 "is still in progress!",
2302 rxr->sc->arpcom.ac_if.if_xname);
2304 device_printf(rxr->sc->bce_dev,
2305 "Could not map %dth RX descriptor "
2306 "chain DMA memory!\n", i);
2307 bus_dmamem_free(rxr->rx_bd_chain_tag,
2308 rxr->rx_bd_chain[i],
2309 rxr->rx_bd_chain_map[i]);
2310 rxr->rx_bd_chain[i] = NULL;
2314 rxr->rx_bd_chain_paddr[i] = busaddr;
2317 /* Create a DMA tag for RX mbufs. */
2318 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
2319 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2320 MCLBYTES, 1, MCLBYTES,
2321 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
2324 device_printf(rxr->sc->bce_dev,
2325 "Could not allocate RX mbuf DMA tag!\n");
2329 /* Create tmp DMA map for RX mbuf clusters. */
2330 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2331 &rxr->rx_mbuf_tmpmap);
2333 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2334 rxr->rx_mbuf_tag = NULL;
2336 device_printf(rxr->sc->bce_dev,
2337 "Could not create RX mbuf tmp DMA map!\n");
2341 /* Create DMA maps for the RX mbuf clusters. */
2342 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2343 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2344 &rxr->rx_bufs[i].rx_mbuf_map);
2348 for (j = 0; j < i; ++j) {
2349 bus_dmamap_destroy(rxr->rx_mbuf_tag,
2350 rxr->rx_bufs[j].rx_mbuf_map);
2352 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2353 rxr->rx_mbuf_tag = NULL;
2355 device_printf(rxr->sc->bce_dev, "Unable to create "
2356 "%dth RX mbuf DMA map!\n", i);
2363 /****************************************************************************/
2364 /* Allocate any DMA memory needed by the driver. */
2366 /* Allocates DMA memory needed for the various global structures needed by */
2369 /* Memory alignment requirements: */
2370 /* -----------------+----------+----------+----------+----------+ */
2371 /* Data Structure | 5706 | 5708 | 5709 | 5716 | */
2372 /* -----------------+----------+----------+----------+----------+ */
2373 /* Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2374 /* Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2375 /* RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */
2376 /* PG Buffers | none | none | none | none | */
2377 /* TX Buffers | none | none | none | none | */
2378 /* Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */
2379 /* Context Pages(1) | N/A | N/A | 4KiB | 4KiB | */
2380 /* -----------------+----------+----------+----------+----------+ */
2382 /* (1) Must align with CPU page size (BCM_PAGE_SIZE). */
2385 /* 0 for success, positive value for failure. */
2386 /****************************************************************************/
2388 bce_dma_alloc(struct bce_softc *sc)
2390 struct ifnet *ifp = &sc->arpcom.ac_if;
2392 bus_addr_t busaddr, max_busaddr;
2393 bus_size_t status_align, stats_align, status_size;
2396 * The embedded PCIe to PCI-X bridge (EPB)
2397 * in the 5708 cannot address memory above
2398 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
2400 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
2401 max_busaddr = BCE_BUS_SPACE_MAXADDR;
2403 max_busaddr = BUS_SPACE_MAXADDR;
2406 * BCM5709 and BCM5716 use host memory as a cache for context memory.
2408 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2409 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2410 sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
2411 if (sc->ctx_pages == 0)
2413 if (sc->ctx_pages > BCE_CTX_PAGES) {
2414 device_printf(sc->bce_dev, "excessive ctx pages %d\n",
2426 * Each MSI-X vector needs a status block; each status block
2427 * consumes 128 bytes and is 128-byte aligned.
2429 if (sc->rx_ring_cnt > 1) {
2430 status_size = BCE_MSIX_MAX * BCE_STATUS_BLK_MSIX_ALIGN;
2431 status_align = BCE_STATUS_BLK_MSIX_ALIGN;
2433 status_size = BCE_STATUS_BLK_SZ;
2437 * Allocate the parent bus DMA tag appropriate for PCI.
2439 rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2440 max_busaddr, BUS_SPACE_MAXADDR,
2442 BUS_SPACE_MAXSIZE_32BIT, 0,
2443 BUS_SPACE_MAXSIZE_32BIT,
2444 0, &sc->parent_tag);
2446 if_printf(ifp, "Could not allocate parent DMA tag!\n");
2451 * Allocate status block.
2453 sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2454 status_align, status_size,
2455 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2456 &sc->status_tag, &sc->status_map,
2457 &sc->status_block_paddr);
2458 if (sc->status_block == NULL) {
2459 if_printf(ifp, "Could not allocate status block!\n");
2464 * Allocate statistics block.
2466 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2467 stats_align, BCE_STATS_BLK_SZ,
2468 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2469 &sc->stats_tag, &sc->stats_map,
2470 &sc->stats_block_paddr);
2471 if (sc->stats_block == NULL) {
2472 if_printf(ifp, "Could not allocate statistics block!\n");
2477 * Allocate context block, if needed
2479 if (sc->ctx_pages != 0) {
2480 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2481 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2483 BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
2486 if_printf(ifp, "Could not allocate "
2487 "context block DMA tag!\n");
2491 for (i = 0; i < sc->ctx_pages; i++) {
2492 rc = bus_dmamem_alloc(sc->ctx_tag,
2493 (void **)&sc->ctx_block[i],
2494 BUS_DMA_WAITOK | BUS_DMA_ZERO |
2498 if_printf(ifp, "Could not allocate %dth context "
2499 "DMA memory!\n", i);
2503 rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
2504 sc->ctx_block[i], BCM_PAGE_SIZE,
2505 bce_dma_map_addr, &busaddr,
2508 if (rc == EINPROGRESS) {
2509 panic("%s coherent memory loading "
2510 "is still in progress!", ifp->if_xname);
2512 if_printf(ifp, "Could not map %dth context "
2513 "DMA memory!\n", i);
2514 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2516 sc->ctx_block[i] = NULL;
2519 sc->ctx_paddr[i] = busaddr;
2523 sc->tx_rings = kmalloc_cachealign(
2524 sizeof(struct bce_tx_ring) * sc->tx_ring_cnt, M_DEVBUF,
2526 for (i = 0; i < sc->tx_ring_cnt; ++i) {
2527 sc->tx_rings[i].sc = sc;
2529 sc->tx_rings[i].tx_cid = TX_CID;
2530 sc->tx_rings[i].tx_hw_cons =
2531 &sc->status_block->status_tx_quick_consumer_index0;
2533 struct status_block_msix *sblk =
2534 (struct status_block_msix *)
2535 (((uint8_t *)(sc->status_block)) +
2536 (i * BCE_STATUS_BLK_MSIX_ALIGN));
2538 sc->tx_rings[i].tx_cid = TX_TSS_CID + i - 1;
2539 sc->tx_rings[i].tx_hw_cons =
2540 &sblk->status_tx_quick_consumer_index;
2543 rc = bce_create_tx_ring(&sc->tx_rings[i]);
2545 device_printf(sc->bce_dev,
2546 "can't create %dth tx ring\n", i);
2551 sc->rx_rings = kmalloc_cachealign(
2552 sizeof(struct bce_rx_ring) * sc->rx_ring_cnt, M_DEVBUF,
2554 for (i = 0; i < sc->rx_ring_cnt; ++i) {
2555 sc->rx_rings[i].sc = sc;
2556 sc->rx_rings[i].idx = i;
2558 sc->rx_rings[i].rx_cid = RX_CID;
2559 sc->rx_rings[i].rx_hw_cons =
2560 &sc->status_block->status_rx_quick_consumer_index0;
2561 sc->rx_rings[i].hw_status_idx =
2562 &sc->status_block->status_idx;
2564 struct status_block_msix *sblk =
2565 (struct status_block_msix *)
2566 (((uint8_t *)(sc->status_block)) +
2567 (i * BCE_STATUS_BLK_MSIX_ALIGN));
2569 sc->rx_rings[i].rx_cid = RX_RSS_CID + i - 1;
2570 sc->rx_rings[i].rx_hw_cons =
2571 &sblk->status_rx_quick_consumer_index;
2572 sc->rx_rings[i].hw_status_idx = &sblk->status_idx;
2575 rc = bce_create_rx_ring(&sc->rx_rings[i]);
2577 device_printf(sc->bce_dev,
2578 "can't create %dth rx ring\n", i);
2586 /****************************************************************************/
2587 /* Firmware synchronization. */
2589 /* Before performing certain events such as a chip reset, synchronize with */
2590 /* the firmware first. */
2593 /* 0 for success, positive value for failure. */
2594 /****************************************************************************/
2596 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2601 /* Don't waste any time if we've timed out before. */
2602 if (sc->bce_fw_timed_out)
2605 /* Increment the message sequence number. */
2606 sc->bce_fw_wr_seq++;
2607 msg_data |= sc->bce_fw_wr_seq;
2609 /* Send the message to the bootcode driver mailbox. */
2610 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2612 /* Wait for the bootcode to acknowledge the message. */
2613 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2614 /* Check for a response in the bootcode firmware mailbox. */
2615 val = bce_shmem_rd(sc, BCE_FW_MB);
2616 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2621 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2622 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2623 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2624 if_printf(&sc->arpcom.ac_if,
2625 "Firmware synchronization timeout! "
2626 "msg_data = 0x%08X\n", msg_data);
2628 msg_data &= ~BCE_DRV_MSG_CODE;
2629 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2631 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2633 sc->bce_fw_timed_out = 1;
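
#if 0	/* Illustrative sketch only; not compiled into the driver. */
/*
 * A minimal standalone model of the mailbox handshake above: the
 * driver writes a sequence-stamped message to its mailbox, and the
 * bootcode acknowledges by echoing the sequence number into the ack
 * field of the firmware mailbox, which the driver polls.  The field
 * layout is illustrative: the low 16 bits are assumed to carry the
 * sequence (i.e. BCE_DRV_MSG_SEQ and BCE_FW_MSG_ACK are assumed to be
 * 16-bit masks).
 */
#include <stdint.h>

#define MSG_SEQ_MASK	0x0000ffff

static volatile uint32_t drv_mb, fw_mb;	/* shared-memory mailboxes */

static int
fw_sync_model(uint32_t msg, uint32_t seq, int timeout_ms)
{
	int i;

	drv_mb = msg | (seq & MSG_SEQ_MASK);	/* post the message */
	for (i = 0; i < timeout_ms; i++) {
		/* Bootcode writes fw_mb; poll for the echoed sequence. */
		if ((fw_mb & MSG_SEQ_MASK) == (seq & MSG_SEQ_MASK))
			return 0;		/* acked this sequence */
		/* sleep 1 ms here */
	}
	return -1;				/* timed out */
}
#endif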
2639 /****************************************************************************/
2640 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2644 /****************************************************************************/
2646 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2647 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2652 for (i = 0; i < rv2p_code_len; i += 8) {
2653 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2655 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2658 if (rv2p_proc == RV2P_PROC1) {
2659 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2660 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2662 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2663 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2667 /* Reset the processor, un-stall is done later. */
2668 if (rv2p_proc == RV2P_PROC1)
2669 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2671 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2674 /****************************************************************************/
2675 /* Load RISC processor firmware. */
2677 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */
2678 /* associated with a particular processor. */
2682 /****************************************************************************/
2684 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2690 bce_halt_cpu(sc, cpu_reg);
2692 /* Load the Text area. */
2693 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2695 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2696 REG_WR_IND(sc, offset, fw->text[j]);
2699 /* Load the Data area. */
2700 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2702 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2703 REG_WR_IND(sc, offset, fw->data[j]);
2706 /* Load the SBSS area. */
2707 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2709 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2710 REG_WR_IND(sc, offset, fw->sbss[j]);
2713 /* Load the BSS area. */
2714 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2716 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2717 REG_WR_IND(sc, offset, fw->bss[j]);
2720 /* Load the Read-Only area. */
2721 offset = cpu_reg->spad_base +
2722 (fw->rodata_addr - cpu_reg->mips_view_base);
2724 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2725 REG_WR_IND(sc, offset, fw->rodata[j]);
2728 /* Clear the pre-fetch instruction and set the FW start address. */
2729 REG_WR_IND(sc, cpu_reg->inst, 0);
2730 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
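
#if 0	/* Illustrative sketch only; not compiled into the driver. */
/*
 * Worked example of the address rebasing used above.  Each firmware
 * section carries the MIPS view address it was linked at; the driver
 * translates it into the host-visible scratchpad window by subtracting
 * mips_view_base (0x8000000 in this file) and adding spad_base.  For a
 * section linked at 0x08000118 in a CPU whose window is spad_base
 * (e.g. BCE_RXP_SCRATCH):
 *
 *	offset = spad_base + (sect_addr - mips_view_base)
 *	       = spad_base + (0x08000118 - 0x08000000)
 *	       = spad_base + 0x118
 *
 * Each 32-bit firmware word is then written at offset, offset + 4, ...
 * via REG_WR_IND(), as in the loops above.
 */
#endif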
2733 /****************************************************************************/
2734 /* Starts the RISC processor. */
2736 /* Assumes the CPU starting address has already been set. */
2740 /****************************************************************************/
2742 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2746 /* Start the CPU. */
2747 val = REG_RD_IND(sc, cpu_reg->mode);
2748 val &= ~cpu_reg->mode_value_halt;
2749 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2750 REG_WR_IND(sc, cpu_reg->mode, val);
2753 /****************************************************************************/
2754 /* Halts the RISC processor. */
2758 /****************************************************************************/
2760 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2765 val = REG_RD_IND(sc, cpu_reg->mode);
2766 val |= cpu_reg->mode_value_halt;
2767 REG_WR_IND(sc, cpu_reg->mode, val);
2768 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2771 /****************************************************************************/
2772 /* Start the RX CPU. */
2776 /****************************************************************************/
2778 bce_start_rxp_cpu(struct bce_softc *sc)
2780 struct cpu_reg cpu_reg;
2782 cpu_reg.mode = BCE_RXP_CPU_MODE;
2783 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2784 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2785 cpu_reg.state = BCE_RXP_CPU_STATE;
2786 cpu_reg.state_value_clear = 0xffffff;
2787 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2788 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2789 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2790 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2791 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2792 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2793 cpu_reg.mips_view_base = 0x8000000;
2795 bce_start_cpu(sc, &cpu_reg);
2798 /****************************************************************************/
2799 /* Initialize the RX CPU. */
2803 /****************************************************************************/
2805 bce_init_rxp_cpu(struct bce_softc *sc)
2807 struct cpu_reg cpu_reg;
2810 cpu_reg.mode = BCE_RXP_CPU_MODE;
2811 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2812 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2813 cpu_reg.state = BCE_RXP_CPU_STATE;
2814 cpu_reg.state_value_clear = 0xffffff;
2815 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2816 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2817 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2818 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2819 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2820 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2821 cpu_reg.mips_view_base = 0x8000000;
2823 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2824 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2825 fw.ver_major = bce_RXP_b09FwReleaseMajor;
2826 fw.ver_minor = bce_RXP_b09FwReleaseMinor;
2827 fw.ver_fix = bce_RXP_b09FwReleaseFix;
2828 fw.start_addr = bce_RXP_b09FwStartAddr;
2830 fw.text_addr = bce_RXP_b09FwTextAddr;
2831 fw.text_len = bce_RXP_b09FwTextLen;
2833 fw.text = bce_RXP_b09FwText;
2835 fw.data_addr = bce_RXP_b09FwDataAddr;
2836 fw.data_len = bce_RXP_b09FwDataLen;
2838 fw.data = bce_RXP_b09FwData;
2840 fw.sbss_addr = bce_RXP_b09FwSbssAddr;
2841 fw.sbss_len = bce_RXP_b09FwSbssLen;
2843 fw.sbss = bce_RXP_b09FwSbss;
2845 fw.bss_addr = bce_RXP_b09FwBssAddr;
2846 fw.bss_len = bce_RXP_b09FwBssLen;
2848 fw.bss = bce_RXP_b09FwBss;
2850 fw.rodata_addr = bce_RXP_b09FwRodataAddr;
2851 fw.rodata_len = bce_RXP_b09FwRodataLen;
2852 fw.rodata_index = 0;
2853 fw.rodata = bce_RXP_b09FwRodata;
2855 fw.ver_major = bce_RXP_b06FwReleaseMajor;
2856 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2857 fw.ver_fix = bce_RXP_b06FwReleaseFix;
2858 fw.start_addr = bce_RXP_b06FwStartAddr;
2860 fw.text_addr = bce_RXP_b06FwTextAddr;
2861 fw.text_len = bce_RXP_b06FwTextLen;
2863 fw.text = bce_RXP_b06FwText;
2865 fw.data_addr = bce_RXP_b06FwDataAddr;
2866 fw.data_len = bce_RXP_b06FwDataLen;
2868 fw.data = bce_RXP_b06FwData;
2870 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2871 fw.sbss_len = bce_RXP_b06FwSbssLen;
2873 fw.sbss = bce_RXP_b06FwSbss;
2875 fw.bss_addr = bce_RXP_b06FwBssAddr;
2876 fw.bss_len = bce_RXP_b06FwBssLen;
2878 fw.bss = bce_RXP_b06FwBss;
2880 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2881 fw.rodata_len = bce_RXP_b06FwRodataLen;
2882 fw.rodata_index = 0;
2883 fw.rodata = bce_RXP_b06FwRodata;
2886 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2887 /* Delay RXP start until initialization is complete. */
2890 /****************************************************************************/
2891 /* Initialize the TX CPU. */
2895 /****************************************************************************/
2897 bce_init_txp_cpu(struct bce_softc *sc)
2899 struct cpu_reg cpu_reg;
2902 cpu_reg.mode = BCE_TXP_CPU_MODE;
2903 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2904 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2905 cpu_reg.state = BCE_TXP_CPU_STATE;
2906 cpu_reg.state_value_clear = 0xffffff;
2907 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2908 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2909 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2910 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2911 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2912 cpu_reg.spad_base = BCE_TXP_SCRATCH;
2913 cpu_reg.mips_view_base = 0x8000000;
2915 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2916 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2917 fw.ver_major = bce_TXP_b09FwReleaseMajor;
2918 fw.ver_minor = bce_TXP_b09FwReleaseMinor;
2919 fw.ver_fix = bce_TXP_b09FwReleaseFix;
2920 fw.start_addr = bce_TXP_b09FwStartAddr;
2922 fw.text_addr = bce_TXP_b09FwTextAddr;
2923 fw.text_len = bce_TXP_b09FwTextLen;
2925 fw.text = bce_TXP_b09FwText;
2927 fw.data_addr = bce_TXP_b09FwDataAddr;
2928 fw.data_len = bce_TXP_b09FwDataLen;
2930 fw.data = bce_TXP_b09FwData;
2932 fw.sbss_addr = bce_TXP_b09FwSbssAddr;
2933 fw.sbss_len = bce_TXP_b09FwSbssLen;
2935 fw.sbss = bce_TXP_b09FwSbss;
2937 fw.bss_addr = bce_TXP_b09FwBssAddr;
2938 fw.bss_len = bce_TXP_b09FwBssLen;
2940 fw.bss = bce_TXP_b09FwBss;
2942 fw.rodata_addr = bce_TXP_b09FwRodataAddr;
2943 fw.rodata_len = bce_TXP_b09FwRodataLen;
2944 fw.rodata_index = 0;
2945 fw.rodata = bce_TXP_b09FwRodata;
2947 fw.ver_major = bce_TXP_b06FwReleaseMajor;
2948 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2949 fw.ver_fix = bce_TXP_b06FwReleaseFix;
2950 fw.start_addr = bce_TXP_b06FwStartAddr;
2952 fw.text_addr = bce_TXP_b06FwTextAddr;
2953 fw.text_len = bce_TXP_b06FwTextLen;
2955 fw.text = bce_TXP_b06FwText;
2957 fw.data_addr = bce_TXP_b06FwDataAddr;
2958 fw.data_len = bce_TXP_b06FwDataLen;
2960 fw.data = bce_TXP_b06FwData;
2962 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2963 fw.sbss_len = bce_TXP_b06FwSbssLen;
2965 fw.sbss = bce_TXP_b06FwSbss;
2967 fw.bss_addr = bce_TXP_b06FwBssAddr;
2968 fw.bss_len = bce_TXP_b06FwBssLen;
2970 fw.bss = bce_TXP_b06FwBss;
2972 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2973 fw.rodata_len = bce_TXP_b06FwRodataLen;
2974 fw.rodata_index = 0;
2975 fw.rodata = bce_TXP_b06FwRodata;
2978 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2979 bce_start_cpu(sc, &cpu_reg);
2982 /****************************************************************************/
2983 /* Initialize the TPAT CPU. */
2987 /****************************************************************************/
2989 bce_init_tpat_cpu(struct bce_softc *sc)
2991 struct cpu_reg cpu_reg;
2994 cpu_reg.mode = BCE_TPAT_CPU_MODE;
2995 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2996 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2997 cpu_reg.state = BCE_TPAT_CPU_STATE;
2998 cpu_reg.state_value_clear = 0xffffff;
2999 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
3000 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
3001 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
3002 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
3003 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
3004 cpu_reg.spad_base = BCE_TPAT_SCRATCH;
3005 cpu_reg.mips_view_base = 0x8000000;
3007 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3008 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3009 fw.ver_major = bce_TPAT_b09FwReleaseMajor;
3010 fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
3011 fw.ver_fix = bce_TPAT_b09FwReleaseFix;
3012 fw.start_addr = bce_TPAT_b09FwStartAddr;
3014 fw.text_addr = bce_TPAT_b09FwTextAddr;
3015 fw.text_len = bce_TPAT_b09FwTextLen;
3017 fw.text = bce_TPAT_b09FwText;
3019 fw.data_addr = bce_TPAT_b09FwDataAddr;
3020 fw.data_len = bce_TPAT_b09FwDataLen;
3022 fw.data = bce_TPAT_b09FwData;
3024 fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
3025 fw.sbss_len = bce_TPAT_b09FwSbssLen;
3027 fw.sbss = bce_TPAT_b09FwSbss;
3029 fw.bss_addr = bce_TPAT_b09FwBssAddr;
3030 fw.bss_len = bce_TPAT_b09FwBssLen;
3032 fw.bss = bce_TPAT_b09FwBss;
3034 fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
3035 fw.rodata_len = bce_TPAT_b09FwRodataLen;
3036 fw.rodata_index = 0;
3037 fw.rodata = bce_TPAT_b09FwRodata;
3039 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3040 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3041 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3042 fw.start_addr = bce_TPAT_b06FwStartAddr;
3044 fw.text_addr = bce_TPAT_b06FwTextAddr;
3045 fw.text_len = bce_TPAT_b06FwTextLen;
3047 fw.text = bce_TPAT_b06FwText;
3049 fw.data_addr = bce_TPAT_b06FwDataAddr;
3050 fw.data_len = bce_TPAT_b06FwDataLen;
3052 fw.data = bce_TPAT_b06FwData;
3054 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3055 fw.sbss_len = bce_TPAT_b06FwSbssLen;
3057 fw.sbss = bce_TPAT_b06FwSbss;
3059 fw.bss_addr = bce_TPAT_b06FwBssAddr;
3060 fw.bss_len = bce_TPAT_b06FwBssLen;
3062 fw.bss = bce_TPAT_b06FwBss;
3064 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3065 fw.rodata_len = bce_TPAT_b06FwRodataLen;
3066 fw.rodata_index = 0;
3067 fw.rodata = bce_TPAT_b06FwRodata;
3070 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3071 bce_start_cpu(sc, &cpu_reg);
3074 /****************************************************************************/
3075 /* Initialize the CP CPU. */
3079 /****************************************************************************/
3081 bce_init_cp_cpu(struct bce_softc *sc)
3083 struct cpu_reg cpu_reg;
3086 cpu_reg.mode = BCE_CP_CPU_MODE;
3087 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3088 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3089 cpu_reg.state = BCE_CP_CPU_STATE;
3090 cpu_reg.state_value_clear = 0xffffff;
3091 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3092 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3093 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3094 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3095 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3096 cpu_reg.spad_base = BCE_CP_SCRATCH;
3097 cpu_reg.mips_view_base = 0x8000000;
3099 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3100 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3101 fw.ver_major = bce_CP_b09FwReleaseMajor;
3102 fw.ver_minor = bce_CP_b09FwReleaseMinor;
3103 fw.ver_fix = bce_CP_b09FwReleaseFix;
3104 fw.start_addr = bce_CP_b09FwStartAddr;
3106 fw.text_addr = bce_CP_b09FwTextAddr;
3107 fw.text_len = bce_CP_b09FwTextLen;
3109 fw.text = bce_CP_b09FwText;
3111 fw.data_addr = bce_CP_b09FwDataAddr;
3112 fw.data_len = bce_CP_b09FwDataLen;
3114 fw.data = bce_CP_b09FwData;
3116 fw.sbss_addr = bce_CP_b09FwSbssAddr;
3117 fw.sbss_len = bce_CP_b09FwSbssLen;
3119 fw.sbss = bce_CP_b09FwSbss;
3121 fw.bss_addr = bce_CP_b09FwBssAddr;
3122 fw.bss_len = bce_CP_b09FwBssLen;
3124 fw.bss = bce_CP_b09FwBss;
3126 fw.rodata_addr = bce_CP_b09FwRodataAddr;
3127 fw.rodata_len = bce_CP_b09FwRodataLen;
3128 fw.rodata_index = 0;
3129 fw.rodata = bce_CP_b09FwRodata;
3131 fw.ver_major = bce_CP_b06FwReleaseMajor;
3132 fw.ver_minor = bce_CP_b06FwReleaseMinor;
3133 fw.ver_fix = bce_CP_b06FwReleaseFix;
3134 fw.start_addr = bce_CP_b06FwStartAddr;
3136 fw.text_addr = bce_CP_b06FwTextAddr;
3137 fw.text_len = bce_CP_b06FwTextLen;
3139 fw.text = bce_CP_b06FwText;
3141 fw.data_addr = bce_CP_b06FwDataAddr;
3142 fw.data_len = bce_CP_b06FwDataLen;
3144 fw.data = bce_CP_b06FwData;
3146 fw.sbss_addr = bce_CP_b06FwSbssAddr;
3147 fw.sbss_len = bce_CP_b06FwSbssLen;
3149 fw.sbss = bce_CP_b06FwSbss;
3151 fw.bss_addr = bce_CP_b06FwBssAddr;
3152 fw.bss_len = bce_CP_b06FwBssLen;
3154 fw.bss = bce_CP_b06FwBss;
3156 fw.rodata_addr = bce_CP_b06FwRodataAddr;
3157 fw.rodata_len = bce_CP_b06FwRodataLen;
3158 fw.rodata_index = 0;
3159 fw.rodata = bce_CP_b06FwRodata;
3162 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3163 bce_start_cpu(sc, &cpu_reg);
3166 /****************************************************************************/
3167 /* Initialize the COM CPU. */
3171 /****************************************************************************/
3173 bce_init_com_cpu(struct bce_softc *sc)
3175 struct cpu_reg cpu_reg;
3178 cpu_reg.mode = BCE_COM_CPU_MODE;
3179 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3180 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3181 cpu_reg.state = BCE_COM_CPU_STATE;
3182 cpu_reg.state_value_clear = 0xffffff;
3183 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3184 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3185 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3186 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3187 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3188 cpu_reg.spad_base = BCE_COM_SCRATCH;
3189 cpu_reg.mips_view_base = 0x8000000;
3191 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3192 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3193 fw.ver_major = bce_COM_b09FwReleaseMajor;
3194 fw.ver_minor = bce_COM_b09FwReleaseMinor;
3195 fw.ver_fix = bce_COM_b09FwReleaseFix;
3196 fw.start_addr = bce_COM_b09FwStartAddr;
3198 fw.text_addr = bce_COM_b09FwTextAddr;
3199 fw.text_len = bce_COM_b09FwTextLen;
3201 fw.text = bce_COM_b09FwText;
3203 fw.data_addr = bce_COM_b09FwDataAddr;
3204 fw.data_len = bce_COM_b09FwDataLen;
3206 fw.data = bce_COM_b09FwData;
3208 fw.sbss_addr = bce_COM_b09FwSbssAddr;
3209 fw.sbss_len = bce_COM_b09FwSbssLen;
3211 fw.sbss = bce_COM_b09FwSbss;
3213 fw.bss_addr = bce_COM_b09FwBssAddr;
3214 fw.bss_len = bce_COM_b09FwBssLen;
3216 fw.bss = bce_COM_b09FwBss;
3218 fw.rodata_addr = bce_COM_b09FwRodataAddr;
3219 fw.rodata_len = bce_COM_b09FwRodataLen;
3220 fw.rodata_index = 0;
3221 fw.rodata = bce_COM_b09FwRodata;
3223 fw.ver_major = bce_COM_b06FwReleaseMajor;
3224 fw.ver_minor = bce_COM_b06FwReleaseMinor;
3225 fw.ver_fix = bce_COM_b06FwReleaseFix;
3226 fw.start_addr = bce_COM_b06FwStartAddr;
3228 fw.text_addr = bce_COM_b06FwTextAddr;
3229 fw.text_len = bce_COM_b06FwTextLen;
3231 fw.text = bce_COM_b06FwText;
3233 fw.data_addr = bce_COM_b06FwDataAddr;
3234 fw.data_len = bce_COM_b06FwDataLen;
3236 fw.data = bce_COM_b06FwData;
3238 fw.sbss_addr = bce_COM_b06FwSbssAddr;
3239 fw.sbss_len = bce_COM_b06FwSbssLen;
3241 fw.sbss = bce_COM_b06FwSbss;
3243 fw.bss_addr = bce_COM_b06FwBssAddr;
3244 fw.bss_len = bce_COM_b06FwBssLen;
3246 fw.bss = bce_COM_b06FwBss;
3248 fw.rodata_addr = bce_COM_b06FwRodataAddr;
3249 fw.rodata_len = bce_COM_b06FwRodataLen;
3250 fw.rodata_index = 0;
3251 fw.rodata = bce_COM_b06FwRodata;
3254 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3255 bce_start_cpu(sc, &cpu_reg);
3258 /****************************************************************************/
3259 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */
3261 /* Loads the firmware for each CPU and starts the CPU. */
3265 /****************************************************************************/
3267 bce_init_cpus(struct bce_softc *sc)
3269 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3270 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3271 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3272 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3273 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3274 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3275 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3277 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3278 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3279 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3280 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3283 bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3284 sizeof(bce_rv2p_proc1), RV2P_PROC1);
3285 bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3286 sizeof(bce_rv2p_proc2), RV2P_PROC2);
3289 bce_init_rxp_cpu(sc);
3290 bce_init_txp_cpu(sc);
3291 bce_init_tpat_cpu(sc);
3292 bce_init_com_cpu(sc);
3293 bce_init_cp_cpu(sc);
3296 /****************************************************************************/
3297 /* Initialize context memory. */
3299 /* Clears the memory associated with each Context ID (CID). */
3303 /****************************************************************************/
3305 bce_init_ctx(struct bce_softc *sc)
3307 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3308 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3309 /* DRC: Replace this constant value with a #define. */
3310 int i, retry_cnt = 10;
3314 * BCM5709 context memory may be cached
3315 * in host memory so prepare the host memory for access.
3318 val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
3320 val |= (BCM_PAGE_BITS - 8) << 16;
3321 REG_WR(sc, BCE_CTX_COMMAND, val);
3323 /* Wait for mem init command to complete. */
3324 for (i = 0; i < retry_cnt; i++) {
3325 val = REG_RD(sc, BCE_CTX_COMMAND);
3326 if (!(val & BCE_CTX_COMMAND_MEM_INIT))
3330 if (i == retry_cnt) {
3331 device_printf(sc->bce_dev,
3332 "Context memory initialization failed!\n");
3336 for (i = 0; i < sc->ctx_pages; i++) {
3340 * Set the physical address of the context memory page.
3343 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
3344 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
3345 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
3346 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
3347 BCE_ADDR_HI(sc->ctx_paddr[i]));
3348 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
3349 i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3352 * Verify that the context memory write was successful.
3354 for (j = 0; j < retry_cnt; j++) {
3355 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
3357 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3361 if (j == retry_cnt) {
3362 device_printf(sc->bce_dev,
3363 "Failed to initialize context page!\n");
3368 uint32_t vcid_addr, offset;
3371 * For the 5706/5708, context memory is local to
3372 * the controller, so initialize the controller context memory directly.
3376 vcid_addr = GET_CID_ADDR(96);
3378 vcid_addr -= PHY_CTX_SIZE;
3380 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
3381 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3383 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3384 CTX_WR(sc, 0x00, offset, 0);
3386 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3387 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3393 /****************************************************************************/
3394 /* Fetch the permanent MAC address of the controller. */
3398 /****************************************************************************/
3400 bce_get_mac_addr(struct bce_softc *sc)
3402 uint32_t mac_lo = 0, mac_hi = 0;
3405 * The NetXtreme II bootcode populates various NIC
3406 * power-on and runtime configuration items in a
3407 * shared memory area. The factory configured MAC
3408 * address is available from both NVRAM and the
3409 * shared memory area so we'll read the value from
3410 * shared memory for speed.
3413 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
3414 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
3416 if (mac_lo == 0 && mac_hi == 0) {
3417 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
3419 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3420 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3421 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3422 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3423 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3424 sc->eaddr[5] = (u_char)(mac_lo >> 0);
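
#if 0	/* Illustrative sketch only; not compiled into the driver. */
/*
 * Model of the MAC address layout decoded above.  The bootcode stores
 * the 6-byte address in two 32-bit shared-memory words: the upper word
 * carries bytes 0-1 in its low 16 bits, the lower word carries bytes
 * 2-5.  For 00:10:18:aa:bb:cc:
 *
 *	mac_hi = 0x00000010	(bytes 0-1: 00 10)
 *	mac_lo = 0x18aabbcc	(bytes 2-5: 18 aa bb cc)
 */
#include <stdint.h>

static void
unpack_mac(uint32_t mac_hi, uint32_t mac_lo, uint8_t eaddr[6])
{
	eaddr[0] = mac_hi >> 8;
	eaddr[1] = mac_hi;
	eaddr[2] = mac_lo >> 24;
	eaddr[3] = mac_lo >> 16;
	eaddr[4] = mac_lo >> 8;
	eaddr[5] = mac_lo;
}
#endif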
3428 /****************************************************************************/
3429 /* Program the MAC address. */
3433 /****************************************************************************/
3435 bce_set_mac_addr(struct bce_softc *sc)
3437 const uint8_t *mac_addr = sc->eaddr;
3440 val = (mac_addr[0] << 8) | mac_addr[1];
3441 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3443 val = (mac_addr[2] << 24) |
3444 (mac_addr[3] << 16) |
3445 (mac_addr[4] << 8) |
3447 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3450 /****************************************************************************/
3451 /* Stop the controller. */
3455 /****************************************************************************/
3457 bce_stop(struct bce_softc *sc)
3459 struct ifnet *ifp = &sc->arpcom.ac_if;
3462 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3464 callout_stop(&sc->bce_tick_callout);
3466 /* Disable the transmit/receive blocks. */
3467 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
3468 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3471 bce_disable_intr(sc);
3473 ifp->if_flags &= ~IFF_RUNNING;
3474 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3475 ifsq_clr_oactive(sc->tx_rings[i].ifsq);
3476 ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
3479 /* Free the RX lists. */
3480 for (i = 0; i < sc->rx_ring_cnt; ++i)
3481 bce_free_rx_chain(&sc->rx_rings[i]);
3483 /* Free TX buffers. */
3484 for (i = 0; i < sc->tx_ring_cnt; ++i)
3485 bce_free_tx_chain(&sc->tx_rings[i]);
3488 sc->bce_coalchg_mask = 0;
3492 bce_reset(struct bce_softc *sc, uint32_t reset_code)
3497 /* Wait for pending PCI transactions to complete. */
3498 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3499 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3500 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3501 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3502 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3503 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3507 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3508 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3509 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3510 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3511 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3514 /* Assume bootcode is running. */
3515 sc->bce_fw_timed_out = 0;
3516 sc->bce_drv_cardiac_arrest = 0;
3518 /* Give the firmware a chance to prepare for the reset. */
3519 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3521 if_printf(&sc->arpcom.ac_if,
3522 "Firmware is not ready for reset\n");
3526 /* Set a firmware reminder that this is a soft reset. */
3527 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
3528 BCE_DRV_RESET_SIGNATURE_MAGIC);
3530 /* Dummy read to force the chip to complete all current transactions. */
3531 val = REG_RD(sc, BCE_MISC_ID);
3534 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3535 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3536 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
3537 REG_RD(sc, BCE_MISC_COMMAND);
3540 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3541 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3543 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
3545 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3546 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3547 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3548 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3550 /* Allow up to 30us for reset to complete. */
3551 for (i = 0; i < 10; i++) {
3552 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3553 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3554 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3559 /* Check that reset completed successfully. */
3560 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3561 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3562 if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
3567 /* Make sure byte swapping is properly configured. */
3568 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3569 if (val != 0x01020304) {
3570 if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
3574 /* Just completed a reset, assume that firmware is running again. */
3575 sc->bce_fw_timed_out = 0;
3576 sc->bce_drv_cardiac_arrest = 0;
3578 /* Wait for the firmware to finish its initialization. */
3579 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3581 if_printf(&sc->arpcom.ac_if,
3582 "Firmware did not complete initialization!\n");
3585 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3586 bce_setup_msix_table(sc);
3587 /* Prevent MSI-X table reads and writes from timing out */
3588 REG_WR(sc, BCE_MISC_ECO_HW_CTL,
3589 BCE_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
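
#if 0	/* Illustrative sketch only; not compiled into the driver. */
/*
 * Why the BCE_PCI_SWAP_DIAG0 == 0x01020304 check in bce_reset() above
 * works: the register holds a known byte pattern, so a host read
 * returns 0x01020304 only when the byte- and word-swap settings agree
 * end to end.  Any mis-configured swap stage permutes the bytes into a
 * different constant; a pure byte swap, for example, would read back
 * as 0x04030201.
 */
#endif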
3596 bce_chipinit(struct bce_softc *sc)
3601 /* Make sure the interrupt is not active. */
3602 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3603 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
3606 * Initialize DMA byte/word swapping, configure the number of DMA
3607 * channels and PCI clock compensation delay.
3609 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3610 BCE_DMA_CONFIG_DATA_WORD_SWAP |
3611 #if BYTE_ORDER == BIG_ENDIAN
3612 BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3614 BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3615 DMA_READ_CHANS << 12 |
3616 DMA_WRITE_CHANS << 16;
3618 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3620 if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3621 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3624 * This setting resolves a problem observed on certain Intel PCI
3625 * chipsets that cannot handle multiple outstanding DMA operations.
3626 * See errata E9_5706A1_65.
3628 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3629 BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3630 !(sc->bce_flags & BCE_PCIX_FLAG))
3631 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3633 REG_WR(sc, BCE_DMA_CONFIG, val);
3635 /* Enable the RX_V2P and Context state machines before access. */
3636 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3637 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3638 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3639 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3641 /* Initialize context mapping and zero out the quick contexts. */
3642 rc = bce_init_ctx(sc);
3646 /* Initialize the on-board CPUs. */
3649 /* Enable management frames (NC-SI) to flow to the MCP. */
3650 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3651 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
3652 BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3653 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3656 /* Prepare NVRAM for access. */
3657 rc = bce_init_nvram(sc);
3661 /* Set the kernel bypass block size */
3662 val = REG_RD(sc, BCE_MQ_CONFIG);
3663 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3664 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3666 /* Enable bins used on the 5709/5716. */
3667 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3668 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3669 val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
3670 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
3671 val |= BCE_MQ_CONFIG_HALT_DIS;
3674 REG_WR(sc, BCE_MQ_CONFIG, val);
3676 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3677 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3678 REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3680 /* Set the page size and clear the RV2P processor stall bits. */
3681 val = (BCM_PAGE_BITS - 8) << 24;
3682 REG_WR(sc, BCE_RV2P_CONFIG, val);
3684 /* Configure page size. */
3685 val = REG_RD(sc, BCE_TBDR_CONFIG);
3686 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3687 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3688 REG_WR(sc, BCE_TBDR_CONFIG, val);
3690 /* Set the perfect match control register to default. */
3691 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
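
#if 0	/* Illustrative sketch only; not compiled into the driver. */
/*
 * Worked example of the page-size encoding used above for
 * BCE_RV2P_CONFIG and BCE_TBDR_CONFIG: the hardware expects
 * log2(page size) - 8, i.e. the page size as a power of two in
 * 256-byte units.  For 4 KiB chain pages:
 *
 *	BCM_PAGE_BITS = 12	(4096 = 1 << 12)
 *	field = BCM_PAGE_BITS - 8 = 4
 *
 * which the code above shifts into bits 27:24 of the register (and
 * into bits 23:16 of BCE_CTX_COMMAND in bce_init_ctx()).
 */
#endif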
3696 /****************************************************************************/
3697 /* Initialize the controller in preparation to send/receive traffic. */
3700 /* 0 for success, positive value for failure. */
3701 /****************************************************************************/
3703 bce_blockinit(struct bce_softc *sc)
3708 /* Load the hardware default MAC address. */
3709 bce_set_mac_addr(sc);
3711 /* Set the Ethernet backoff seed value */
3712 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3713 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3714 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3716 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3718 /* Set up link change interrupt generation. */
3719 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3721 /* Program the physical address of the status block. */
3722 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3723 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3725 /* Program the physical address of the statistics block. */
3726 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3727 BCE_ADDR_LO(sc->stats_block_paddr));
3728 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3729 BCE_ADDR_HI(sc->stats_block_paddr));
3731 /* Program various host coalescing parameters. */
3732 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3733 (sc->bce_tx_quick_cons_trip_int << 16) |
3734 sc->bce_tx_quick_cons_trip);
3735 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3736 (sc->bce_rx_quick_cons_trip_int << 16) |
3737 sc->bce_rx_quick_cons_trip);
3738 REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3739 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3740 REG_WR(sc, BCE_HC_TX_TICKS,
3741 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3742 REG_WR(sc, BCE_HC_RX_TICKS,
3743 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3744 REG_WR(sc, BCE_HC_COM_TICKS,
3745 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3746 REG_WR(sc, BCE_HC_CMD_TICKS,
3747 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3748 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3749 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3751 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3752 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
3754 val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
3755 if ((sc->bce_flags & BCE_ONESHOT_MSI_FLAG) ||
3756 sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3758 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3759 if_printf(&sc->arpcom.ac_if,
3762 if_printf(&sc->arpcom.ac_if,
3763 "using oneshot MSI\n");
3766 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
3767 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3768 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
3770 REG_WR(sc, BCE_HC_CONFIG, val);
3772 for (i = 1; i < sc->rx_ring_cnt; ++i) {
3775 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1;
3776 KKASSERT(base <= BCE_HC_SB_CONFIG_8);
3779 BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
3780 /* BCE_HC_SB_CONFIG_1_RX_TMR_MODE | */
3781 BCE_HC_SB_CONFIG_1_ONE_SHOT);
3783 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
3784 (sc->bce_tx_quick_cons_trip_int << 16) |
3785 sc->bce_tx_quick_cons_trip);
3786 REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
3787 (sc->bce_rx_quick_cons_trip_int << 16) |
3788 sc->bce_rx_quick_cons_trip);
3789 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
3790 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3791 REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
3792 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3795 /* Clear the internal statistics counters. */
3796 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3798 /* Verify that bootcode is running. */
3799 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
3801 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3802 BCE_DEV_INFO_SIGNATURE_MAGIC) {
3803 if_printf(&sc->arpcom.ac_if,
3804 "Bootcode not running! Found: 0x%08X, "
3805 "Expected: 08%08X\n",
3806 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3807 BCE_DEV_INFO_SIGNATURE_MAGIC);
3812 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3813 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3814 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3815 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3816 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3819 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3820 bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3822 /* Enable link state change interrupt generation. */
3823 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3825 /* Enable the RXP. */
3826 bce_start_rxp_cpu(sc);
3828 /* Disable management frames (NC-SI) from flowing to the MCP. */
3829 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3830 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
3831 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3832 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3835 /* Enable all remaining blocks in the MAC. */
3836 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3837 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3838 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3839 BCE_MISC_ENABLE_DEFAULT_XI);
3841 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
3843 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3846 /* Save the current host coalescing block settings. */
3847 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
3852 /****************************************************************************/
3853 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3855 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3856 /* This routine will map an mbuf cluster into 1 or more rx_bd's as required. */
3860 /* 0 for success, positive value for failure. */
3861 /****************************************************************************/
3863 bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t chain_prod,
3864 uint32_t *prod_bseq, int init)
3866 struct bce_rx_buf *rx_buf;
3868 bus_dma_segment_t seg;
3872 /* This is a new mbuf allocation. */
3873 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3877 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3879 /* Map the mbuf cluster into device memory. */
3880 error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
3881 rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
3885 if_printf(&rxr->sc->arpcom.ac_if,
3886 "Error mapping mbuf into RX chain!\n");
3891 rx_buf = &rxr->rx_bufs[chain_prod];
3892 if (rx_buf->rx_mbuf_ptr != NULL)
3893 bus_dmamap_unload(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map);
3895 map = rx_buf->rx_mbuf_map;
3896 rx_buf->rx_mbuf_map = rxr->rx_mbuf_tmpmap;
3897 rxr->rx_mbuf_tmpmap = map;
3899 /* Save the mbuf and update our counter. */
3900 rx_buf->rx_mbuf_ptr = m_new;
3901 rx_buf->rx_mbuf_paddr = seg.ds_addr;
3904 bce_setup_rxdesc_std(rxr, chain_prod, prod_bseq);
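
#if 0	/* Illustrative sketch only; not compiled into the driver. */
/*
 * The map-swap pattern above in schematic form.  Loading the new mbuf
 * into the ring's spare map first means a load failure leaves the
 * slot's old mbuf and its still-loaded map untouched; only after a
 * successful load is the old mapping torn down and the two maps
 * exchanged, so the ring always keeps exactly one spare, unloaded map.
 * 'slot', 'spare_map' and 'tmp' are illustrative names for the
 * rx_buf, rx_mbuf_tmpmap and swap temporary used above.
 */
error = bus_dmamap_load_mbuf_segment(tag, spare_map, m_new,
    &seg, 1, &nseg, BUS_DMA_NOWAIT);
if (error) {
	m_freem(m_new);
	return error;		/* old buffer is still posted to the ring */
}
if (slot->mbuf != NULL)
	bus_dmamap_unload(tag, slot->map);
tmp = slot->map;
slot->map = spare_map;		/* slot now owns the loaded map */
spare_map = tmp;		/* old map becomes the new spare */
slot->mbuf = m_new;
#endif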
3910 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
3911 uint32_t *prod_bseq)
3913 const struct bce_rx_buf *rx_buf;
3918 rx_buf = &rxr->rx_bufs[chain_prod];
3919 paddr = rx_buf->rx_mbuf_paddr;
3920 len = rx_buf->rx_mbuf_ptr->m_len;
3922 /* Setup the rx_bd for the first segment. */
3923 rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3925 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3926 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3927 rxbd->rx_bd_len = htole32(len);
3928 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3931 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3934 /****************************************************************************/
3935 /* Initialize the TX context memory. */
3939 /****************************************************************************/
3941 bce_init_tx_context(struct bce_tx_ring *txr)
3945 /* Initialize the context ID for an L2 TX chain. */
3946 if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
3947 BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
3948 /* Set the CID type to support an L2 connection. */
3949 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3950 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3951 BCE_L2CTX_TX_TYPE_XI, val);
3952 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3953 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3954 BCE_L2CTX_TX_CMD_TYPE_XI, val);
3956 /* Point the hardware to the first page in the chain. */
3957 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3958 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3959 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
3960 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3961 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3962 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
3964 /* Set the CID type to support an L2 connection. */
3965 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3966 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3967 BCE_L2CTX_TX_TYPE, val);
3968 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3969 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3970 BCE_L2CTX_TX_CMD_TYPE, val);
3972 /* Point the hardware to the first page in the chain. */
3973 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3974 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3975 BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
3976 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3977 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3978 BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
3982 /****************************************************************************/
3983 /* Allocate memory and initialize the TX data structures. */
3986 /* 0 for success, positive value for failure. */
3987 /****************************************************************************/
3989 bce_init_tx_chain(struct bce_tx_ring *txr)
3994 /* Set the initial TX producer/consumer indices. */
3997 txr->tx_prod_bseq = 0;
3998 txr->used_tx_bd = 0;
3999 txr->max_tx_bd = USABLE_TX_BD(txr);
4002 * The NetXtreme II supports a linked-list structure called
4003 * a Buffer Descriptor Chain (or BD chain). A BD chain
4004 * consists of a series of 1 or more chain pages, each of which
4005 * consists of a fixed number of BD entries.
4006 * The last BD entry on each page is a pointer to the next page
4007 * in the chain, and the last pointer in the BD chain
4008 * points back to the beginning of the chain.
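/*
 * Sketch of the resulting layout (illustrative, for a hypothetical
 * two-page chain):
 *
 *   page 0: bd[0] ... bd[USABLE_TX_BD_PER_PAGE - 1], last bd -> page 1
 *   page 1: bd[0] ... bd[USABLE_TX_BD_PER_PAGE - 1], last bd -> page 0
 *
 * The loop below programs each page's final tx_bd with the bus
 * address of the following page.
 */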
4011 /* Set the TX next pointer chain entries. */
4012 for (i = 0; i < txr->tx_pages; i++) {
4015 txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
4017 /* Check if we've reached the last page. */
4018 if (i == (txr->tx_pages - 1))
4023 txbd->tx_bd_haddr_hi =
4024 htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
4025 txbd->tx_bd_haddr_lo =
4026 htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
4028 bce_init_tx_context(txr);
4033 /****************************************************************************/
4034 /* Free memory and clear the TX data structures. */
4038 /****************************************************************************/
4040 bce_free_tx_chain(struct bce_tx_ring *txr)
4044 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4045 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
4046 struct bce_tx_buf *tx_buf = &txr->tx_bufs[i];
4048 if (tx_buf->tx_mbuf_ptr != NULL) {
4049 bus_dmamap_unload(txr->tx_mbuf_tag,
4050 tx_buf->tx_mbuf_map);
4051 m_freem(tx_buf->tx_mbuf_ptr);
4052 tx_buf->tx_mbuf_ptr = NULL;
4056 /* Clear each TX chain page. */
4057 for (i = 0; i < txr->tx_pages; i++)
4058 bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
4059 txr->used_tx_bd = 0;
4062 /****************************************************************************/
4063 /* Initialize the RX context memory. */
4067 /****************************************************************************/
4069 bce_init_rx_context(struct bce_rx_ring *rxr)
4073 /* Initialize the context ID for an L2 RX chain. */
4074 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4075 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4078 * Set the level for generating pause frames
4079 * when the number of available rx_bd's gets
4080 * too low (the low watermark) and the level
4081 * when pause frames can be stopped (the high watermark).
4084 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4085 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4086 uint32_t lo_water, hi_water;
4088 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
4089 hi_water = USABLE_RX_BD(rxr) / 4;
4091 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
4092 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
4096 else if (hi_water == 0)
4099 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
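/*
 * Worked example (illustrative; the actual values come from the
 * register definitions): if the default low watermark were 32 rx_bd's
 * with a scale factor of 4, lo_water would become 8 context units;
 * hi_water starts at one quarter of the usable rx_bd's and is scaled
 * the same way before both are packed into 'val'.
 */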
4102 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4103 BCE_L2CTX_RX_CTX_TYPE, val);
4105 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
4106 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4107 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4108 val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
4109 REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
4112 /* Point the hardware to the first page in the chain. */
4113 val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
4114 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4115 BCE_L2CTX_RX_NX_BDHADDR_HI, val);
4116 val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
4117 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4118 BCE_L2CTX_RX_NX_BDHADDR_LO, val);
4121 /****************************************************************************/
4122 /* Allocate memory and initialize the RX data structures. */
4125 /* 0 for success, positive value for failure. */
4126 /****************************************************************************/
4128 bce_init_rx_chain(struct bce_rx_ring *rxr)
4132 uint16_t prod, chain_prod;
4135 /* Initialize the RX producer and consumer indices. */
4138 rxr->rx_prod_bseq = 0;
4139 rxr->free_rx_bd = USABLE_RX_BD(rxr);
4140 rxr->max_rx_bd = USABLE_RX_BD(rxr);
4142 /* Clear the cached status block index. */
4143 rxr->last_status_idx = 0;
4145 /* Initialize the RX next pointer chain entries. */
4146 for (i = 0; i < rxr->rx_pages; i++) {
4149 rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4151 /* Check if we've reached the last page. */
4152 if (i == (rxr->rx_pages - 1))
4157 /* Setup the chain page pointers. */
4158 rxbd->rx_bd_haddr_hi =
4159 htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
4160 rxbd->rx_bd_haddr_lo =
4161 htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
4164 /* Allocate mbuf clusters for the rx_bd chain. */
4165 prod = prod_bseq = 0;
4166 while (prod < TOTAL_RX_BD(rxr)) {
4167 chain_prod = RX_CHAIN_IDX(rxr, prod);
4168 if (bce_newbuf_std(rxr, &prod, chain_prod, &prod_bseq, 1)) {
4169 if_printf(&rxr->sc->arpcom.ac_if,
4170 "Error filling RX chain: rx_bd[0x%04X]!\n",
4175 prod = NEXT_RX_BD(prod);
4178 /* Save the RX chain producer index. */
4179 rxr->rx_prod = prod;
4180 rxr->rx_prod_bseq = prod_bseq;
4182 /* Tell the chip about the waiting rx_bd's. */
4183 REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4185 REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4188 bce_init_rx_context(rxr);
4193 /****************************************************************************/
4194 /* Free memory and clear the RX data structures. */
4198 /****************************************************************************/
4200 bce_free_rx_chain(struct bce_rx_ring *rxr)
4204 /* Free any mbufs still in the RX mbuf chain. */
4205 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
4206 struct bce_rx_buf *rx_buf = &rxr->rx_bufs[i];
4208 if (rx_buf->rx_mbuf_ptr != NULL) {
4209 bus_dmamap_unload(rxr->rx_mbuf_tag,
4210 rx_buf->rx_mbuf_map);
4211 m_freem(rx_buf->rx_mbuf_ptr);
4212 rx_buf->rx_mbuf_ptr = NULL;
4216 /* Clear each RX chain page. */
4217 for (i = 0; i < rxr->rx_pages; i++)
4218 bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4221 /****************************************************************************/
4222 /* Set media options. */
4225 /* 0 for success, positive value for failure. */
4226 /****************************************************************************/
4228 bce_ifmedia_upd(struct ifnet *ifp)
4230 struct bce_softc *sc = ifp->if_softc;
4231 struct mii_data *mii = device_get_softc(sc->bce_miibus);
4235 * 'mii' will be NULL when this function is called on the following
4236 * code path: bce_attach() -> bce_mgmt_init()
4239 /* Make sure the MII bus has been enumerated. */
4241 if (mii->mii_instance) {
4242 struct mii_softc *miisc;
4244 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4245 mii_phy_reset(miisc);
4247 error = mii_mediachg(mii);
4252 /****************************************************************************/
4253 /* Reports current media status. */
4257 /****************************************************************************/
4259 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4261 struct bce_softc *sc = ifp->if_softc;
4262 struct mii_data *mii = device_get_softc(sc->bce_miibus);
4265 ifmr->ifm_active = mii->mii_media_active;
4266 ifmr->ifm_status = mii->mii_media_status;
4269 /****************************************************************************/
4270 /* Handles PHY generated interrupt events. */
4274 /****************************************************************************/
4276 bce_phy_intr(struct bce_softc *sc)
4278 uint32_t new_link_state, old_link_state;
4279 struct ifnet *ifp = &sc->arpcom.ac_if;
4281 ASSERT_SERIALIZED(&sc->main_serialize);
4283 new_link_state = sc->status_block->status_attn_bits &
4284 STATUS_ATTN_BITS_LINK_STATE;
4285 old_link_state = sc->status_block->status_attn_bits_ack &
4286 STATUS_ATTN_BITS_LINK_STATE;
4288 /* Handle any changes if the link state has changed. */
4289 if (new_link_state != old_link_state) { /* XXX redundant? */
4290 /* Update the status_attn_bits_ack field in the status block. */
4291 if (new_link_state) {
4292 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4293 STATUS_ATTN_BITS_LINK_STATE);
4295 if_printf(ifp, "Link is now UP.\n");
4297 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4298 STATUS_ATTN_BITS_LINK_STATE);
4300 if_printf(ifp, "Link is now DOWN.\n");
4304 * Assume link is down and allow tick routine to
4305 * update the state based on the actual media state.
4308 callout_stop(&sc->bce_tick_callout);
4309 bce_tick_serialized(sc);
4312 /* Acknowledge the link change interrupt. */
4313 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4316 /****************************************************************************/
4317 /* Reads the receive consumer value from the status block (skipping over */
4318 /* chain page pointer if necessary). */
4322 /****************************************************************************/
4323 static __inline uint16_t
4324 bce_get_hw_rx_cons(struct bce_rx_ring *rxr)
4326 uint16_t hw_cons = *rxr->rx_hw_cons;
4328 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
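/*
 * Illustrative arithmetic: with, e.g., 256 rx_bd's per chain page the
 * final entry of each page is the next-page pointer, so the usable
 * entries are 0..254.  Hardware consumer values 255, 511, 767, ...
 * therefore land on a page pointer; the test above spots them (all
 * low bits set) so the index can be advanced past the pointer entry.
 */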
4333 /****************************************************************************/
4334 /* Handles received frame interrupt events. */
4338 /****************************************************************************/
4340 bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
4342 struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
4343 uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4344 uint32_t sw_prod_bseq;
4346 ASSERT_SERIALIZED(&rxr->rx_serialize);
4348 /* Get working copies of the driver's view of the RX indices. */
4349 sw_cons = rxr->rx_cons;
4350 sw_prod = rxr->rx_prod;
4351 sw_prod_bseq = rxr->rx_prod_bseq;
4353 /* Scan through the receive chain as long as there is work to do. */
4354 while (sw_cons != hw_cons) {
4355 struct pktinfo pi0, *pi = NULL;
4356 struct bce_rx_buf *rx_buf;
4357 struct mbuf *m = NULL;
4358 struct l2_fhdr *l2fhdr = NULL;
4360 uint32_t status = 0;
4362 #ifdef IFPOLL_ENABLE
4363 if (count >= 0 && count-- == 0)
4368 * Convert the producer/consumer indices
4369 * to an actual rx_bd index.
4371 sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
4372 sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
4373 rx_buf = &rxr->rx_bufs[sw_chain_cons];
4377 /* The mbuf is stored with the last rx_bd entry of a packet. */
4378 if (rx_buf->rx_mbuf_ptr != NULL) {
4379 if (sw_chain_cons != sw_chain_prod) {
4380 if_printf(ifp, "RX cons(%d) != prod(%d), "
4381 "drop!\n", sw_chain_cons, sw_chain_prod);
4382 IFNET_STAT_INC(ifp, ierrors, 1);
4384 bce_setup_rxdesc_std(rxr, sw_chain_cons,
4387 goto bce_rx_int_next_rx;
4390 /* Unmap the mbuf from DMA space. */
4391 bus_dmamap_sync(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map,
4392 BUS_DMASYNC_POSTREAD);
4394 /* Save the mbuf from the driver's chain. */
4395 m = rx_buf->rx_mbuf_ptr;
4398 * Frames received on the NetXtreme II are prepended
4399 * with an l2_fhdr structure which provides status
4400 * information about the received frame (including
4401 * VLAN tags and checksum info). The frames are also
4402 * automatically adjusted to align the IP header
4403 * (i.e. two null bytes are inserted before the
4404 * Ethernet header). As a result the data DMA'd by
4405 * the controller into the mbuf is as follows:
4407 * +---------+-----+---------------------+-----+
4408 * | l2_fhdr | pad | packet data | FCS |
4409 * +---------+-----+---------------------+-----+
4411 * The l2_fhdr needs to be checked and skipped and the
4412 * FCS needs to be stripped before sending the packet up the stack.
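/*
 * Illustrative arithmetic: for a 64-byte frame the controller DMA's
 * sizeof(struct l2_fhdr) + 2 alignment pad bytes + 64 bytes into the
 * cluster.  The code below subtracts ETHER_CRC_LEN from
 * l2_fhdr_pkt_len and m_adj()'s past the header and pad, leaving just
 * the Ethernet frame for the stack.
 */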
4415 l2fhdr = mtod(m, struct l2_fhdr *);
4417 len = l2fhdr->l2_fhdr_pkt_len;
4418 status = l2fhdr->l2_fhdr_status;
4420 len -= ETHER_CRC_LEN;
4422 /* Check the received frame for errors. */
4423 if (status & (L2_FHDR_ERRORS_BAD_CRC |
4424 L2_FHDR_ERRORS_PHY_DECODE |
4425 L2_FHDR_ERRORS_ALIGNMENT |
4426 L2_FHDR_ERRORS_TOO_SHORT |
4427 L2_FHDR_ERRORS_GIANT_FRAME)) {
4428 IFNET_STAT_INC(ifp, ierrors, 1);
4430 /* Reuse the mbuf for a new frame. */
4431 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4434 goto bce_rx_int_next_rx;
4438 * Get a new mbuf for the rx_bd. If no new
4439 * mbufs are available then reuse the current mbuf,
4440 * log an ierror on the interface, and generate
4441 * an error in the system log.
4443 if (bce_newbuf_std(rxr, &sw_prod, sw_chain_prod,
4444 &sw_prod_bseq, 0)) {
4445 IFNET_STAT_INC(ifp, ierrors, 1);
4447 /* Try to reuse the existing mbuf. */
4448 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4451 goto bce_rx_int_next_rx;
4455 * Skip over the l2_fhdr when passing
4456 * the data up the stack.
4458 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4460 m->m_pkthdr.len = m->m_len = len;
4461 m->m_pkthdr.rcvif = ifp;
4463 /* Validate the checksum if offload enabled. */
4464 if (ifp->if_capenable & IFCAP_RXCSUM) {
4465 /* Check for an IP datagram. */
4466 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4467 m->m_pkthdr.csum_flags |=
4470 /* Check if the IP checksum is valid. */
4471 if ((l2fhdr->l2_fhdr_ip_xsum ^
4473 m->m_pkthdr.csum_flags |=
4478 /* Check for a valid TCP/UDP frame. */
4479 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4480 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4482 /* Check for a good TCP/UDP checksum. */
4484 (L2_FHDR_ERRORS_TCP_XSUM |
4485 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4486 m->m_pkthdr.csum_data =
4487 l2fhdr->l2_fhdr_tcp_udp_xsum;
4488 m->m_pkthdr.csum_flags |=
4494 if (ifp->if_capenable & IFCAP_RSS) {
4495 pi = bce_rss_pktinfo(&pi0, status, l2fhdr);
4497 (status & L2_FHDR_STATUS_RSS_HASH)) {
4498 m->m_flags |= M_HASH;
4500 toeplitz_hash(l2fhdr->l2_fhdr_hash);
4504 IFNET_STAT_INC(ifp, ipackets, 1);
4506 sw_prod = NEXT_RX_BD(sw_prod);
4509 sw_cons = NEXT_RX_BD(sw_cons);
4511 /* If we have a packet, pass it up the stack */
4513 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4514 m->m_flags |= M_VLANTAG;
4515 m->m_pkthdr.ether_vlantag =
4516 l2fhdr->l2_fhdr_vlan_tag;
4518 ether_input_pkt(ifp, m, pi);
4519 #ifdef BCE_RSS_DEBUG
4525 rxr->rx_cons = sw_cons;
4526 rxr->rx_prod = sw_prod;
4527 rxr->rx_prod_bseq = sw_prod_bseq;
4529 REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4531 REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4535 /****************************************************************************/
4536 /* Reads the transmit consumer value from the status block (skipping over */
4537 /* chain page pointer if necessary). */
4541 /****************************************************************************/
4542 static __inline uint16_t
4543 bce_get_hw_tx_cons(struct bce_tx_ring *txr)
4545 uint16_t hw_cons = *txr->tx_hw_cons;
4547 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4552 /****************************************************************************/
4553 /* Handles transmit completion interrupt events. */
4557 /****************************************************************************/
4559 bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
4561 struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4562 uint16_t sw_tx_cons, sw_tx_chain_cons;
4564 ASSERT_SERIALIZED(&txr->tx_serialize);
4566 /* Get the hardware's view of the TX consumer index. */
4567 sw_tx_cons = txr->tx_cons;
4569 /* Cycle through any completed TX chain page entries. */
4570 while (sw_tx_cons != hw_tx_cons) {
4571 struct bce_tx_buf *tx_buf;
4573 sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
4574 tx_buf = &txr->tx_bufs[sw_tx_chain_cons];
4577 * Free the associated mbuf. Remember
4578 * that only the last tx_bd of a packet
4579 * has an mbuf pointer and DMA map.
4581 if (tx_buf->tx_mbuf_ptr != NULL) {
4582 /* Unmap the mbuf. */
4583 bus_dmamap_unload(txr->tx_mbuf_tag,
4584 tx_buf->tx_mbuf_map);
4586 /* Free the mbuf. */
4587 m_freem(tx_buf->tx_mbuf_ptr);
4588 tx_buf->tx_mbuf_ptr = NULL;
4590 IFNET_STAT_INC(ifp, opackets, 1);
4591 #ifdef BCE_TSS_DEBUG
4597 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4600 if (txr->used_tx_bd == 0) {
4601 /* Clear the TX timeout timer. */
4602 txr->tx_watchdog.wd_timer = 0;
4605 /* Clear the tx hardware queue full flag. */
4606 if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
4607 ifsq_clr_oactive(txr->ifsq);
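/*
 * Illustrative rationale: BCE_TX_SPARE_SPACE is presumably sized for
 * a maximally fragmented frame, so OACTIVE is cleared only once at
 * least that many tx_bd's are free and a subsequent bce_encap()
 * cannot overrun the ring.
 */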
4608 txr->tx_cons = sw_tx_cons;
4611 /****************************************************************************/
4612 /* Disables interrupt generation. */
4616 /****************************************************************************/
4618 bce_disable_intr(struct bce_softc *sc)
4622 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4623 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4624 (sc->rx_rings[i].idx << 24) |
4625 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4627 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4629 callout_stop(&sc->bce_ckmsi_callout);
4630 sc->bce_msi_maylose = FALSE;
4631 sc->bce_check_rx_cons = 0;
4632 sc->bce_check_tx_cons = 0;
4633 sc->bce_check_status_idx = 0xffff;
4635 for (i = 0; i < sc->rx_ring_cnt; ++i)
4636 lwkt_serialize_handler_disable(sc->bce_msix[i].msix_serialize);
4639 /****************************************************************************/
4640 /* Enables interrupt generation. */
4644 /****************************************************************************/
4646 bce_enable_intr(struct bce_softc *sc)
4650 for (i = 0; i < sc->rx_ring_cnt; ++i)
4651 lwkt_serialize_handler_enable(sc->bce_msix[i].msix_serialize);
4653 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4654 struct bce_rx_ring *rxr = &sc->rx_rings[i];
4656 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4657 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4658 BCE_PCICFG_INT_ACK_CMD_MASK_INT |
4659 rxr->last_status_idx);
4660 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4661 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4662 rxr->last_status_idx);
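/*
 * Illustrative breakdown of the two writes above: bits 31..24 select
 * the status block / vector index, INDEX_VALID latches
 * last_status_idx as the acknowledged status index, and only the
 * second write (without MASK_INT) actually unmasks the interrupt.
 */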
4664 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
4666 if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
4667 sc->bce_msi_maylose = FALSE;
4668 sc->bce_check_rx_cons = 0;
4669 sc->bce_check_tx_cons = 0;
4670 sc->bce_check_status_idx = 0xffff;
4673 if_printf(&sc->arpcom.ac_if, "check msi\n");
4675 callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
4676 bce_check_msi, sc, sc->bce_msix[0].msix_cpuid);
4680 /****************************************************************************/
4681 /* Reenables interrupt generation during interrupt handling. */
4685 /****************************************************************************/
4687 bce_reenable_intr(struct bce_rx_ring *rxr)
4689 REG_WR(rxr->sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4690 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | rxr->last_status_idx);
4693 /****************************************************************************/
4694 /* Handles controller initialization. */
4698 /****************************************************************************/
4702 struct bce_softc *sc = xsc;
4703 struct ifnet *ifp = &sc->arpcom.ac_if;
4708 ASSERT_IFNET_SERIALIZED_ALL(ifp);
4710 /* Check if the driver is still running and bail out if it is. */
4711 if (ifp->if_flags & IFF_RUNNING)
4716 error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4718 if_printf(ifp, "Controller reset failed!\n");
4722 error = bce_chipinit(sc);
4724 if_printf(ifp, "Controller initialization failed!\n");
4728 error = bce_blockinit(sc);
4730 if_printf(ifp, "Block initialization failed!\n");
4734 /* Load our MAC address. */
4735 bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4736 bce_set_mac_addr(sc);
4738 /* Calculate and program the Ethernet MTU size. */
4739 ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4742 * Program the mtu, enabling jumbo frame
4743 * support if necessary. Also set the mbuf
4744 * allocation count for RX frames.
4746 if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4748 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4749 min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4750 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4752 panic("jumbo buffer is not supported yet");
4755 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4758 /* Program appropriate promiscuous/multicast filtering. */
4759 bce_set_rx_mode(sc);
4762 * Init RX buffer descriptor chain.
4764 REG_WR(sc, BCE_RLUP_RSS_CONFIG, 0);
4765 bce_reg_wr_ind(sc, BCE_RXP_SCRATCH_RSS_TBL_SZ, 0);
4767 for (i = 0; i < sc->rx_ring_cnt; ++i)
4768 bce_init_rx_chain(&sc->rx_rings[i]); /* XXX return value */
4770 if (sc->rx_ring_cnt > 1)
4774 * Init TX buffer descriptor chain.
4776 REG_WR(sc, BCE_TSCH_TSS_CFG, 0);
4778 for (i = 0; i < sc->tx_ring_cnt; ++i)
4779 bce_init_tx_chain(&sc->tx_rings[i]);
4781 if (sc->tx_ring_cnt > 1) {
4782 REG_WR(sc, BCE_TSCH_TSS_CFG,
4783 ((sc->tx_ring_cnt - 1) << 24) | (TX_TSS_CID << 7));
4787 #ifdef IFPOLL_ENABLE
4788 if (ifp->if_flags & IFF_NPOLLING)
4793 /* Disable interrupts if we are polling. */
4794 bce_disable_intr(sc);
4796 /* Change coalesce parameters */
4797 bce_npoll_coal_change(sc);
4799 /* Enable host interrupts. */
4800 bce_enable_intr(sc);
4802 bce_set_timer_cpuid(sc, polling);
4804 bce_ifmedia_upd(ifp);
4806 ifp->if_flags |= IFF_RUNNING;
4807 for (i = 0; i < sc->tx_ring_cnt; ++i) {
4808 ifsq_clr_oactive(sc->tx_rings[i].ifsq);
4809 ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
4812 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
4813 sc->bce_timer_cpuid);
4819 /****************************************************************************/
4820 /* Initialize the controller just enough so that any management firmware */
4821 /* running on the device will continue to operate correctly. */
4825 /****************************************************************************/
4827 bce_mgmt_init(struct bce_softc *sc)
4829 struct ifnet *ifp = &sc->arpcom.ac_if;
4831 /* Bail out if management firmware is not running. */
4832 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4835 /* Enable all critical blocks in the MAC. */
4836 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
4837 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
4838 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4839 BCE_MISC_ENABLE_DEFAULT_XI);
4841 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4843 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4846 bce_ifmedia_upd(ifp);
4849 /****************************************************************************/
4850 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4851 /* memory visible to the controller. */
4854 /* 0 for success, positive value for failure. */
4855 /****************************************************************************/
4857 bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
4859 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4860 bus_dmamap_t map, tmp_map;
4861 struct mbuf *m0 = *m_head;
4862 struct tx_bd *txbd = NULL;
4863 uint16_t vlan_tag = 0, flags = 0, mss = 0;
4864 uint16_t chain_prod, chain_prod_start, prod;
4866 int i, error, maxsegs, nsegs;
4868 /* Transfer any checksum offload flags to the bd. */
4869 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
4870 error = bce_tso_setup(txr, m_head, &flags, &mss);
4874 } else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
4875 if (m0->m_pkthdr.csum_flags & CSUM_IP)
4876 flags |= TX_BD_FLAGS_IP_CKSUM;
4877 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4878 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4881 /* Transfer any VLAN tags to the bd. */
4882 if (m0->m_flags & M_VLANTAG) {
4883 flags |= TX_BD_FLAGS_VLAN_TAG;
4884 vlan_tag = m0->m_pkthdr.ether_vlantag;
4887 prod = txr->tx_prod;
4888 chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);
4890 /* Map the mbuf into DMAable memory. */
4891 map = txr->tx_bufs[chain_prod_start].tx_mbuf_map;
4893 maxsegs = txr->max_tx_bd - txr->used_tx_bd;
4894 KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4895 ("not enough segments %d", maxsegs));
4896 if (maxsegs > BCE_MAX_SEGMENTS)
4897 maxsegs = BCE_MAX_SEGMENTS;
4899 /* Map the mbuf into our DMA address space. */
4900 error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
4901 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
4904 bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4906 *nsegs_used += nsegs;
4911 /* prod points to an empty tx_bd at this point. */
4912 prod_bseq = txr->tx_prod_bseq;
4915 * Cycle through each mbuf segment that makes up
4916 * the outgoing frame, gathering the mapping info
4917 * for that segment and creating a tx_bd for it.
4920 for (i = 0; i < nsegs; i++) {
4921 chain_prod = TX_CHAIN_IDX(txr, prod);
4923 &txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4925 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4926 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4927 txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
4928 htole16(segs[i].ds_len);
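/*
 * Illustrative layout of tx_bd_mss_nbytes: the upper 16 bits hold the
 * TSO MSS (0 for non-TSO frames), the lower 16 bits the segment
 * length; e.g. mss 1460 with a 2048-byte segment packs as
 * (1460 << 16) | 2048 in host order.
 */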
4929 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4930 txbd->tx_bd_flags = htole16(flags);
4932 prod_bseq += segs[i].ds_len;
4934 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4935 prod = NEXT_TX_BD(prod);
4938 /* Set the END flag on the last TX buffer descriptor. */
4939 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4942 * Ensure that the mbuf pointer for this transmission
4943 * is placed at the array index of the last
4944 * descriptor in this chain. This is done
4945 * because a single map is used for all
4946 * segments of the mbuf and we don't want to
4947 * unload the map before all of the segments have been freed.
4950 txr->tx_bufs[chain_prod].tx_mbuf_ptr = m0;
4952 tmp_map = txr->tx_bufs[chain_prod].tx_mbuf_map;
4953 txr->tx_bufs[chain_prod].tx_mbuf_map = map;
4954 txr->tx_bufs[chain_prod_start].tx_mbuf_map = tmp_map;
4956 txr->used_tx_bd += nsegs;
4958 /* prod points to the next free tx_bd at this point. */
4959 txr->tx_prod = prod;
4960 txr->tx_prod_bseq = prod_bseq;
4970 bce_xmit(struct bce_tx_ring *txr)
4972 /* Start the transmit. */
4973 REG_WR16(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BIDX,
4975 REG_WR(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BSEQ,
4979 /****************************************************************************/
4980 /* Main transmit routine when called from another routine with a lock. */
4984 /****************************************************************************/
4986 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4988 struct bce_softc *sc = ifp->if_softc;
4989 struct bce_tx_ring *txr = ifsq_get_priv(ifsq);
4992 KKASSERT(txr->ifsq == ifsq);
4993 ASSERT_SERIALIZED(&txr->tx_serialize);
4995 /* If there's no link or the transmit queue is empty then just exit. */
4996 if (!sc->bce_link) {
5001 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
5005 struct mbuf *m_head;
5008 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is guaranteed enough free tx_bd's.
5011 if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
5012 ifsq_set_oactive(ifsq);
5016 /* Check for any frames to send. */
5017 m_head = ifsq_dequeue(ifsq, NULL);
5022 * Pack the data into the transmit ring. If we
5023 * don't have room, place the mbuf back at the
5024 * head of the queue and set the OACTIVE flag
5025 * to wait for the NIC to drain the chain.
5027 if (bce_encap(txr, &m_head, &count)) {
5028 IFNET_STAT_INC(ifp, oerrors, 1);
5029 if (txr->used_tx_bd == 0) {
5032 ifsq_set_oactive(ifsq);
5037 if (count >= txr->tx_wreg) {
5042 /* Send a copy of the frame to any BPF listeners. */
5043 ETHER_BPF_MTAP(ifp, m_head);
5045 /* Set the tx timeout. */
5046 txr->tx_watchdog.wd_timer = BCE_TX_TIMEOUT;
5052 /****************************************************************************/
5053 /* Handles any IOCTL calls from the operating system. */
5056 /* 0 for success, positive value for failure. */
5057 /****************************************************************************/
5059 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
5061 struct bce_softc *sc = ifp->if_softc;
5062 struct ifreq *ifr = (struct ifreq *)data;
5063 struct mii_data *mii;
5064 int mask, error = 0;
5066 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5070 /* Check that the MTU setting is supported. */
5071 if (ifr->ifr_mtu < BCE_MIN_MTU ||
5073 ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
5075 ifr->ifr_mtu > ETHERMTU
5082 ifp->if_mtu = ifr->ifr_mtu;
5083 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */
5088 if (ifp->if_flags & IFF_UP) {
5089 if (ifp->if_flags & IFF_RUNNING) {
5090 mask = ifp->if_flags ^ sc->bce_if_flags;
5092 if (mask & (IFF_PROMISC | IFF_ALLMULTI))
5093 bce_set_rx_mode(sc);
5097 } else if (ifp->if_flags & IFF_RUNNING) {
5100 /* If management firmware (MFW) is running, partially restart the controller. */
5101 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5102 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5107 sc->bce_if_flags = ifp->if_flags;
5112 if (ifp->if_flags & IFF_RUNNING)
5113 bce_set_rx_mode(sc);
5118 mii = device_get_softc(sc->bce_miibus);
5119 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5123 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5124 if (mask & IFCAP_HWCSUM) {
5125 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
5126 if (ifp->if_capenable & IFCAP_TXCSUM)
5127 ifp->if_hwassist |= BCE_CSUM_FEATURES;
5129 ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
5131 if (mask & IFCAP_TSO) {
5132 ifp->if_capenable ^= IFCAP_TSO;
5133 if (ifp->if_capenable & IFCAP_TSO)
5134 ifp->if_hwassist |= CSUM_TSO;
5136 ifp->if_hwassist &= ~CSUM_TSO;
5138 if (mask & IFCAP_RSS)
5139 ifp->if_capenable ^= IFCAP_RSS;
5143 error = ether_ioctl(ifp, command, data);
5149 /****************************************************************************/
5150 /* Transmit timeout handler. */
5154 /****************************************************************************/
5156 bce_watchdog(struct ifaltq_subque *ifsq)
5158 struct ifnet *ifp = ifsq_get_ifp(ifsq);
5159 struct bce_softc *sc = ifp->if_softc;
5162 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5165 * If we are in this routine because of pause frames, then
5166 * don't reset the hardware.
5168 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5171 if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
5173 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */
5176 IFNET_STAT_INC(ifp, oerrors, 1);
5178 for (i = 0; i < sc->tx_ring_cnt; ++i)
5179 ifsq_devstart_sched(sc->tx_rings[i].ifsq);
5182 #ifdef IFPOLL_ENABLE
5185 bce_npoll_status(struct ifnet *ifp)
5187 struct bce_softc *sc = ifp->if_softc;
5188 struct status_block *sblk = sc->status_block;
5189 uint32_t status_attn_bits;
5191 ASSERT_SERIALIZED(&sc->main_serialize);
5193 status_attn_bits = sblk->status_attn_bits;
5195 /* Was it a link change interrupt? */
5196 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5197 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5201 * Clear any transient status updates during link state change.
5203 REG_WR(sc, BCE_HC_COMMAND,
5204 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5205 REG_RD(sc, BCE_HC_COMMAND);
5209 * If any other attention is asserted then the chip is toast.
5211 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5212 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5213 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5214 sblk->status_attn_bits);
5215 bce_serialize_skipmain(sc);
5217 bce_deserialize_skipmain(sc);
5222 bce_npoll_rx(struct ifnet *ifp, void *arg, int count)
5224 struct bce_rx_ring *rxr = arg;
5225 uint16_t hw_rx_cons;
5227 ASSERT_SERIALIZED(&rxr->rx_serialize);
5230 * Save the status block index value for use when enabling the interrupt.
5233 rxr->last_status_idx = *rxr->hw_status_idx;
5235 /* Make sure status index is extracted before RX/TX cons */
5238 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5240 /* Check for any completed RX frames. */
5241 if (hw_rx_cons != rxr->rx_cons)
5242 bce_rx_intr(rxr, count, hw_rx_cons);
5246 bce_npoll_rx_pack(struct ifnet *ifp, void *arg, int count)
5248 struct bce_rx_ring *rxr = arg;
5250 KASSERT(rxr->idx == 0, ("not the first RX ring, but %d", rxr->idx));
5251 bce_npoll_rx(ifp, rxr, count);
5253 KASSERT(rxr->sc->rx_ring_cnt != rxr->sc->rx_ring_cnt2,
5254 ("RX ring count %d, count2 %d", rxr->sc->rx_ring_cnt,
5255 rxr->sc->rx_ring_cnt2));
5257 /* Last ring carries packets whose masked hash is 0 */
5258 rxr = &rxr->sc->rx_rings[rxr->sc->rx_ring_cnt - 1];
5260 lwkt_serialize_enter(&rxr->rx_serialize);
5261 bce_npoll_rx(ifp, rxr, count);
5262 lwkt_serialize_exit(&rxr->rx_serialize);
5266 bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused)
5268 struct bce_tx_ring *txr = arg;
5269 uint16_t hw_tx_cons;
5271 ASSERT_SERIALIZED(&txr->tx_serialize);
5273 hw_tx_cons = bce_get_hw_tx_cons(txr);
5275 /* Check for any completed TX frames. */
5276 if (hw_tx_cons != txr->tx_cons) {
5277 bce_tx_intr(txr, hw_tx_cons);
5278 if (!ifsq_is_empty(txr->ifsq))
5279 ifsq_devstart(txr->ifsq);
5284 bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
5286 struct bce_softc *sc = ifp->if_softc;
5289 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5292 info->ifpi_status.status_func = bce_npoll_status;
5293 info->ifpi_status.serializer = &sc->main_serialize;
5295 for (i = 0; i < sc->tx_ring_cnt; ++i) {
5296 struct bce_tx_ring *txr = &sc->tx_rings[i];
5297 int idx = i + sc->npoll_ofs;
5299 KKASSERT(idx < ncpus2);
5300 info->ifpi_tx[idx].poll_func = bce_npoll_tx;
5301 info->ifpi_tx[idx].arg = txr;
5302 info->ifpi_tx[idx].serializer = &txr->tx_serialize;
5303 ifsq_set_cpuid(txr->ifsq, idx);
5306 for (i = 0; i < sc->rx_ring_cnt2; ++i) {
5307 struct bce_rx_ring *rxr = &sc->rx_rings[i];
5308 int idx = i + sc->npoll_ofs;
5310 KKASSERT(idx < ncpus2);
5311 if (i == 0 && sc->rx_ring_cnt2 != sc->rx_ring_cnt) {
5313 * If RSS is enabled, the packets whose
5314 * masked hash is 0 are queued to the
5315 * last RX ring; piggyback the last RX
5316 * ring's processing in the first RX
5317 * polling handler. (see also: comment
5318 * in bce_setup_ring_cnt())
5321 if_printf(ifp, "npoll pack last "
5322 "RX ring on cpu%d\n", idx);
5324 info->ifpi_rx[idx].poll_func =
5327 info->ifpi_rx[idx].poll_func = bce_npoll_rx;
5329 info->ifpi_rx[idx].arg = rxr;
5330 info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
5333 if (ifp->if_flags & IFF_RUNNING) {
5334 bce_set_timer_cpuid(sc, TRUE);
5335 bce_disable_intr(sc);
5336 bce_npoll_coal_change(sc);
5339 for (i = 0; i < sc->tx_ring_cnt; ++i) {
5340 ifsq_set_cpuid(sc->tx_rings[i].ifsq,
5341 sc->bce_msix[i].msix_cpuid);
5344 if (ifp->if_flags & IFF_RUNNING) {
5345 bce_set_timer_cpuid(sc, FALSE);
5346 bce_enable_intr(sc);
5348 sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
5349 BCE_COALMASK_RX_BDS_INT;
5350 bce_coal_change(sc);
5355 #endif /* IFPOLL_ENABLE */
5358 * Interrupt handler.
5360 /****************************************************************************/
5361 /* Main interrupt entry point. Verifies that the controller generated the */
5362 /* interrupt and then calls a separate routine to handle the various */
5363 /* interrupt causes (PHY, TX, RX). */
5366 /* 0 for success, positive value for failure. */
5367 /****************************************************************************/
5369 bce_intr(struct bce_softc *sc)
5371 struct ifnet *ifp = &sc->arpcom.ac_if;
5372 struct status_block *sblk;
5373 uint16_t hw_rx_cons, hw_tx_cons;
5374 uint32_t status_attn_bits;
5375 struct bce_tx_ring *txr = &sc->tx_rings[0];
5376 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5378 ASSERT_SERIALIZED(&sc->main_serialize);
5380 sblk = sc->status_block;
5383 * Save the status block index value for use during
5384 * the next interrupt.
5386 rxr->last_status_idx = *rxr->hw_status_idx;
5388 /* Make sure status index is extracted before RX/TX cons */
5391 /* Check if the hardware has finished any work. */
5392 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5393 hw_tx_cons = bce_get_hw_tx_cons(txr);
5395 status_attn_bits = sblk->status_attn_bits;
5397 /* Was it a link change interrupt? */
5398 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5399 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5403 * Clear any transient status updates during link state
5406 REG_WR(sc, BCE_HC_COMMAND,
5407 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5408 REG_RD(sc, BCE_HC_COMMAND);
5412 * If any other attention is asserted then
5413 * the chip is toast.
5415 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5416 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5417 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5418 sblk->status_attn_bits);
5419 bce_serialize_skipmain(sc);
5421 bce_deserialize_skipmain(sc);
5425 /* Check for any completed RX frames. */
5426 lwkt_serialize_enter(&rxr->rx_serialize);
5427 if (hw_rx_cons != rxr->rx_cons)
5428 bce_rx_intr(rxr, -1, hw_rx_cons);
5429 lwkt_serialize_exit(&rxr->rx_serialize);
5431 /* Check for any completed TX frames. */
5432 lwkt_serialize_enter(&txr->tx_serialize);
5433 if (hw_tx_cons != txr->tx_cons) {
5434 bce_tx_intr(txr, hw_tx_cons);
5435 if (!ifsq_is_empty(txr->ifsq))
5436 ifsq_devstart(txr->ifsq);
5438 lwkt_serialize_exit(&txr->tx_serialize);
5442 bce_intr_legacy(void *xsc)
5444 struct bce_softc *sc = xsc;
5445 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5446 struct status_block *sblk;
5448 sblk = sc->status_block;
5451 * If the hardware status block index matches the last value
5452 * read by the driver and we haven't asserted our interrupt
5453 * then there's nothing to do.
5455 if (sblk->status_idx == rxr->last_status_idx &&
5456 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
5457 BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5460 /* Ack the interrupt and stop others from occurring. */
5461 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5462 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5463 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5466 * Read back to deassert IRQ immediately to avoid too
5467 * many spurious interrupts.
5469 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
5473 /* Re-enable interrupts. */
5474 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5475 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
5476 BCE_PCICFG_INT_ACK_CMD_MASK_INT | rxr->last_status_idx);
5477 bce_reenable_intr(rxr);
5481 bce_intr_msi(void *xsc)
5483 struct bce_softc *sc = xsc;
5485 /* Ack the interrupt and stop others from occurring. */
5486 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5487 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5488 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5492 /* Re-enable interrupts */
5493 bce_reenable_intr(&sc->rx_rings[0]);
5497 bce_intr_msi_oneshot(void *xsc)
5499 struct bce_softc *sc = xsc;
5503 /* Re-enable interrupts */
5504 bce_reenable_intr(&sc->rx_rings[0]);
5508 bce_intr_msix_rxtx(void *xrxr)
5510 struct bce_rx_ring *rxr = xrxr;
5511 struct bce_tx_ring *txr;
5512 uint16_t hw_rx_cons, hw_tx_cons;
5514 ASSERT_SERIALIZED(&rxr->rx_serialize);
5516 KKASSERT(rxr->idx < rxr->sc->tx_ring_cnt);
5517 txr = &rxr->sc->tx_rings[rxr->idx];
5520 * Save the status block index value for use during
5521 * the next interrupt.
5523 rxr->last_status_idx = *rxr->hw_status_idx;
5525 /* Make sure status index is extracted before RX/TX cons */
5528 /* Check if the hardware has finished any work. */
5529 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5530 if (hw_rx_cons != rxr->rx_cons)
5531 bce_rx_intr(rxr, -1, hw_rx_cons);
5533 /* Check for any completed TX frames. */
5534 hw_tx_cons = bce_get_hw_tx_cons(txr);
5535 lwkt_serialize_enter(&txr->tx_serialize);
5536 if (hw_tx_cons != txr->tx_cons) {
5537 bce_tx_intr(txr, hw_tx_cons);
5538 if (!ifsq_is_empty(txr->ifsq))
5539 ifsq_devstart(txr->ifsq);
5541 lwkt_serialize_exit(&txr->tx_serialize);
5543 /* Re-enable interrupts */
5544 bce_reenable_intr(rxr);
5548 bce_intr_msix_rx(void *xrxr)
5550 struct bce_rx_ring *rxr = xrxr;
5551 uint16_t hw_rx_cons;
5553 ASSERT_SERIALIZED(&rxr->rx_serialize);
5556 * Save the status block index value for use during
5557 * the next interrupt.
5559 rxr->last_status_idx = *rxr->hw_status_idx;
5561 /* Make sure status index is extracted before RX cons */
5564 /* Check if the hardware has finished any work. */
5565 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5566 if (hw_rx_cons != rxr->rx_cons)
5567 bce_rx_intr(rxr, -1, hw_rx_cons);
5569 /* Re-enable interrupts */
5570 bce_reenable_intr(rxr);
5573 /****************************************************************************/
5574 /* Programs the various packet receive modes (broadcast and multicast). */
5578 /****************************************************************************/
5580 bce_set_rx_mode(struct bce_softc *sc)
5582 struct ifnet *ifp = &sc->arpcom.ac_if;
5583 struct ifmultiaddr *ifma;
5584 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5585 uint32_t rx_mode, sort_mode;
5588 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5590 /* Initialize receive mode default settings. */
5591 rx_mode = sc->rx_mode &
5592 ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5593 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5594 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5597 * ASF/IPMI/UMP firmware requires that VLAN tag stripping be enabled.
5600 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5601 !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5602 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5605 * Check for promiscuous, all multicast, or selected
5606 * multicast address filtering.
5608 if (ifp->if_flags & IFF_PROMISC) {
5609 /* Enable promiscuous mode. */
5610 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5611 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5612 } else if (ifp->if_flags & IFF_ALLMULTI) {
5613 /* Enable all multicast addresses. */
5614 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5615 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5618 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5620 /* Accept one or more multicast(s). */
5621 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5622 if (ifma->ifma_addr->sa_family != AF_LINK)
5625 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5626 ETHER_ADDR_LEN) & 0xFF;
5627 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
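/*
 * Illustrative example: the hash computed above is reduced to its low
 * byte, selecting 1 of 256 filter bits.  For h = 0xA7,
 * (h & 0xE0) >> 5 = 5 picks hash register 5 and 1 << (h & 0x1F) sets
 * bit 7 within it.
 */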
5630 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5631 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5634 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5637 /* Only make changes if the receive mode has actually changed. */
5638 if (rx_mode != sc->rx_mode) {
5639 sc->rx_mode = rx_mode;
5640 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5643 /* Disable and clear the existing sort before enabling a new sort. */
5644 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5645 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5646 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5649 /****************************************************************************/
5650 /* Called periodically to update statistics from the controller's */
5651 /* statistics block. */
5655 /****************************************************************************/
5657 bce_stats_update(struct bce_softc *sc)
5659 struct ifnet *ifp = &sc->arpcom.ac_if;
5660 struct statistics_block *stats = sc->stats_block;
5662 ASSERT_SERIALIZED(&sc->main_serialize);
5665 * Certain controllers don't report carrier sense errors correctly.
5666 * See errata E11_5708CA0_1165.
5668 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5669 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5670 IFNET_STAT_INC(ifp, oerrors,
5671 (u_long)stats->stat_Dot3StatsCarrierSenseErrors);
5675 * Update the sysctl statistics from the hardware statistics.
5677 sc->stat_IfHCInOctets =
5678 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5679 (uint64_t)stats->stat_IfHCInOctets_lo;
5681 sc->stat_IfHCInBadOctets =
5682 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5683 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5685 sc->stat_IfHCOutOctets =
5686 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5687 (uint64_t)stats->stat_IfHCOutOctets_lo;
5689 sc->stat_IfHCOutBadOctets =
5690 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5691 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5693 sc->stat_IfHCInUcastPkts =
5694 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5695 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5697 sc->stat_IfHCInMulticastPkts =
5698 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5699 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5701 sc->stat_IfHCInBroadcastPkts =
5702 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5703 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5705 sc->stat_IfHCOutUcastPkts =
5706 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5707 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5709 sc->stat_IfHCOutMulticastPkts =
5710 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5711 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5713 sc->stat_IfHCOutBroadcastPkts =
5714 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5715 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5717 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5718 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5720 sc->stat_Dot3StatsCarrierSenseErrors =
5721 stats->stat_Dot3StatsCarrierSenseErrors;
5723 sc->stat_Dot3StatsFCSErrors =
5724 stats->stat_Dot3StatsFCSErrors;
5726 sc->stat_Dot3StatsAlignmentErrors =
5727 stats->stat_Dot3StatsAlignmentErrors;
5729 sc->stat_Dot3StatsSingleCollisionFrames =
5730 stats->stat_Dot3StatsSingleCollisionFrames;
5732 sc->stat_Dot3StatsMultipleCollisionFrames =
5733 stats->stat_Dot3StatsMultipleCollisionFrames;
5735 sc->stat_Dot3StatsDeferredTransmissions =
5736 stats->stat_Dot3StatsDeferredTransmissions;
5738 sc->stat_Dot3StatsExcessiveCollisions =
5739 stats->stat_Dot3StatsExcessiveCollisions;
5741 sc->stat_Dot3StatsLateCollisions =
5742 stats->stat_Dot3StatsLateCollisions;
5744 sc->stat_EtherStatsCollisions =
5745 stats->stat_EtherStatsCollisions;
5747 sc->stat_EtherStatsFragments =
5748 stats->stat_EtherStatsFragments;
5750 sc->stat_EtherStatsJabbers =
5751 stats->stat_EtherStatsJabbers;
5753 sc->stat_EtherStatsUndersizePkts =
5754 stats->stat_EtherStatsUndersizePkts;
5756 sc->stat_EtherStatsOverrsizePkts =
5757 stats->stat_EtherStatsOverrsizePkts;
5759 sc->stat_EtherStatsPktsRx64Octets =
5760 stats->stat_EtherStatsPktsRx64Octets;
5762 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5763 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5765 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5766 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5768 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5769 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5771 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5772 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5774 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5775 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5777 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5778 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5780 sc->stat_EtherStatsPktsTx64Octets =
5781 stats->stat_EtherStatsPktsTx64Octets;
5783 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5784 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5786 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5787 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5789 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5790 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5792 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5793 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5795 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5796 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5798 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5799 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5801 sc->stat_XonPauseFramesReceived =
5802 stats->stat_XonPauseFramesReceived;
5804 sc->stat_XoffPauseFramesReceived =
5805 stats->stat_XoffPauseFramesReceived;
5807 sc->stat_OutXonSent =
5808 stats->stat_OutXonSent;
5810 sc->stat_OutXoffSent =
5811 stats->stat_OutXoffSent;
5813 sc->stat_FlowControlDone =
5814 stats->stat_FlowControlDone;
5816 sc->stat_MacControlFramesReceived =
5817 stats->stat_MacControlFramesReceived;
5819 sc->stat_XoffStateEntered =
5820 stats->stat_XoffStateEntered;
5822 sc->stat_IfInFramesL2FilterDiscards =
5823 stats->stat_IfInFramesL2FilterDiscards;
5825 sc->stat_IfInRuleCheckerDiscards =
5826 stats->stat_IfInRuleCheckerDiscards;
5828 sc->stat_IfInFTQDiscards =
5829 stats->stat_IfInFTQDiscards;
5831 sc->stat_IfInMBUFDiscards =
5832 stats->stat_IfInMBUFDiscards;
5834 sc->stat_IfInRuleCheckerP4Hit =
5835 stats->stat_IfInRuleCheckerP4Hit;
5837 sc->stat_CatchupInRuleCheckerDiscards =
5838 stats->stat_CatchupInRuleCheckerDiscards;
5840 sc->stat_CatchupInFTQDiscards =
5841 stats->stat_CatchupInFTQDiscards;
5843 sc->stat_CatchupInMBUFDiscards =
5844 stats->stat_CatchupInMBUFDiscards;
5846 sc->stat_CatchupInRuleCheckerP4Hit =
5847 stats->stat_CatchupInRuleCheckerP4Hit;
5849 sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5852 * Update the interface statistics from the
5853 * hardware statistics.
5855 IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions);
5857 IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts +
5858 (u_long)sc->stat_EtherStatsOverrsizePkts +
5859 (u_long)sc->stat_IfInMBUFDiscards +
5860 (u_long)sc->stat_Dot3StatsAlignmentErrors +
5861 (u_long)sc->stat_Dot3StatsFCSErrors +
5862 (u_long)sc->stat_IfInRuleCheckerDiscards +
5863 (u_long)sc->stat_IfInFTQDiscards +
5864 (u_long)sc->com_no_buffers);
5866 IFNET_STAT_SET(ifp, oerrors,
5867 (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5868 (u_long)sc->stat_Dot3StatsExcessiveCollisions +
5869 (u_long)sc->stat_Dot3StatsLateCollisions);
5872 /****************************************************************************/
5873 /* Periodic function to notify the bootcode that the driver is still running. */
5878 /****************************************************************************/
5880 bce_pulse(void *xsc)
5882 struct bce_softc *sc = xsc;
5883 struct ifnet *ifp = &sc->arpcom.ac_if;
5886 lwkt_serialize_enter(&sc->main_serialize);
5888 /* Tell the firmware that the driver is still running. */
5889 msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5890 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
5892 /* Update the bootcode condition. */
5893 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
5895 /* Report whether the bootcode still knows the driver is running. */
5896 if (!sc->bce_drv_cardiac_arrest) {
5897 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
5898 sc->bce_drv_cardiac_arrest = 1;
5899 if_printf(ifp, "Bootcode lost the driver pulse! "
5900 "(bc_state = 0x%08X)\n", sc->bc_state);
5904 * Not supported by all bootcode versions.
5905 * (v5.0.11+ and v5.2.1+) Older bootcode
5906 * will require the driver to reset the
5907 * controller to clear this condition.
5909 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
5910 sc->bce_drv_cardiac_arrest = 0;
5911 if_printf(ifp, "Bootcode found the driver pulse! "
5912 "(bc_state = 0x%08X)\n", sc->bc_state);
5916 /* Schedule the next pulse. */
5917 callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
5918 sc->bce_timer_cpuid);
5920 lwkt_serialize_exit(&sc->main_serialize);
5923 /****************************************************************************/
5924 /* Periodic function to check whether MSI is lost. */
5928 /****************************************************************************/
5930 bce_check_msi(void *xsc)
5932 struct bce_softc *sc = xsc;
5933 struct ifnet *ifp = &sc->arpcom.ac_if;
5934 struct status_block *sblk = sc->status_block;
5935 struct bce_tx_ring *txr = &sc->tx_rings[0];
5936 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5938 lwkt_serialize_enter(&sc->main_serialize);
5940 KKASSERT(mycpuid == sc->bce_msix[0].msix_cpuid);
5942 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
5943 lwkt_serialize_exit(&sc->main_serialize);
5947 if (bce_get_hw_rx_cons(rxr) != rxr->rx_cons ||
5948 bce_get_hw_tx_cons(txr) != txr->tx_cons ||
5949 (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5950 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5951 if (sc->bce_check_rx_cons == rxr->rx_cons &&
5952 sc->bce_check_tx_cons == txr->tx_cons &&
5953 sc->bce_check_status_idx == rxr->last_status_idx) {
5956 if (!sc->bce_msi_maylose) {
5957 sc->bce_msi_maylose = TRUE;
5961 msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
5962 if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) {
5964 if_printf(ifp, "lost MSI\n");
5966 REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
5967 msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
5968 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);
5971 } else if (bootverbose) {
5972 if_printf(ifp, "MSI may be lost\n");
5976 sc->bce_msi_maylose = FALSE;
5977 sc->bce_check_rx_cons = rxr->rx_cons;
5978 sc->bce_check_tx_cons = txr->tx_cons;
5979 sc->bce_check_status_idx = rxr->last_status_idx;
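/*
 * Illustrative summary of the scheme: if work is pending but the
 * RX/TX consumers and status index recorded here have not moved by
 * the next BCE_MSI_CKINTVL tick, the MSI may have been lost; toggling
 * BCE_PCICFG_MSI_CONTROL_ENABLE above prods the controller into
 * re-asserting it.
 */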
5982 callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
5984 lwkt_serialize_exit(&sc->main_serialize);
5987 /****************************************************************************/
5988 /* Periodic function to perform maintenance tasks. */
5992 /****************************************************************************/
5994 bce_tick_serialized(struct bce_softc *sc)
5996 struct mii_data *mii;
5998 ASSERT_SERIALIZED(&sc->main_serialize);
6000 /* Update the statistics from the hardware statistics block. */
6001 bce_stats_update(sc);
6003 /* Schedule the next tick. */
6004 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
6005 sc->bce_timer_cpuid);
6007 /* If the link is already up then we're done. */
6011 mii = device_get_softc(sc->bce_miibus);
6014 /* Check if the link has come up. */
6015 if ((mii->mii_media_status & IFM_ACTIVE) &&
6016 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6020 /* Now that link is up, handle any outstanding TX traffic. */
6021 for (i = 0; i < sc->tx_ring_cnt; ++i)
6022 ifsq_devstart_sched(sc->tx_rings[i].ifsq);
6029 struct bce_softc *sc = xsc;
6031 lwkt_serialize_enter(&sc->main_serialize);
6032 bce_tick_serialized(sc);
6033 lwkt_serialize_exit(&sc->main_serialize);
6036 /****************************************************************************/
6037 /* Adds any sysctl parameters for tuning or debugging purposes. */
6040 /* 0 for success, positive value for failure. */
6041 /****************************************************************************/
6043 bce_add_sysctls(struct bce_softc *sc)
6045 struct sysctl_ctx_list *ctx;
6046 struct sysctl_oid_list *children;
6047 #if defined(BCE_TSS_DEBUG) || defined(BCE_RSS_DEBUG)
6052 sysctl_ctx_init(&sc->bce_sysctl_ctx);
6053 sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
6054 SYSCTL_STATIC_CHILDREN(_hw),
6056 device_get_nameunit(sc->bce_dev),
6058 if (sc->bce_sysctl_tree == NULL) {
6059 device_printf(sc->bce_dev, "can't add sysctl node\n");
6063 ctx = &sc->bce_sysctl_ctx;
6064 children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);
6066 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
6067 CTLTYPE_INT | CTLFLAG_RW,
6068 sc, 0, bce_sysctl_tx_bds_int, "I",
6069 "Send max coalesced BD count during interrupt");
6070 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
6071 CTLTYPE_INT | CTLFLAG_RW,
6072 sc, 0, bce_sysctl_tx_bds, "I",
6073 "Send max coalesced BD count");
6074 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
6075 CTLTYPE_INT | CTLFLAG_RW,
6076 sc, 0, bce_sysctl_tx_ticks_int, "I",
6077 "Send coalescing ticks during interrupt");
6078 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
6079 CTLTYPE_INT | CTLFLAG_RW,
6080 sc, 0, bce_sysctl_tx_ticks, "I",
6081 "Send coalescing ticks");
6083 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
6084 CTLTYPE_INT | CTLFLAG_RW,
6085 sc, 0, bce_sysctl_rx_bds_int, "I",
6086 "Receive max coalesced BD count during interrupt");
6087 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
6088 CTLTYPE_INT | CTLFLAG_RW,
6089 sc, 0, bce_sysctl_rx_bds, "I",
6090 "Receive max coalesced BD count");
6091 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
6092 CTLTYPE_INT | CTLFLAG_RW,
6093 sc, 0, bce_sysctl_rx_ticks_int, "I",
6094 "Receive coalescing ticks during interrupt");
6095 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
6096 CTLTYPE_INT | CTLFLAG_RW,
6097 sc, 0, bce_sysctl_rx_ticks, "I",
6098 "Receive coalescing ticks");

    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_rings",
        CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
        CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages");
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_rings",
        CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
        CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages");
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
        CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0,
        "# of segments before write to hardware registers");

#ifdef IFPOLL_ENABLE
    SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "npoll_offset",
        CTLTYPE_INT | CTLFLAG_RW, sc, 0, bce_sysctl_npoll_offset,
        "I", "NPOLLING cpu offset");
#endif

#ifdef BCE_RSS_DEBUG
    SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rss_debug",
        CTLFLAG_RW, &sc->rss_debug, 0, "RSS debug level");
    for (i = 0; i < sc->rx_ring_cnt; ++i) {
        ksnprintf(node, sizeof(node), "rx%d_pkt", i);
        SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
            CTLFLAG_RW, &sc->rx_rings[i].rx_pkts,
            "RXed packets");
    }
#endif

#ifdef BCE_TSS_DEBUG
    for (i = 0; i < sc->tx_ring_cnt; ++i) {
        ksnprintf(node, sizeof(node), "tx%d_pkt", i);
        SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
            CTLFLAG_RW, &sc->tx_rings[i].tx_pkts,
            "TXed packets");
    }
#endif

    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
        "stat_IfHCInOctets",
        CTLFLAG_RD, &sc->stat_IfHCInOctets,
        "Bytes received");

    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
        "stat_IfHCInBadOctets",
        CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
        "Bad bytes received");

    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
        "stat_IfHCOutOctets",
        CTLFLAG_RD, &sc->stat_IfHCOutOctets,
        "Bytes sent");

    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
        "stat_IfHCOutBadOctets",
        CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
        "Bad bytes sent");

    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
        "stat_IfHCInUcastPkts",
        CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
        "Unicast packets received");

    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
        "stat_IfHCInMulticastPkts",
        CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
        "Multicast packets received");

    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
        "stat_IfHCInBroadcastPkts",
        CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
        "Broadcast packets received");

    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
        "stat_IfHCOutUcastPkts",
        CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
        "Unicast packets sent");

    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
        "stat_IfHCOutMulticastPkts",
        CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
        "Multicast packets sent");

    SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
        "stat_IfHCOutBroadcastPkts",
        CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
        "Broadcast packets sent");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
        CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
        0, "Internal MAC transmit errors");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_Dot3StatsCarrierSenseErrors",
        CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
        0, "Carrier sense errors");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_Dot3StatsFCSErrors",
        CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
        0, "Frame check sequence errors");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_Dot3StatsAlignmentErrors",
        CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
        0, "Alignment errors");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_Dot3StatsSingleCollisionFrames",
        CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
        0, "Single Collision Frames");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_Dot3StatsMultipleCollisionFrames",
        CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
        0, "Multiple Collision Frames");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_Dot3StatsDeferredTransmissions",
        CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
        0, "Deferred Transmissions");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_Dot3StatsExcessiveCollisions",
        CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
        0, "Excessive Collisions");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_Dot3StatsLateCollisions",
        CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
        0, "Late Collisions");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsCollisions",
        CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
        0, "Collisions");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsFragments",
        CTLFLAG_RD, &sc->stat_EtherStatsFragments,
        0, "Fragments");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsJabbers",
        CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
        0, "Jabbers");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsUndersizePkts",
        CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
        0, "Undersize packets");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsOverrsizePkts",
        CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
        0, "Oversize packets");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsRx64Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
        0, "64 byte packets received");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsRx65Octetsto127Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
        0, "65 to 127 byte packets received");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsRx128Octetsto255Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
        0, "128 to 255 byte packets received");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsRx256Octetsto511Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
        0, "256 to 511 byte packets received");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsRx512Octetsto1023Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
        0, "512 to 1023 byte packets received");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsRx1024Octetsto1522Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
        0, "1024 to 1522 byte packets received");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsRx1523Octetsto9022Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
        0, "1523 to 9022 byte packets received");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsTx64Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
        0, "64 byte packets sent");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsTx65Octetsto127Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
        0, "65 to 127 byte packets sent");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsTx128Octetsto255Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
        0, "128 to 255 byte packets sent");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsTx256Octetsto511Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
        0, "256 to 511 byte packets sent");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsTx512Octetsto1023Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
        0, "512 to 1023 byte packets sent");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsTx1024Octetsto1522Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
        0, "1024 to 1522 byte packets sent");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_EtherStatsPktsTx1523Octetsto9022Octets",
        CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
        0, "1523 to 9022 byte packets sent");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_XonPauseFramesReceived",
        CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
        0, "XON pause frames received");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_XoffPauseFramesReceived",
        CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
        0, "XOFF pause frames received");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_OutXonSent",
        CTLFLAG_RD, &sc->stat_OutXonSent,
        0, "XON pause frames sent");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_OutXoffSent",
        CTLFLAG_RD, &sc->stat_OutXoffSent,
        0, "XOFF pause frames sent");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_FlowControlDone",
        CTLFLAG_RD, &sc->stat_FlowControlDone,
        0, "Flow control done");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_MacControlFramesReceived",
        CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
        0, "MAC control frames received");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_XoffStateEntered",
        CTLFLAG_RD, &sc->stat_XoffStateEntered,
        0, "XOFF state entered");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_IfInFramesL2FilterDiscards",
        CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
        0, "Received L2 packets discarded");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_IfInRuleCheckerDiscards",
        CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
        0, "Received packets discarded by rule");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_IfInFTQDiscards",
        CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
        0, "Received packet FTQ discards");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_IfInMBUFDiscards",
        CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
        0, "Received packets discarded due to lack of controller buffer memory");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_IfInRuleCheckerP4Hit",
        CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
        0, "Received packets rule checker hits");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_CatchupInRuleCheckerDiscards",
        CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
        0, "Received packets discarded in Catchup path");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_CatchupInFTQDiscards",
        CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
        0, "Received packets discarded in FTQ in Catchup path");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_CatchupInMBUFDiscards",
        CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
        0, "Received packets discarded in controller buffer memory in Catchup path");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "stat_CatchupInRuleCheckerP4Hit",
        CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
        0, "Received packets rule checker hits in Catchup path");

    SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
        "com_no_buffers",
        CTLFLAG_RD, &sc->com_no_buffers,
        0, "Valid packets received but no RX buffers available");
}

static int
bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
{
    struct bce_softc *sc = arg1;

    return bce_sysctl_coal_change(oidp, arg1, arg2, req,
        &sc->bce_tx_quick_cons_trip_int,
        BCE_COALMASK_TX_BDS_INT);
}

static int
bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
{
    struct bce_softc *sc = arg1;

    return bce_sysctl_coal_change(oidp, arg1, arg2, req,
        &sc->bce_tx_quick_cons_trip,
        BCE_COALMASK_TX_BDS);
}

static int
bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
{
    struct bce_softc *sc = arg1;

    return bce_sysctl_coal_change(oidp, arg1, arg2, req,
        &sc->bce_tx_ticks_int,
        BCE_COALMASK_TX_TICKS_INT);
}

static int
bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
{
    struct bce_softc *sc = arg1;

    return bce_sysctl_coal_change(oidp, arg1, arg2, req,
        &sc->bce_tx_ticks,
        BCE_COALMASK_TX_TICKS);
}

static int
bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
{
    struct bce_softc *sc = arg1;

    return bce_sysctl_coal_change(oidp, arg1, arg2, req,
        &sc->bce_rx_quick_cons_trip_int,
        BCE_COALMASK_RX_BDS_INT);
}

static int
bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
{
    struct bce_softc *sc = arg1;

    return bce_sysctl_coal_change(oidp, arg1, arg2, req,
        &sc->bce_rx_quick_cons_trip,
        BCE_COALMASK_RX_BDS);
}

static int
bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
{
    struct bce_softc *sc = arg1;

    return bce_sysctl_coal_change(oidp, arg1, arg2, req,
        &sc->bce_rx_ticks_int,
        BCE_COALMASK_RX_TICKS_INT);
}

static int
bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
{
    struct bce_softc *sc = arg1;

    return bce_sysctl_coal_change(oidp, arg1, arg2, req,
        &sc->bce_rx_ticks,
        BCE_COALMASK_RX_TICKS);
}
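
/*
 * Each wrapper above differs only in which softc field it updates and
 * which BCE_COALMASK_* bit it raises; the shared handler below does
 * the serialization, the bounds check and the hardware commit.
 */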

static int
bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
    uint32_t coalchg_mask)
{
    struct bce_softc *sc = arg1;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    int error = 0, v;

    ifnet_serialize_all(ifp);

    v = *coal;
    error = sysctl_handle_int(oidp, &v, 0, req);
    if (!error && req->newptr != NULL) {
        if (v < 0) {
            error = EINVAL;
        } else {
            *coal = v;
            sc->bce_coalchg_mask |= coalchg_mask;

            /* Commit changes */
            bce_coal_change(sc);
        }
    }

    ifnet_deserialize_all(ifp);
    return error;
}
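
/*
 * The host coalescing registers written below pack two 16-bit values
 * into one 32-bit word: the "during interrupt" value in the high half
 * and the normal value in the low half.  Roughly:
 *
 *     reg = (val_int << 16) | val;
 *
 * so, for example, tx_bds == 20 and tx_bds_int == 80 would be written
 * as 0x00500014.
 */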

static void
bce_coal_change(struct bce_softc *sc)
{
    struct ifnet *ifp = &sc->arpcom.ac_if;
    int i;

    ASSERT_SERIALIZED(&sc->main_serialize);

    if ((ifp->if_flags & IFF_RUNNING) == 0) {
        sc->bce_coalchg_mask = 0;
        return;
    }

    if (sc->bce_coalchg_mask &
        (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
        REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
            (sc->bce_tx_quick_cons_trip_int << 16) |
            sc->bce_tx_quick_cons_trip);
        for (i = 1; i < sc->rx_ring_cnt; ++i) {
            uint32_t base;

            base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
                BCE_HC_SB_CONFIG1;
            REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
                (sc->bce_tx_quick_cons_trip_int << 16) |
                sc->bce_tx_quick_cons_trip);
        }
        if (bootverbose) {
            if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
                sc->bce_tx_quick_cons_trip,
                sc->bce_tx_quick_cons_trip_int);
        }
    }

    if (sc->bce_coalchg_mask &
        (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
        REG_WR(sc, BCE_HC_TX_TICKS,
            (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
        for (i = 1; i < sc->rx_ring_cnt; ++i) {
            uint32_t base;

            base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
                BCE_HC_SB_CONFIG1;
            REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
                (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
        }
        if (bootverbose) {
            if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
                sc->bce_tx_ticks, sc->bce_tx_ticks_int);
        }
    }

    if (sc->bce_coalchg_mask &
        (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
        REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
            (sc->bce_rx_quick_cons_trip_int << 16) |
            sc->bce_rx_quick_cons_trip);
        for (i = 1; i < sc->rx_ring_cnt; ++i) {
            uint32_t base;

            base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
                BCE_HC_SB_CONFIG1;
            REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
                (sc->bce_rx_quick_cons_trip_int << 16) |
                sc->bce_rx_quick_cons_trip);
        }
        if (bootverbose) {
            if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
                sc->bce_rx_quick_cons_trip,
                sc->bce_rx_quick_cons_trip_int);
        }
    }

    if (sc->bce_coalchg_mask &
        (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
        REG_WR(sc, BCE_HC_RX_TICKS,
            (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
        for (i = 1; i < sc->rx_ring_cnt; ++i) {
            uint32_t base;

            base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
                BCE_HC_SB_CONFIG1;
            REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
                (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
        }
        if (bootverbose) {
            if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
                sc->bce_rx_ticks, sc->bce_rx_ticks_int);
        }
    }

    sc->bce_coalchg_mask = 0;
}

static int
bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
    uint16_t *flags0, uint16_t *mss0)
{
    struct mbuf *m = *mp;
    uint16_t flags;
    int thoff, iphlen, hoff;

    KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

    hoff = m->m_pkthdr.csum_lhlen;
    iphlen = m->m_pkthdr.csum_iphlen;
    thoff = m->m_pkthdr.csum_thlen;

    KASSERT(hoff >= sizeof(struct ether_header),
        ("invalid ether header len %d", hoff));
    KASSERT(iphlen >= sizeof(struct ip),
        ("invalid ip header len %d", iphlen));
    KASSERT(thoff >= sizeof(struct tcphdr),
        ("invalid tcp header len %d", thoff));

    if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
        m = m_pullup(m, hoff + iphlen + thoff);
        if (m == NULL) {
            *mp = NULL;
            return ENOBUFS;
        }
        *mp = m;
    }

    /* Set the LSO flag in the TX BD */
    flags = TX_BD_FLAGS_SW_LSO;

    /* Set the length of IP + TCP options (in 32 bit words) */
    flags |= (((iphlen + thoff -
        sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);

    *mss0 = htole16(m->m_pkthdr.tso_segsz);
    *flags0 = flags;

    return 0;
}
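
/*
 * Worked example for the option-length encoding above: with a 20 byte
 * IP header (no options) and a 32 byte TCP header (12 bytes of
 * options), iphlen + thoff - 40 == 12 bytes == 3 32-bit words, so
 * bits 8 and up of the BD flags carry the value 3.
 */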

static void
bce_setup_serialize(struct bce_softc *sc)
{
    int i, j;

    /*
     * Allocate serializer array
     */

    /* Main + TX + RX */
    sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt;

    sc->serializes =
        kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
            M_DEVBUF, M_WAITOK | M_ZERO);

    /*
     * Setup serializers
     *
     * NOTE: Order is critical
     */

    i = 0;

    KKASSERT(i < sc->serialize_cnt);
    sc->serializes[i++] = &sc->main_serialize;

    for (j = 0; j < sc->rx_ring_cnt; ++j) {
        KKASSERT(i < sc->serialize_cnt);
        sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
    }

    for (j = 0; j < sc->tx_ring_cnt; ++j) {
        KKASSERT(i < sc->serialize_cnt);
        sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
    }

    KKASSERT(i == sc->serialize_cnt);
}
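
/*
 * The resulting array layout, e.g. for 2 RX rings and 1 TX ring:
 *
 *     serializes[0]  main_serialize
 *     serializes[1]  rx_rings[0].rx_serialize
 *     serializes[2]  rx_rings[1].rx_serialize
 *     serializes[3]  tx_rings[0].tx_serialize
 *
 * The ifnet_serialize_array_*() helpers below depend on this fixed
 * ordering, which is why the NOTE above calls it critical.
 */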

static void
bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
    struct bce_softc *sc = ifp->if_softc;

    ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
}

static void
bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
    struct bce_softc *sc = ifp->if_softc;

    ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
}

static boolean_t
bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
    struct bce_softc *sc = ifp->if_softc;

    return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
        slz);
}

#ifdef INVARIANTS

static void
bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
    struct bce_softc *sc = ifp->if_softc;

    ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
        slz, serialized);
}

#endif /* INVARIANTS */

static void
bce_serialize_skipmain(struct bce_softc *sc)
{
    lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
}

static void
bce_deserialize_skipmain(struct bce_softc *sc)
{
    lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
}
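
/*
 * The trailing "1" above is the starting array index: since
 * serializes[0] is main_serialize, entering the array from index 1
 * grabs every RX/TX ring serializer while leaving the main one
 * alone -- hence "skipmain".
 */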

#ifdef IFPOLL_ENABLE

static int
bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
{
    struct bce_softc *sc = (void *)arg1;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    int error, off;

    off = sc->npoll_ofs;
    error = sysctl_handle_int(oidp, &off, 0, req);
    if (error || req->newptr == NULL)
        return error;
    if (off < 0)
        return EINVAL;

    ifnet_serialize_all(ifp);
    if (off >= ncpus2 || off % sc->rx_ring_cnt2 != 0) {
        error = EINVAL;
    } else {
        error = 0;
        sc->npoll_ofs = off;
    }
    ifnet_deserialize_all(ifp);

    return error;
}

#endif /* IFPOLL_ENABLE */

static void
bce_set_timer_cpuid(struct bce_softc *sc, boolean_t polling)
{
    if (polling)
        sc->bce_timer_cpuid = 0; /* XXX */
    else
        sc->bce_timer_cpuid = sc->bce_msix[0].msix_cpuid;
}

static int
bce_alloc_intr(struct bce_softc *sc)
{
    u_int irq_flags;

    bce_try_alloc_msix(sc);
    if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
        return 0;

    sc->bce_irq_type = pci_alloc_1intr(sc->bce_dev, bce_msi_enable,
        &sc->bce_irq_rid, &irq_flags);

    sc->bce_res_irq = bus_alloc_resource_any(sc->bce_dev, SYS_RES_IRQ,
        &sc->bce_irq_rid, irq_flags);
    if (sc->bce_res_irq == NULL) {
        device_printf(sc->bce_dev, "PCI map interrupt failed\n");
        return ENXIO;
    }

    sc->bce_msix[0].msix_cpuid = rman_get_cpuid(sc->bce_res_irq);
    sc->bce_msix[0].msix_serialize = &sc->main_serialize;

    return 0;
}
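
/*
 * Interrupt allocation thus falls back in the order MSI-X -> MSI ->
 * legacy INTx: bce_try_alloc_msix() is attempted first, and only if
 * it did not switch bce_irq_type to PCI_INTR_TYPE_MSIX does
 * pci_alloc_1intr() pick MSI or a shared legacy interrupt.
 */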

static void
bce_try_alloc_msix(struct bce_softc *sc)
{
    struct bce_msix_data *msix;
    int offset, i, error;
    boolean_t setup = FALSE;

    if (sc->rx_ring_cnt == 1)
        return;

    if (sc->rx_ring_cnt2 == ncpus2) {
        offset = 0;
    } else {
        int offset_def =
            (sc->rx_ring_cnt2 * device_get_unit(sc->bce_dev)) % ncpus2;

        offset = device_getenv_int(sc->bce_dev,
            "msix.offset", offset_def);
        if (offset >= ncpus2 || offset % sc->rx_ring_cnt2 != 0) {
            device_printf(sc->bce_dev,
                "invalid msix.offset %d, use %d\n",
                offset, offset_def);
            offset = offset_def;
        }
    }

    msix = &sc->bce_msix[0];
    msix->msix_serialize = &sc->main_serialize;
    msix->msix_func = bce_intr_msi_oneshot;
    msix->msix_arg = sc;
    KKASSERT(offset < ncpus2);
    msix->msix_cpuid = offset;
    ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s combo",
        device_get_nameunit(sc->bce_dev));

    for (i = 1; i < sc->rx_ring_cnt; ++i) {
        struct bce_rx_ring *rxr = &sc->rx_rings[i];

        msix = &sc->bce_msix[i];

        msix->msix_serialize = &rxr->rx_serialize;
        msix->msix_arg = rxr;
        msix->msix_cpuid = offset + (i % sc->rx_ring_cnt2);
        KKASSERT(msix->msix_cpuid < ncpus2);

        if (i < sc->tx_ring_cnt) {
            msix->msix_func = bce_intr_msix_rxtx;
            ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
                "%s rxtx%d", device_get_nameunit(sc->bce_dev), i);
        } else {
            msix->msix_func = bce_intr_msix_rx;
            ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
                "%s rx%d", device_get_nameunit(sc->bce_dev), i);
        }
    }

    /*
     * Setup MSI-X table
     */
    bce_setup_msix_table(sc);
    REG_WR(sc, BCE_PCI_MSIX_CONTROL, BCE_MSIX_MAX - 1);
    REG_WR(sc, BCE_PCI_MSIX_TBL_OFF_BIR, BCE_PCI_GRC_WINDOW2_BASE);
    REG_WR(sc, BCE_PCI_MSIX_PBA_OFF_BIT, BCE_PCI_GRC_WINDOW3_BASE);
    /* Flush */
    REG_RD(sc, BCE_PCI_MSIX_CONTROL);

    error = pci_setup_msix(sc->bce_dev);
    if (error) {
        device_printf(sc->bce_dev, "Setup MSI-X failed\n");
        goto back;
    }
    setup = TRUE;

    for (i = 0; i < sc->rx_ring_cnt; ++i) {
        msix = &sc->bce_msix[i];

        error = pci_alloc_msix_vector(sc->bce_dev, i, &msix->msix_rid,
            msix->msix_cpuid);
        if (error) {
            device_printf(sc->bce_dev,
                "Unable to allocate MSI-X %d on cpu%d\n",
                i, msix->msix_cpuid);
            goto back;
        }

        msix->msix_res = bus_alloc_resource_any(sc->bce_dev,
            SYS_RES_IRQ, &msix->msix_rid, RF_ACTIVE);
        if (msix->msix_res == NULL) {
            device_printf(sc->bce_dev,
                "Unable to allocate MSI-X %d resource\n", i);
            error = ENOMEM;
            goto back;
        }
    }

    pci_enable_msix(sc->bce_dev);
    sc->bce_irq_type = PCI_INTR_TYPE_MSIX;
back:
    if (error)
        bce_free_msix(sc, setup);
}
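
/*
 * Example of the resulting CPU mapping, assuming unit bce0 with
 * rx_ring_cnt2 == 4, rx_ring_cnt == 5 and ncpus2 == 8: the default
 * offset is (4 * 0) % 8 == 0, vector 0 ("combo", status block 0) is
 * pinned to cpu0, and vectors 1..4 go to offset + (i % 4), i.e.
 * cpu1, cpu2, cpu3 and back to cpu0.
 */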

static void
bce_setup_ring_cnt(struct bce_softc *sc)
{
    int msix_enable, ring_max, msix_cnt2, msix_cnt, i;

    sc->rx_ring_cnt = 1;
    sc->rx_ring_cnt2 = 1;
    sc->tx_ring_cnt = 1;

    if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709 &&
        BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5716)
        return;

    msix_enable = device_getenv_int(sc->bce_dev, "msix.enable",
        bce_msix_enable);
    if (!msix_enable)
        return;

    if (ncpus == 1)
        return;

    msix_cnt = pci_msix_count(sc->bce_dev);
    if (msix_cnt <= 1)
        return;

    i = 0;
    while ((1 << (i + 1)) <= msix_cnt)
        ++i;
    msix_cnt2 = 1 << i;

    /*
     * One extra RX ring will be needed (see below), so make sure
     * that there are enough MSI-X vectors.
     */
    if (msix_cnt == msix_cnt2) {
        /*
         * XXX
         * This probably will not happen; 5709/5716
         * come with 9 MSI-X vectors.
         */
        msix_cnt2 >>= 1;
        if (msix_cnt2 <= 1) {
            device_printf(sc->bce_dev,
                "MSI-X count %d could not be used\n", msix_cnt);
            return;
        }
        device_printf(sc->bce_dev, "MSI-X count %d is power of 2\n",
            msix_cnt);
    }

    /*
     * Setup RX ring count
     */
    ring_max = BCE_RX_RING_MAX;
    if (ring_max > msix_cnt2)
        ring_max = msix_cnt2;
    sc->rx_ring_cnt2 = device_getenv_int(sc->bce_dev, "rx_rings",
        bce_rx_rings);
    sc->rx_ring_cnt2 = if_ring_count2(sc->rx_ring_cnt2, ring_max);

    /*
     * Don't use MSI-X if the effective RX ring count is 1.
     * If the effective RX ring count is 1, the TX ring count
     * will be 1 as well.  This RX ring and the TX ring must be
     * bundled into one MSI-X vector, so the hot path would be
     * exactly the same as using MSI.  Besides, the first RX ring
     * must be fully populated, yet it only accepts packets whose
     * RSS hash can't be calculated, e.g. ARP packets; that would
     * be a waste of resources.
     */
    if (sc->rx_ring_cnt2 == 1)
        return;

    /*
     * One extra RX ring is allocated, since the first RX ring
     * could not be used for RSS hashed packets whose masked
     * hash is 0.  The first RX ring is only used for packets
     * whose RSS hash could not be calculated, e.g. ARP packets.
     * This extra RX ring will be used for packets whose masked
     * hash is 0.  The effective RX ring count involved in RSS
     * is still sc->rx_ring_cnt2.
     */
    KKASSERT(sc->rx_ring_cnt2 + 1 <= msix_cnt);
    sc->rx_ring_cnt = sc->rx_ring_cnt2 + 1;

    /*
     * Setup TX ring count
     *
     * NOTE:
     * TX ring count must be less than the effective RSS RX ring
     * count, since we use the RX ring software data struct to save
     * the status index and various other MSI-X related stuff.
     */
    ring_max = BCE_TX_RING_MAX;
    if (ring_max > msix_cnt2)
        ring_max = msix_cnt2;
    if (ring_max > sc->rx_ring_cnt2)
        ring_max = sc->rx_ring_cnt2;
    sc->tx_ring_cnt = device_getenv_int(sc->bce_dev, "tx_rings",
        bce_tx_rings);
    sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max);
}
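
/*
 * Worked example for the vector math above: the 5709/5716 expose 9
 * MSI-X vectors, so msix_cnt == 9 and msix_cnt2 == 8 (the largest
 * power of 2 that fits).  Since 9 is not itself a power of 2, the
 * "msix_cnt == msix_cnt2" fallback is not taken, and up to
 * min(BCE_RX_RING_MAX, 8) RSS rings plus the extra masked-hash-0
 * ring can each get their own vector.
 */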

static void
bce_free_msix(struct bce_softc *sc, boolean_t setup)
{
    int i;

    KKASSERT(sc->rx_ring_cnt > 1);

    for (i = 0; i < sc->rx_ring_cnt; ++i) {
        struct bce_msix_data *msix = &sc->bce_msix[i];

        if (msix->msix_res != NULL) {
            bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
                msix->msix_rid, msix->msix_res);
        }
        if (msix->msix_rid >= 0)
            pci_release_msix_vector(sc->bce_dev, msix->msix_rid);
    }
    if (setup)
        pci_teardown_msix(sc->bce_dev);
}

static void
bce_free_intr(struct bce_softc *sc)
{
    if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) {
        if (sc->bce_res_irq != NULL) {
            bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
                sc->bce_irq_rid, sc->bce_res_irq);
        }
        if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
            pci_release_msi(sc->bce_dev);
    } else {
        bce_free_msix(sc, TRUE);
    }
}

static void
bce_setup_msix_table(struct bce_softc *sc)
{
    REG_WR(sc, BCE_PCI_GRC_WINDOW_ADDR, BCE_PCI_GRC_WINDOW_ADDR_SEP_WIN);
    REG_WR(sc, BCE_PCI_GRC_WINDOW2_ADDR, BCE_MSIX_TABLE_ADDR);
    REG_WR(sc, BCE_PCI_GRC_WINDOW3_ADDR, BCE_MSIX_PBA_ADDR);
}

static int
bce_setup_intr(struct bce_softc *sc)
{
    void (*irq_handle)(void *);
    int error;

    if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
        return bce_setup_msix(sc);

    if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
        irq_handle = bce_intr_legacy;
    } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
        if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
            BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
            irq_handle = bce_intr_msi_oneshot;
            sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
        } else {
            irq_handle = bce_intr_msi;
            sc->bce_flags |= BCE_CHECK_MSI_FLAG;
        }
    } else {
        panic("%s: unsupported intr type %d",
            device_get_nameunit(sc->bce_dev), sc->bce_irq_type);
    }

    error = bus_setup_intr(sc->bce_dev, sc->bce_res_irq, INTR_MPSAFE,
        irq_handle, sc, &sc->bce_intrhand, &sc->main_serialize);
    if (error != 0) {
        device_printf(sc->bce_dev, "Failed to setup IRQ!\n");
        return error;
    }

    return 0;
}

static void
bce_teardown_intr(struct bce_softc *sc)
{
    if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX)
        bus_teardown_intr(sc->bce_dev, sc->bce_res_irq, sc->bce_intrhand);
    else
        bce_teardown_msix(sc, sc->rx_ring_cnt);
}

static int
bce_setup_msix(struct bce_softc *sc)
{
    int i;

    for (i = 0; i < sc->rx_ring_cnt; ++i) {
        struct bce_msix_data *msix = &sc->bce_msix[i];
        int error;

        error = bus_setup_intr_descr(sc->bce_dev, msix->msix_res,
            INTR_MPSAFE, msix->msix_func, msix->msix_arg,
            &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
        if (error) {
            device_printf(sc->bce_dev, "could not set up %s "
                "interrupt handler.\n", msix->msix_desc);
            bce_teardown_msix(sc, i);
            return error;
        }
    }
    return 0;
}

static void
bce_teardown_msix(struct bce_softc *sc, int msix_cnt)
{
    int i;

    for (i = 0; i < msix_cnt; ++i) {
        struct bce_msix_data *msix = &sc->bce_msix[i];

        bus_teardown_intr(sc->bce_dev, msix->msix_res,
            msix->msix_handle);
    }
}

static void
bce_init_rss(struct bce_softc *sc)
{
    uint8_t key[BCE_RLUP_RSS_KEY_CNT * BCE_RLUP_RSS_KEY_SIZE];
    uint32_t tbl = 0;
    int i;

    KKASSERT(sc->rx_ring_cnt > 2);

    /*
     * Configure RSS keys
     */
    toeplitz_get_key(key, sizeof(key));
    for (i = 0; i < BCE_RLUP_RSS_KEY_CNT; ++i) {
        uint32_t rss_key;

        rss_key = BCE_RLUP_RSS_KEYVAL(key, i);
        BCE_RSS_DPRINTF(sc, 1, "rss_key%d 0x%08x\n", i, rss_key);

        REG_WR(sc, BCE_RLUP_RSS_KEY(i), rss_key);
    }

    /*
     * Configure the redirect table
     *
     * NOTE:
     * - The "queue ID" in the redirect table is the software RX ring's
     *   index _minus_ one.
     * - The last RX ring, whose "queue ID" is (sc->rx_ring_cnt - 2),
     *   will be used for packets whose masked hash is 0.
     *   (see also: comment in bce_setup_ring_cnt())
     *
     * The redirect table is configured in the following fashion,
     * except for the masked hash 0, which is noted above:
     * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
     */
    for (i = 0; i < BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
        int shift = (i % 8) << 2, qid;

        qid = i % sc->rx_ring_cnt2;
        if (qid == 0) {
            /* Masked hash 0 goes to the last RX ring. */
            qid = sc->rx_ring_cnt - 2;
        }
        KKASSERT(qid < (sc->rx_ring_cnt - 1));

        tbl |= qid << shift;
        if (i % 8 == 7) {
            BCE_RSS_DPRINTF(sc, 1, "tbl 0x%08x\n", tbl);
            REG_WR(sc, BCE_RLUP_RSS_DATA, tbl);
            REG_WR(sc, BCE_RLUP_RSS_COMMAND, (i >> 3) |
                BCE_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
                BCE_RLUP_RSS_COMMAND_WRITE |
                BCE_RLUP_RSS_COMMAND_HASH_MASK);
            tbl = 0;
        }
    }

    REG_WR(sc, BCE_RLUP_RSS_CONFIG,
        BCE_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI);
}
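
/*
 * Redirect table packing, for reference: each 32-bit BCE_RLUP_RSS_DATA
 * word carries eight 4-bit queue IDs (shift == (i % 8) << 2), and one
 * word is flushed to the chip every eighth entry.  With
 * rx_ring_cnt2 == 4 (5 rings, queue IDs 0..3, masked-hash-0 remapped
 * to queue 3), the first word would be 0x32133213 reading entries
 * 7 down to 0.
 */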

static void
bce_npoll_coal_change(struct bce_softc *sc)
{
    uint32_t old_rx_cons, old_tx_cons;

    old_rx_cons = sc->bce_rx_quick_cons_trip_int;
    old_tx_cons = sc->bce_tx_quick_cons_trip_int;
    sc->bce_rx_quick_cons_trip_int = 1;
    sc->bce_tx_quick_cons_trip_int = 1;

    sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
        BCE_COALMASK_RX_BDS_INT;
    bce_coal_change(sc);

    sc->bce_rx_quick_cons_trip_int = old_rx_cons;
    sc->bce_tx_quick_cons_trip_int = old_tx_cons;
}

static struct pktinfo *
bce_rss_pktinfo(struct pktinfo *pi, uint32_t status,
    const struct l2_fhdr *l2fhdr)
{
    /* Check for an IP datagram. */
    if ((status & L2_FHDR_STATUS_IP_DATAGRAM) == 0)
        return NULL;

    /* Check if the IP checksum is valid. */
    if (l2fhdr->l2_fhdr_ip_xsum != 0xffff)
        return NULL;

    /* Check for a valid TCP/UDP frame. */
    if (status & L2_FHDR_STATUS_TCP_SEGMENT) {
        if (status & L2_FHDR_ERRORS_TCP_XSUM)
            return NULL;
        if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
            return NULL;
        pi->pi_l3proto = IPPROTO_TCP;
    } else if (status & L2_FHDR_STATUS_UDP_DATAGRAM) {
        if (status & L2_FHDR_ERRORS_UDP_XSUM)
            return NULL;
        if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
            return NULL;
        pi->pi_l3proto = IPPROTO_UDP;
    } else {
        return NULL;
    }
    pi->pi_netisr = NETISR_IP;