2 * Copyright (c) 2006-2007 Broadcom Corporation
3 * David Christensen <davidch@broadcom.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written consent.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
30 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
34 * The following controllers are supported by this driver:
42 * The following controllers are not supported by this driver:
48 * BCM5709S A0, A1, B0, B1, B2, C0
51 * Note about MSI-X on 5709/5716:
52 * - 9 MSI-X vectors are supported.
53 * - MSI-X vectors, RX/TX rings and status blocks' association
55 * o The first RX ring and the first TX ring use the first
57 * o The first MSI-X vector is associated with the first
59 * o The second RX ring and the second TX ring use the second
61 * o The second MSI-X vector is associated with the second
65 * - Status blocks must reside in physically contiguous memory
66 * and each status block consumes 128 bytes. In addition to
67 * this, the memory for the status blocks is aligned on a 128-byte boundary
68 * in this driver. (see bce_dma_alloc() and HC_CONFIG)
69 * - Each status block has its own coalesce parameters, which also
70 * serve as the related MSI-X vector's interrupt moderation
71 * parameters. (see bce_coal_change())
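 *
 *   An illustrative sketch of that association (the helper variables are
 *   hypothetical, not code from this driver): ring i, MSI-X vector i and
 *   status block i all share the same index, and status block i sits at a
 *   fixed 128 byte stride from the base of the contiguous status block
 *   memory:
 *
 *	bus_addr_t sblk_i_paddr = sblk_base_paddr + (i * 128);
 *
 *   so the handler for vector i only inspects status block i, using the
 *   coalescing parameters programmed for that block.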
75 #include "opt_ifpoll.h"
77 #include <sys/param.h>
79 #include <sys/endian.h>
80 #include <sys/kernel.h>
81 #include <sys/interrupt.h>
83 #include <sys/malloc.h>
84 #include <sys/queue.h>
86 #include <sys/serialize.h>
87 #include <sys/socket.h>
88 #include <sys/sockio.h>
89 #include <sys/sysctl.h>
91 #include <netinet/ip.h>
92 #include <netinet/tcp.h>
95 #include <net/ethernet.h>
97 #include <net/if_arp.h>
98 #include <net/if_dl.h>
99 #include <net/if_media.h>
100 #include <net/if_poll.h>
101 #include <net/if_types.h>
102 #include <net/ifq_var.h>
103 #include <net/if_ringmap.h>
104 #include <net/toeplitz.h>
105 #include <net/toeplitz2.h>
106 #include <net/vlan/if_vlan_var.h>
107 #include <net/vlan/if_vlan_ether.h>
109 #include <dev/netif/mii_layer/mii.h>
110 #include <dev/netif/mii_layer/miivar.h>
111 #include <dev/netif/mii_layer/brgphyreg.h>
113 #include <bus/pci/pcireg.h>
114 #include <bus/pci/pcivar.h>
116 #include "miibus_if.h"
118 #include <dev/netif/bce/if_bcereg.h>
119 #include <dev/netif/bce/if_bcefw.h>
121 #define BCE_MSI_CKINTVL ((10 * hz) / 1000) /* 10ms */
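/*
 * Note: (10 * hz) / 1000 converts 10 milliseconds into kernel ticks, so the
 * MSI check interval stays at roughly 10ms regardless of the configured tick
 * rate; e.g. hz = 1000 yields 10 ticks and hz = 100 yields 1 tick.
 */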
124 #define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) \
126 if (sc->rss_debug >= lvl) \
127 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
129 #else /* !BCE_RSS_DEBUG */
130 #define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
131 #endif /* BCE_RSS_DEBUG */
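/*
 * Example use of the debug macro above (an illustrative sketch only; "ring"
 * and "hash" are hypothetical locals).  When BCE_RSS_DEBUG is not defined
 * the statement expands to ((void)0):
 *
 *	BCE_RSS_DPRINTF(sc, 1, "RX ring %d, RSS hash 0x%08x\n", ring, hash);
 */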
133 /****************************************************************************/
134 /* PCI Device ID Table */
136 /* Used by bce_probe() to identify the devices supported by this driver. */
137 /****************************************************************************/
138 #define BCE_DEVDESC_MAX 64
140 static struct bce_type bce_devs[] = {
141 /* BCM5706C Controllers and OEM boards. */
142 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
143 "HP NC370T Multifunction Gigabit Server Adapter" },
144 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
145 "HP NC370i Multifunction Gigabit Server Adapter" },
146 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3070,
147 "HP NC380T PCIe DP Multifunc Gig Server Adapter" },
148 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x1709,
149 "HP NC371i Multifunction Gigabit Server Adapter" },
150 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
151 "Broadcom NetXtreme II BCM5706 1000Base-T" },
153 /* BCM5706S controllers and OEM boards. */
154 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
155 "HP NC370F Multifunction Gigabit Server Adapter" },
156 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
157 "Broadcom NetXtreme II BCM5706 1000Base-SX" },
159 /* BCM5708C controllers and OEM boards. */
160 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7037,
161 "HP NC373T PCIe Multifunction Gig Server Adapter" },
162 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7038,
163 "HP NC373i Multifunction Gigabit Server Adapter" },
164 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, HP_VENDORID, 0x7045,
165 "HP NC374m PCIe Multifunction Adapter" },
166 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
167 "Broadcom NetXtreme II BCM5708 1000Base-T" },
169 /* BCM5708S controllers and OEM boards. */
170 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x1706,
171 "HP NC373m Multifunction Gigabit Server Adapter" },
172 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703b,
173 "HP NC373i Multifunction Gigabit Server Adapter" },
174 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, HP_VENDORID, 0x703d,
175 "HP NC373F PCIe Multifunc Giga Server Adapter" },
176 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
177 "Broadcom NetXtreme II BCM5708S 1000Base-T" },
179 /* BCM5709C controllers and OEM boards. */
180 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7055,
181 "HP NC382i DP Multifunction Gigabit Server Adapter" },
182 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, HP_VENDORID, 0x7059,
183 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
184 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709, PCI_ANY_ID, PCI_ANY_ID,
185 "Broadcom NetXtreme II BCM5709 1000Base-T" },
187 /* BCM5709S controllers and OEM boards. */
188 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x171d,
189 "HP NC382m DP 1GbE Multifunction BL-c Adapter" },
190 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, HP_VENDORID, 0x7056,
191 "HP NC382i DP Multifunction Gigabit Server Adapter" },
192 { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S, PCI_ANY_ID, PCI_ANY_ID,
193 "Broadcom NetXtreme II BCM5709 1000Base-SX" },
195 /* BCM5716 controllers and OEM boards. */
196 { BRCM_VENDORID, BRCM_DEVICEID_BCM5716, PCI_ANY_ID, PCI_ANY_ID,
197 "Broadcom NetXtreme II BCM5716 1000Base-T" },
202 /****************************************************************************/
203 /* Supported Flash NVRAM device data. */
204 /****************************************************************************/
205 static const struct flash_spec flash_table[] =
207 #define BUFFERED_FLAGS (BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
208 #define NONBUFFERED_FLAGS (BCE_NV_WREN)
211 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
212 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
213 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
215 /* Expansion entry 0001 */
216 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
217 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 /* Saifun SA25F010 (non-buffered flash) */
221 /* strap, cfg1, & write1 need updates */
222 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
223 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
225 "Non-buffered flash (128kB)"},
226 /* Saifun SA25F020 (non-buffered flash) */
227 /* strap, cfg1, & write1 need updates */
228 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
229 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
230 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
231 "Non-buffered flash (256kB)"},
232 /* Expansion entry 0100 */
233 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
234 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
235 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
237 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
238 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
239 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
240 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
241 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
242 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
243 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
244 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
245 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
246 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
247 /* Saifun SA25F005 (non-buffered flash) */
248 /* strap, cfg1, & write1 need updates */
249 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
250 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
251 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
252 "Non-buffered flash (64kB)"},
254 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
255 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
256 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
258 /* Expansion entry 1001 */
259 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
260 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
261 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
263 /* Expansion entry 1010 */
264 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
265 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
266 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
268 /* ATMEL AT45DB011B (buffered flash) */
269 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
270 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
271 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
272 "Buffered flash (128kB)"},
273 /* Expansion entry 1100 */
274 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
275 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
276 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
278 /* Expansion entry 1101 */
279 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
280 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
281 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
283 /* Atmel Expansion entry 1110 */
284 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
285 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
286 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
287 "Entry 1110 (Atmel)"},
288 /* ATMEL AT45DB021B (buffered flash) */
289 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
290 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
291 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
292 "Buffered flash (256kB)"},
296 * The BCM5709 controllers transparently handle the
297 * differences between Atmel 264 byte pages and all
298 * flash devices which use 256 byte pages, so no
299 * logical-to-physical mapping is required in the
302 static struct flash_spec flash_5709 = {
303 .flags = BCE_NV_BUFFERED,
304 .page_bits = BCM5709_FLASH_PAGE_BITS,
305 .page_size = BCM5709_FLASH_PAGE_SIZE,
306 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
307 .total_size = BUFFERED_FLASH_TOTAL_SIZE * 2,
308 .name = "5709/5716 buffered flash (256kB)",
311 /****************************************************************************/
312 /* DragonFly device entry points. */
313 /****************************************************************************/
314 static int bce_probe(device_t);
315 static int bce_attach(device_t);
316 static int bce_detach(device_t);
317 static void bce_shutdown(device_t);
318 static int bce_miibus_read_reg(device_t, int, int);
319 static int bce_miibus_write_reg(device_t, int, int, int);
320 static void bce_miibus_statchg(device_t);
322 /****************************************************************************/
323 /* BCE Register/Memory Access Routines */
324 /****************************************************************************/
325 static uint32_t bce_reg_rd_ind(struct bce_softc *, uint32_t);
326 static void bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
327 static void bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
328 static uint32_t bce_shmem_rd(struct bce_softc *, uint32_t);
329 static void bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
331 /****************************************************************************/
332 /* BCE NVRAM Access Routines */
333 /****************************************************************************/
334 static int bce_acquire_nvram_lock(struct bce_softc *);
335 static int bce_release_nvram_lock(struct bce_softc *);
336 static void bce_enable_nvram_access(struct bce_softc *);
337 static void bce_disable_nvram_access(struct bce_softc *);
338 static int bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
340 static int bce_init_nvram(struct bce_softc *);
341 static int bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
342 static int bce_nvram_test(struct bce_softc *);
344 /****************************************************************************/
345 /* BCE DMA Allocate/Free Routines */
346 /****************************************************************************/
347 static int bce_dma_alloc(struct bce_softc *);
348 static void bce_dma_free(struct bce_softc *);
349 static void bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
351 /****************************************************************************/
352 /* BCE Firmware Synchronization and Load */
353 /****************************************************************************/
354 static int bce_fw_sync(struct bce_softc *, uint32_t);
355 static void bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
357 static void bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
359 static void bce_start_cpu(struct bce_softc *, struct cpu_reg *);
360 static void bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
361 static void bce_start_rxp_cpu(struct bce_softc *);
362 static void bce_init_rxp_cpu(struct bce_softc *);
363 static void bce_init_txp_cpu(struct bce_softc *);
364 static void bce_init_tpat_cpu(struct bce_softc *);
365 static void bce_init_cp_cpu(struct bce_softc *);
366 static void bce_init_com_cpu(struct bce_softc *);
367 static void bce_init_cpus(struct bce_softc *);
368 static void bce_setup_msix_table(struct bce_softc *);
369 static void bce_init_rss(struct bce_softc *);
371 static void bce_stop(struct bce_softc *);
372 static int bce_reset(struct bce_softc *, uint32_t);
373 static int bce_chipinit(struct bce_softc *);
374 static int bce_blockinit(struct bce_softc *);
375 static void bce_probe_pci_caps(struct bce_softc *);
376 static void bce_print_adapter_info(struct bce_softc *);
377 static void bce_get_media(struct bce_softc *);
378 static void bce_mgmt_init(struct bce_softc *);
379 static int bce_init_ctx(struct bce_softc *);
380 static void bce_get_mac_addr(struct bce_softc *);
381 static void bce_set_mac_addr(struct bce_softc *);
382 static void bce_set_rx_mode(struct bce_softc *);
383 static void bce_coal_change(struct bce_softc *);
384 static void bce_npoll_coal_change(struct bce_softc *);
385 static void bce_setup_serialize(struct bce_softc *);
386 static void bce_serialize_skipmain(struct bce_softc *);
387 static void bce_deserialize_skipmain(struct bce_softc *);
388 static void bce_set_timer_cpuid(struct bce_softc *, boolean_t);
389 static int bce_alloc_intr(struct bce_softc *);
390 static void bce_free_intr(struct bce_softc *);
391 static void bce_try_alloc_msix(struct bce_softc *);
392 static void bce_free_msix(struct bce_softc *, boolean_t);
393 static void bce_setup_ring_cnt(struct bce_softc *);
394 static int bce_setup_intr(struct bce_softc *);
395 static void bce_teardown_intr(struct bce_softc *);
396 static int bce_setup_msix(struct bce_softc *);
397 static void bce_teardown_msix(struct bce_softc *, int);
399 static int bce_create_tx_ring(struct bce_tx_ring *);
400 static void bce_destroy_tx_ring(struct bce_tx_ring *);
401 static void bce_init_tx_context(struct bce_tx_ring *);
402 static int bce_init_tx_chain(struct bce_tx_ring *);
403 static void bce_free_tx_chain(struct bce_tx_ring *);
404 static void bce_xmit(struct bce_tx_ring *);
405 static int bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
406 static int bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
407 uint16_t *, uint16_t *);
409 static int bce_create_rx_ring(struct bce_rx_ring *);
410 static void bce_destroy_rx_ring(struct bce_rx_ring *);
411 static void bce_init_rx_context(struct bce_rx_ring *);
412 static int bce_init_rx_chain(struct bce_rx_ring *);
413 static void bce_free_rx_chain(struct bce_rx_ring *);
414 static int bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t,
416 static void bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
418 static struct pktinfo *bce_rss_pktinfo(struct pktinfo *, uint32_t,
419 const struct l2_fhdr *);
421 static void bce_start(struct ifnet *, struct ifaltq_subque *);
422 static int bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
423 static void bce_watchdog(struct ifaltq_subque *);
424 static int bce_ifmedia_upd(struct ifnet *);
425 static void bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
426 static void bce_init(void *);
428 static void bce_npoll(struct ifnet *, struct ifpoll_info *);
429 static void bce_npoll_rx(struct ifnet *, void *, int);
430 static void bce_npoll_tx(struct ifnet *, void *, int);
431 static void bce_npoll_status(struct ifnet *);
432 static void bce_npoll_rx_pack(struct ifnet *, void *, int);
434 static void bce_serialize(struct ifnet *, enum ifnet_serialize);
435 static void bce_deserialize(struct ifnet *, enum ifnet_serialize);
436 static int bce_tryserialize(struct ifnet *, enum ifnet_serialize);
438 static void bce_serialize_assert(struct ifnet *, enum ifnet_serialize,
442 static void bce_intr(struct bce_softc *);
443 static void bce_intr_legacy(void *);
444 static void bce_intr_msi(void *);
445 static void bce_intr_msi_oneshot(void *);
446 static void bce_intr_msix_rxtx(void *);
447 static void bce_intr_msix_rx(void *);
448 static void bce_tx_intr(struct bce_tx_ring *, uint16_t);
449 static void bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
450 static void bce_phy_intr(struct bce_softc *);
451 static void bce_disable_intr(struct bce_softc *);
452 static void bce_enable_intr(struct bce_softc *);
453 static void bce_reenable_intr(struct bce_rx_ring *);
454 static void bce_check_msi(void *);
456 static void bce_stats_update(struct bce_softc *);
457 static void bce_tick(void *);
458 static void bce_tick_serialized(struct bce_softc *);
459 static void bce_pulse(void *);
461 static void bce_add_sysctls(struct bce_softc *);
462 static int bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
463 static int bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
464 static int bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
465 static int bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
466 static int bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
467 static int bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
468 static int bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
469 static int bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
470 static int bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
471 uint32_t *, uint32_t);
475 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023. Linux's bnx2
476 * uses 1023 as the TX ticks upper limit. However, using 1023 will
477 * cause 5708(B2) to generate extra interrupts (~2000/s) even when
478 * there is _no_ network activity on the NIC.
480 static uint32_t bce_tx_bds_int = 255; /* bcm: 20 */
481 static uint32_t bce_tx_bds = 255; /* bcm: 20 */
482 static uint32_t bce_tx_ticks_int = 1022; /* bcm: 80 */
483 static uint32_t bce_tx_ticks = 1022; /* bcm: 80 */
484 static uint32_t bce_rx_bds_int = 128; /* bcm: 6 */
485 static uint32_t bce_rx_bds = 0; /* bcm: 6 */
486 static uint32_t bce_rx_ticks_int = 150; /* bcm: 18 */
487 static uint32_t bce_rx_ticks = 150; /* bcm: 18 */
489 static int bce_tx_wreg = 8;
491 static int bce_msi_enable = 1;
492 static int bce_msix_enable = 1;
494 static int bce_rx_pages = RX_PAGES_DEFAULT;
495 static int bce_tx_pages = TX_PAGES_DEFAULT;
497 static int bce_rx_rings = 0; /* auto */
498 static int bce_tx_rings = 0; /* auto */
500 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
501 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
502 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
503 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
504 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
505 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
506 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
507 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
508 TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
509 TUNABLE_INT("hw.bce.msix.enable", &bce_msix_enable);
510 TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
511 TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
512 TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
513 TUNABLE_INT("hw.bce.tx_rings", &bce_tx_rings);
514 TUNABLE_INT("hw.bce.rx_rings", &bce_rx_rings);
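/*
 * The settings above are loader tunables.  A minimal, hypothetical example
 * of overriding the defaults from /boot/loader.conf (the values shown are
 * for illustration only):
 *
 *	hw.bce.msi.enable="0"
 *	hw.bce.rx_pages="4"
 *	hw.bce.tx_wreg="16"
 */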
516 /****************************************************************************/
517 /* DragonFly device dispatch table. */
518 /****************************************************************************/
519 static device_method_t bce_methods[] = {
520 /* Device interface */
521 DEVMETHOD(device_probe, bce_probe),
522 DEVMETHOD(device_attach, bce_attach),
523 DEVMETHOD(device_detach, bce_detach),
524 DEVMETHOD(device_shutdown, bce_shutdown),
527 DEVMETHOD(bus_print_child, bus_generic_print_child),
528 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
531 DEVMETHOD(miibus_readreg, bce_miibus_read_reg),
532 DEVMETHOD(miibus_writereg, bce_miibus_write_reg),
533 DEVMETHOD(miibus_statchg, bce_miibus_statchg),
538 static driver_t bce_driver = {
541 sizeof(struct bce_softc)
544 static devclass_t bce_devclass;
546 DECLARE_DUMMY_MODULE(if_bce);
547 MODULE_DEPEND(bce, miibus, 1, 1, 1);
548 DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
549 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
551 /****************************************************************************/
552 /* Device probe function. */
554 /* Compares the device to the driver's list of supported devices and */
555 /* reports back to the OS whether this is the right driver for the device. */
558 /* BUS_PROBE_DEFAULT on success, positive value on failure. */
559 /****************************************************************************/
561 bce_probe(device_t dev)
564 uint16_t vid, did, svid, sdid;
566 /* Get the data for the device to be probed. */
567 vid = pci_get_vendor(dev);
568 did = pci_get_device(dev);
569 svid = pci_get_subvendor(dev);
570 sdid = pci_get_subdevice(dev);
572 /* Look through the list of known devices for a match. */
573 for (t = bce_devs; t->bce_name != NULL; ++t) {
574 if (vid == t->bce_vid && did == t->bce_did &&
575 (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
576 (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
577 uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
580 descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
582 /* Print out the device identity. */
583 ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
585 ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
587 device_set_desc_copy(dev, descbuf);
588 kfree(descbuf, M_TEMP);
595 /****************************************************************************/
596 /* Prints useful adapter info. */
603 /****************************************************************************/
605 bce_print_adapter_info(struct bce_softc *sc)
607 device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);
609 kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
610 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
613 if (sc->bce_flags & BCE_PCIE_FLAG) {
614 kprintf("Bus (PCIe x%d, ", sc->link_width);
615 switch (sc->link_speed) {
617 kprintf("2.5Gbps); ");
623 kprintf("Unknown link speed); ");
627 kprintf("Bus (PCI%s, %s, %dMHz); ",
628 ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
629 ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
633 /* Firmware version and device features. */
634 kprintf("B/C (%s)", sc->bce_bc_ver);
636 if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
637 (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
639 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
640 kprintf("MFW[%s]", sc->bce_mfw_ver);
641 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
648 /****************************************************************************/
649 /* PCI Capabilities Probe Function. */
651 /* Walks the PCI capabilities list for the device to find what features are */
656 /****************************************************************************/
658 bce_probe_pci_caps(struct bce_softc *sc)
660 device_t dev = sc->bce_dev;
663 if (pci_is_pcix(dev))
664 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
666 ptr = pci_get_pciecap_ptr(dev);
668 uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);
670 sc->link_speed = link_status & 0xf;
671 sc->link_width = (link_status >> 4) & 0x3f;
672 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
673 sc->bce_flags |= BCE_PCIE_FLAG;
677 /****************************************************************************/
678 /* Device attach function. */
680 /* Allocates device resources, performs secondary chip identification, */
681 /* resets and initializes the hardware, and initializes driver instance */
685 /* 0 on success, positive value on failure. */
686 /****************************************************************************/
688 bce_attach(device_t dev)
690 struct bce_softc *sc = device_get_softc(dev);
691 struct ifnet *ifp = &sc->arpcom.ac_if;
695 struct mii_probe_args mii_args;
696 uintptr_t mii_priv = 0;
699 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
701 lwkt_serialize_init(&sc->main_serialize);
702 for (i = 0; i < BCE_MSIX_MAX; ++i) {
703 struct bce_msix_data *msix = &sc->bce_msix[i];
705 msix->msix_cpuid = -1;
709 pci_enable_busmaster(dev);
711 bce_probe_pci_caps(sc);
713 /* Allocate PCI memory resources. */
715 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
716 RF_ACTIVE | PCI_RF_DENSE);
717 if (sc->bce_res_mem == NULL) {
718 device_printf(dev, "PCI memory allocation failed\n");
721 sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
722 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
725 * Configure byte swap and enable indirect register access.
726 * Rely on CPU to do target byte swapping on big endian systems.
727 * Access to registers outside of PCI configuration space is not
728 * valid until this is done.
730 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
731 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
732 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
734 /* Save ASIC revision info. */
735 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
737 /* Weed out any non-production controller revisions. */
738 switch (BCE_CHIP_ID(sc)) {
739 case BCE_CHIP_ID_5706_A0:
740 case BCE_CHIP_ID_5706_A1:
741 case BCE_CHIP_ID_5708_A0:
742 case BCE_CHIP_ID_5708_B0:
743 case BCE_CHIP_ID_5709_A0:
744 case BCE_CHIP_ID_5709_B0:
745 case BCE_CHIP_ID_5709_B1:
747 /* 5709C B2 seems to work fine */
748 case BCE_CHIP_ID_5709_B2:
750 device_printf(dev, "Unsupported chip id 0x%08x!\n",
756 mii_priv |= BRGPHY_FLAG_WIRESPEED;
757 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
758 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
759 BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
760 mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
762 mii_priv |= BRGPHY_FLAG_BER_BUG;
766 * Find the base address for shared memory access.
767 * Newer versions of bootcode use a signature and offset
768 * while older versions use a fixed address.
770 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
771 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
772 BCE_SHM_HDR_SIGNATURE_SIG) {
773 /* Multi-port devices use different offsets in shared memory. */
774 sc->bce_shmem_base = REG_RD_IND(sc,
775 BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
777 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
780 /* Fetch the bootcode revision. */
781 val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
782 for (i = 0, j = 0; i < 3; i++) {
786 num = (uint8_t)(val >> (24 - (i * 8)));
787 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
788 if (num >= k || !skip0 || k == 1) {
789 sc->bce_bc_ver[j++] = (num / k) + '0';
794 sc->bce_bc_ver[j++] = '.';
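/*
 * Worked example of the decoding above (illustrative value): a
 * BCE_DEV_INFO_BC_REV word of 0x01020300 carries the version in its three
 * most significant bytes (0x01, 0x02, 0x03) and decodes to the string
 * "1.2.3"; leading zeros within each byte are suppressed, so 0x010a1400
 * would decode to "1.10.20".
 */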
797 /* Check if any management firmware is running. */
798 val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
799 if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
800 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
802 /* Allow time for firmware to enter the running state. */
803 for (i = 0; i < 30; i++) {
804 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
805 if (val & BCE_CONDITION_MFW_RUN_MASK)
811 /* Check the current bootcode state. */
812 val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
813 BCE_CONDITION_MFW_RUN_MASK;
814 if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
815 val != BCE_CONDITION_MFW_RUN_NONE) {
816 uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
818 for (i = 0, j = 0; j < 3; j++) {
819 val = bce_reg_rd_ind(sc, addr + j * 4);
821 memcpy(&sc->bce_mfw_ver[i], &val, 4);
826 /* Get PCI bus information (speed and type). */
827 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
828 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
831 sc->bce_flags |= BCE_PCIX_FLAG;
833 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
834 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
836 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
837 sc->bus_speed_mhz = 133;
840 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
841 sc->bus_speed_mhz = 100;
844 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
845 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
846 sc->bus_speed_mhz = 66;
849 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
850 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
851 sc->bus_speed_mhz = 50;
854 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
855 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
856 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
857 sc->bus_speed_mhz = 33;
861 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
862 sc->bus_speed_mhz = 66;
864 sc->bus_speed_mhz = 33;
867 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
868 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
870 /* Reset the controller. */
871 rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
875 /* Initialize the controller. */
876 rc = bce_chipinit(sc);
878 device_printf(dev, "Controller initialization failed!\n");
882 /* Perform NVRAM test. */
883 rc = bce_nvram_test(sc);
885 device_printf(dev, "NVRAM test failed!\n");
889 /* Fetch the permanent Ethernet MAC address. */
890 bce_get_mac_addr(sc);
893 * Trip points control how many BDs
894 * should be ready before generating an
895 * interrupt while ticks control how long
896 * a BD can sit in the chain before
897 * generating an interrupt. Set the default
898 * values for the RX and TX rings.
902 /* Force more frequent interrupts. */
903 sc->bce_tx_quick_cons_trip_int = 1;
904 sc->bce_tx_quick_cons_trip = 1;
905 sc->bce_tx_ticks_int = 0;
906 sc->bce_tx_ticks = 0;
908 sc->bce_rx_quick_cons_trip_int = 1;
909 sc->bce_rx_quick_cons_trip = 1;
910 sc->bce_rx_ticks_int = 0;
911 sc->bce_rx_ticks = 0;
913 sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
914 sc->bce_tx_quick_cons_trip = bce_tx_bds;
915 sc->bce_tx_ticks_int = bce_tx_ticks_int;
916 sc->bce_tx_ticks = bce_tx_ticks;
918 sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
919 sc->bce_rx_quick_cons_trip = bce_rx_bds;
920 sc->bce_rx_ticks_int = bce_rx_ticks_int;
921 sc->bce_rx_ticks = bce_rx_ticks;
924 /* Update statistics once every second. */
925 sc->bce_stats_ticks = 1000000 & 0xffff00;
927 /* Find the media type for the adapter. */
930 /* Find out RX/TX ring count */
931 bce_setup_ring_cnt(sc);
933 /* Allocate DMA memory resources. */
934 rc = bce_dma_alloc(sc);
936 device_printf(dev, "DMA resource allocation failed!\n");
940 /* Allocate PCI IRQ resources. */
941 rc = bce_alloc_intr(sc);
945 /* Setup serializer */
946 bce_setup_serialize(sc);
948 /* Initialize the ifnet interface. */
950 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
951 ifp->if_ioctl = bce_ioctl;
952 ifp->if_start = bce_start;
953 ifp->if_init = bce_init;
954 ifp->if_serialize = bce_serialize;
955 ifp->if_deserialize = bce_deserialize;
956 ifp->if_tryserialize = bce_tryserialize;
958 ifp->if_serialize_assert = bce_serialize_assert;
961 ifp->if_npoll = bce_npoll;
964 ifp->if_mtu = ETHERMTU;
965 ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
966 ifp->if_capabilities = BCE_IF_CAPABILITIES;
967 if (sc->rx_ring_cnt > 1)
968 ifp->if_capabilities |= IFCAP_RSS;
969 ifp->if_capenable = ifp->if_capabilities;
971 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
972 ifp->if_baudrate = IF_Gbps(2.5);
974 ifp->if_baudrate = IF_Gbps(1);
976 ifp->if_nmbclusters = sc->rx_ring_cnt * USABLE_RX_BD(&sc->rx_rings[0]);
978 ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
979 ifq_set_ready(&ifp->if_snd);
980 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);
982 if (sc->tx_ring_cnt > 1) {
983 ifp->if_mapsubq = ifq_mapsubq_modulo;
984 ifq_set_subq_divisor(&ifp->if_snd, sc->tx_ring_cnt);
990 mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
991 mii_args.mii_probemask = 1 << sc->bce_phy_addr;
992 mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
993 mii_args.mii_priv = mii_priv;
995 rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
997 device_printf(dev, "PHY probe failed!\n");
1001 /* Attach to the Ethernet interface list. */
1002 ether_ifattach(ifp, sc->eaddr, NULL);
1004 /* Setup TX rings and subqueues */
1005 for (i = 0; i < sc->tx_ring_cnt; ++i) {
1006 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
1007 struct bce_tx_ring *txr = &sc->tx_rings[i];
1009 ifsq_set_cpuid(ifsq, sc->bce_msix[i].msix_cpuid);
1010 ifsq_set_priv(ifsq, txr);
1011 ifsq_set_hw_serialize(ifsq, &txr->tx_serialize);
1014 ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog);
1017 callout_init_mp(&sc->bce_tick_callout);
1018 callout_init_mp(&sc->bce_pulse_callout);
1019 callout_init_mp(&sc->bce_ckmsi_callout);
1021 rc = bce_setup_intr(sc);
1023 device_printf(dev, "Failed to setup IRQ!\n");
1024 ether_ifdetach(ifp);
1028 /* Set timer CPUID */
1029 bce_set_timer_cpuid(sc, FALSE);
1031 /* Add the supported sysctls to the kernel. */
1032 bce_add_sysctls(sc);
1035 * The chip reset earlier notified the bootcode that
1036 * a driver is present. We now need to start our pulse
1037 * routine so that the bootcode is reminded that we're
1042 /* Get the firmware running so IPMI still works */
1046 bce_print_adapter_info(sc);
1054 /****************************************************************************/
1055 /* Device detach function. */
1057 /* Stops the controller, resets the controller, and releases resources. */
1060 /* 0 on success, positive value on failure. */
1061 /****************************************************************************/
1063 bce_detach(device_t dev)
1065 struct bce_softc *sc = device_get_softc(dev);
1067 if (device_is_attached(dev)) {
1068 struct ifnet *ifp = &sc->arpcom.ac_if;
1071 ifnet_serialize_all(ifp);
1073 /* Stop and reset the controller. */
1074 callout_stop(&sc->bce_pulse_callout);
1076 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1077 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1079 msg = BCE_DRV_MSG_CODE_UNLOAD;
1082 bce_teardown_intr(sc);
1084 ifnet_deserialize_all(ifp);
1086 ether_ifdetach(ifp);
1089 /* If we have a child device on the MII bus remove it too. */
1091 device_delete_child(dev, sc->bce_miibus);
1092 bus_generic_detach(dev);
1096 if (sc->bce_res_mem != NULL) {
1097 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1103 if (sc->serializes != NULL)
1104 kfree(sc->serializes, M_DEVBUF);
1106 if (sc->tx_rmap != NULL)
1107 if_ringmap_free(sc->tx_rmap);
1108 if (sc->rx_rmap != NULL)
1109 if_ringmap_free(sc->rx_rmap);
1114 /****************************************************************************/
1115 /* Device shutdown function. */
1117 /* Stops and resets the controller. */
1121 /****************************************************************************/
1123 bce_shutdown(device_t dev)
1125 struct bce_softc *sc = device_get_softc(dev);
1126 struct ifnet *ifp = &sc->arpcom.ac_if;
1129 ifnet_serialize_all(ifp);
1132 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1133 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1135 msg = BCE_DRV_MSG_CODE_UNLOAD;
1138 ifnet_deserialize_all(ifp);
1141 /****************************************************************************/
1142 /* Indirect register read. */
1144 /* Reads NetXtreme II registers using an index/data register pair in PCI */
1145 /* configuration space. Using this mechanism avoids issues with posted */
1146 /* reads but is much slower than memory-mapped I/O. */
1149 /* The value of the register. */
1150 /****************************************************************************/
1152 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
1154 device_t dev = sc->bce_dev;
1156 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1157 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
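/*
 * Illustrative sketch of the window mechanism used above (the offset value
 * is hypothetical): to read a register at offset 0x0408 indirectly, the
 * offset is first written to the window address register in PCI
 * configuration space and the data is then read back through the window
 * data register:
 *
 *	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, 0x0408, 4);
 *	val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
 */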
1160 /****************************************************************************/
1161 /* Indirect register write. */
1163 /* Writes NetXtreme II registers using an index/data register pair in PCI */
1164 /* configuration space. Using this mechanism avoids issues with posted */
1165 /* writes but is much slower than memory-mapped I/O. */
1169 /****************************************************************************/
1171 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
1173 device_t dev = sc->bce_dev;
1175 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1176 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1179 /****************************************************************************/
1180 /* Shared memory write. */
1182 /* Writes NetXtreme II shared memory region. */
1186 /****************************************************************************/
1188 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
1190 bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1193 /****************************************************************************/
1194 /* Shared memory read. */
1196 /* Reads NetXtreme II shared memory region. */
1199 /* The 32 bit value read. */
1200 /****************************************************************************/
1202 bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
1204 return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
1207 /****************************************************************************/
1208 /* Context memory write. */
1210 /* The NetXtreme II controller uses context memory to track connection */
1211 /* information for L2 and higher network protocols. */
1215 /****************************************************************************/
1217 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
1220 uint32_t idx, offset = ctx_offset + cid_addr;
1221 uint32_t val, retry_cnt = 5;
1223 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1224 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1225 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1226 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1228 for (idx = 0; idx < retry_cnt; idx++) {
1229 val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1230 if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1235 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
1236 device_printf(sc->bce_dev,
1237 "Unable to write CTX memory: "
1238 "cid_addr = 0x%08X, offset = 0x%08X!\n",
1239 cid_addr, ctx_offset);
1242 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1243 REG_WR(sc, BCE_CTX_DATA, ctx_val);
1247 /****************************************************************************/
1248 /* PHY register read. */
1250 /* Implements register reads on the MII bus. */
1253 /* The value of the register. */
1254 /****************************************************************************/
1256 bce_miibus_read_reg(device_t dev, int phy, int reg)
1258 struct bce_softc *sc = device_get_softc(dev);
1262 /* Make sure we are accessing the correct PHY address. */
1263 KASSERT(phy == sc->bce_phy_addr,
1264 ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1266 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1267 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1268 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1270 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1271 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1276 val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1277 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1278 BCE_EMAC_MDIO_COMM_START_BUSY;
1279 REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1281 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1284 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1285 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1288 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1289 val &= BCE_EMAC_MDIO_COMM_DATA;
1294 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1295 if_printf(&sc->arpcom.ac_if,
1296 "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1300 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1303 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1304 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1305 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1307 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1308 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1312 return (val & 0xffff);
1315 /****************************************************************************/
1316 /* PHY register write. */
1318 /* Implements register writes on the MII bus. */
1321 /* The value of the register. */
1322 /****************************************************************************/
1324 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1326 struct bce_softc *sc = device_get_softc(dev);
1330 /* Make sure we are accessing the correct PHY address. */
1331 KASSERT(phy == sc->bce_phy_addr,
1332 ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1334 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1335 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1336 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1338 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1339 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1344 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1345 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1346 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1347 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1349 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1352 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1353 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1359 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1360 if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
1362 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1363 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1364 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1366 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1367 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1374 /****************************************************************************/
1375 /* MII bus status change. */
1377 /* Called by the MII bus driver when the PHY establishes link to set the */
1378 /* MAC interface registers. */
1382 /****************************************************************************/
1384 bce_miibus_statchg(device_t dev)
1386 struct bce_softc *sc = device_get_softc(dev);
1387 struct mii_data *mii = device_get_softc(sc->bce_miibus);
1389 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1392 * Set MII or GMII interface based on the speed negotiated
1395 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1396 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1397 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1399 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1403 * Set half or full duplex based on the duplex mode negotiated
1406 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1407 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1409 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1413 /****************************************************************************/
1414 /* Acquire NVRAM lock. */
1416 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
1417 /* Lock 1 is used by the firmware and lock 2 is used by the driver; the */
1418 /* other locks are not used here. */
1421 /* 0 on success, positive value on failure. */
1422 /****************************************************************************/
1424 bce_acquire_nvram_lock(struct bce_softc *sc)
1429 /* Request access to the flash interface. */
1430 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1431 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1432 val = REG_RD(sc, BCE_NVM_SW_ARB);
1433 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1439 if (j >= NVRAM_TIMEOUT_COUNT) {
1445 /****************************************************************************/
1446 /* Release NVRAM lock. */
1448 /* When the caller is finished accessing NVRAM the lock must be released. */
1449 /* Lock 1 is used by the firmware and lock 2 is used by the driver; the */
1450 /* other locks are not used here. */
1453 /* 0 on success, positive value on failure. */
1454 /****************************************************************************/
1456 bce_release_nvram_lock(struct bce_softc *sc)
1462 * Relinquish nvram interface.
1464 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1466 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1467 val = REG_RD(sc, BCE_NVM_SW_ARB);
1468 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1474 if (j >= NVRAM_TIMEOUT_COUNT) {
1480 /****************************************************************************/
1481 /* Enable NVRAM access. */
1483 /* Before accessing NVRAM for read or write operations the caller must */
1484 /* enable NVRAM access. */
1488 /****************************************************************************/
1490 bce_enable_nvram_access(struct bce_softc *sc)
1494 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1495 /* Enable both bits, even on read. */
1496 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1497 val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1500 /****************************************************************************/
1501 /* Disable NVRAM access. */
1503 /* When the caller is finished accessing NVRAM access must be disabled. */
1507 /****************************************************************************/
1509 bce_disable_nvram_access(struct bce_softc *sc)
1513 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1515 /* Disable both bits, even after read. */
1516 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1517 val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
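/*
 * Typical calling sequence for the NVRAM helpers above, as used by
 * bce_nvram_read() later in this file (a sketch; error handling omitted):
 *
 *	bce_acquire_nvram_lock(sc);
 *	bce_enable_nvram_access(sc);
 *	... bce_nvram_read_dword(sc, offset, buf, cmd_flags) ...
 *	bce_disable_nvram_access(sc);
 *	bce_release_nvram_lock(sc);
 */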
1520 /****************************************************************************/
1521 /* Read a dword (32 bits) from NVRAM. */
1523 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */
1524 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */
1527 /* 0 on success and the 32 bit value read, positive value on failure. */
1528 /****************************************************************************/
1530 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1536 /* Build the command word. */
1537 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1539 /* Calculate the offset for buffered flash. */
1540 if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
1541 offset = ((offset / sc->bce_flash_info->page_size) <<
1542 sc->bce_flash_info->page_bits) +
1543 (offset % sc->bce_flash_info->page_size);
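/*
 * Worked example of the translation above, assuming a buffered Atmel part
 * with 264 byte pages and 9 page-address bits (values are illustrative; see
 * the flash table above): a linear offset of 1000 falls in page 3 at byte
 * 208 within the page, so the device address becomes (3 << 9) + 208 = 1744.
 */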
1547 * Clear the DONE bit separately, set the address to read,
1548 * and issue the read.
1550 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1551 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1552 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1554 /* Wait for completion. */
1555 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1560 val = REG_RD(sc, BCE_NVM_COMMAND);
1561 if (val & BCE_NVM_COMMAND_DONE) {
1562 val = REG_RD(sc, BCE_NVM_READ);
1565 memcpy(ret_val, &val, 4);
1570 /* Check for errors. */
1571 if (i >= NVRAM_TIMEOUT_COUNT) {
1572 if_printf(&sc->arpcom.ac_if,
1573 "Timeout error reading NVRAM at offset 0x%08X!\n",
1580 /****************************************************************************/
1581 /* Initialize NVRAM access. */
1583 /* Identify the NVRAM device in use and prepare the NVRAM interface to */
1584 /* access that device. */
1587 /* 0 on success, positive value on failure. */
1588 /****************************************************************************/
1590 bce_init_nvram(struct bce_softc *sc)
1593 int j, entry_count, rc = 0;
1594 const struct flash_spec *flash;
1596 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1597 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1598 sc->bce_flash_info = &flash_5709;
1599 goto bce_init_nvram_get_flash_size;
1602 /* Determine the selected interface. */
1603 val = REG_RD(sc, BCE_NVM_CFG1);
1605 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1608 * Flash reconfiguration is required to support additional
1609 * NVRAM devices not directly supported in hardware.
1610 * Check if the flash interface was reconfigured
1614 if (val & 0x40000000) {
1615 /* Flash interface reconfigured by bootcode. */
1616 for (j = 0, flash = flash_table; j < entry_count;
1618 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1619 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1620 sc->bce_flash_info = flash;
1625 /* Flash interface not yet reconfigured. */
1628 if (val & (1 << 23))
1629 mask = FLASH_BACKUP_STRAP_MASK;
1631 mask = FLASH_STRAP_MASK;
1633 /* Look for the matching NVRAM device configuration data. */
1634 for (j = 0, flash = flash_table; j < entry_count;
1636 /* Check if the device matches any of the known devices. */
1637 if ((val & mask) == (flash->strapping & mask)) {
1638 /* Found a device match. */
1639 sc->bce_flash_info = flash;
1641 /* Request access to the flash interface. */
1642 rc = bce_acquire_nvram_lock(sc);
1646 /* Reconfigure the flash interface. */
1647 bce_enable_nvram_access(sc);
1648 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1649 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1650 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1651 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1652 bce_disable_nvram_access(sc);
1653 bce_release_nvram_lock(sc);
1659 /* Check if a matching device was found. */
1660 if (j == entry_count) {
1661 sc->bce_flash_info = NULL;
1662 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1666 bce_init_nvram_get_flash_size:
1667 /* Fetch the configured NVRAM size from the shared memory interface. */
1668 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
1669 BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1671 sc->bce_flash_size = val;
1673 sc->bce_flash_size = sc->bce_flash_info->total_size;
1678 /****************************************************************************/
1679 /* Read an arbitrary range of data from NVRAM. */
1681 /* Prepares the NVRAM interface for access and reads the requested data */
1682 /* into the supplied buffer. */
1685 /* 0 on success and the data read, positive value on failure. */
1686 /****************************************************************************/
1688 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1691 uint32_t cmd_flags, offset32, len32, extra;
1697 /* Request access to the flash interface. */
1698 rc = bce_acquire_nvram_lock(sc);
1702 /* Enable access to flash interface */
1703 bce_enable_nvram_access(sc);
1711 /* XXX should we release nvram lock if read_dword() fails? */
1717 pre_len = 4 - (offset & 3);
1719 if (pre_len >= len32) {
1721 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1723 cmd_flags = BCE_NVM_COMMAND_FIRST;
1726 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1730 memcpy(ret_buf, buf + (offset & 3), pre_len);
1738 extra = 4 - (len32 & 3);
1739 len32 = (len32 + 4) & ~3;
1746 cmd_flags = BCE_NVM_COMMAND_LAST;
1748 cmd_flags = BCE_NVM_COMMAND_FIRST |
1749 BCE_NVM_COMMAND_LAST;
1751 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1753 memcpy(ret_buf, buf, 4 - extra);
1754 } else if (len32 > 0) {
1757 /* Read the first word. */
1761 cmd_flags = BCE_NVM_COMMAND_FIRST;
1763 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1765 /* Advance to the next dword. */
1770 while (len32 > 4 && rc == 0) {
1771 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1773 /* Advance to the next dword. */
1780 goto bce_nvram_read_locked_exit;
1782 cmd_flags = BCE_NVM_COMMAND_LAST;
1783 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1785 memcpy(ret_buf, buf, 4 - extra);
1788 bce_nvram_read_locked_exit:
1789 /* Disable access to flash interface and release the lock. */
1790 bce_disable_nvram_access(sc);
1791 bce_release_nvram_lock(sc);
1796 /****************************************************************************/
1797 /* Verifies that NVRAM is accessible and contains valid data. */
1799 /* Reads the configuration data from NVRAM and verifies that the CRC is */
1803 /* 0 on success, positive value on failure. */
1804 /****************************************************************************/
1806 bce_nvram_test(struct bce_softc *sc)
1808 uint32_t buf[BCE_NVRAM_SIZE / 4];
1809 uint32_t magic, csum;
1810 uint8_t *data = (uint8_t *)buf;
1814 * Check that the device NVRAM is valid by reading
1815 * the magic value at offset 0.
1817 rc = bce_nvram_read(sc, 0, data, 4);
1821 magic = be32toh(buf[0]);
1822 if (magic != BCE_NVRAM_MAGIC) {
1823 if_printf(&sc->arpcom.ac_if,
1824 "Invalid NVRAM magic value! Expected: 0x%08X, "
1825 "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1830 * Verify that the device NVRAM includes valid
1831 * configuration data.
1833 rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
1837 csum = ether_crc32_le(data, 0x100);
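/*
 * Note: each 0x100 byte configuration block is assumed to carry its own
 * CRC32 (conventionally in its last four bytes), so running CRC32 over the
 * entire block yields a fixed residual value when the contents are intact;
 * BCE_CRC32_RESIDUAL is that expected residual.
 */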
1838 if (csum != BCE_CRC32_RESIDUAL) {
1839 if_printf(&sc->arpcom.ac_if,
1840 "Invalid Manufacturing Information NVRAM CRC! "
1841 "Expected: 0x%08X, Found: 0x%08X\n",
1842 BCE_CRC32_RESIDUAL, csum);
1846 csum = ether_crc32_le(data + 0x100, 0x100);
1847 if (csum != BCE_CRC32_RESIDUAL) {
1848 if_printf(&sc->arpcom.ac_if,
1849 "Invalid Feature Configuration Information "
1850 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1851 BCE_CRC32_RESIDUAL, csum);
1857 /****************************************************************************/
1858 /* Identifies the current media type of the controller and sets the PHY */
1863 /****************************************************************************/
1865 bce_get_media(struct bce_softc *sc)
1869 sc->bce_phy_addr = 1;
1871 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1872 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1873 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
1874 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
1878 * The BCM5709S is software configurable
1879 * for Copper or SerDes operation.
1881 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
1883 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
1884 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1888 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
1889 strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
1892 (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
1895 if (pci_get_function(sc->bce_dev) == 0) {
1900 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1908 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1912 } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
1913 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1916 if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
1917 sc->bce_flags |= BCE_NO_WOL_FLAG;
1918 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1919 sc->bce_phy_addr = 2;
1920 val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1921 if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
1922 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
1924 } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
1925 (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
1926 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
1931 bce_destroy_tx_ring(struct bce_tx_ring *txr)
1935 /* Destroy the TX buffer descriptor DMA resources. */
1936 if (txr->tx_bd_chain_tag != NULL) {
1937 for (i = 0; i < txr->tx_pages; i++) {
1938 if (txr->tx_bd_chain[i] != NULL) {
1939 bus_dmamap_unload(txr->tx_bd_chain_tag,
1940 txr->tx_bd_chain_map[i]);
1941 bus_dmamem_free(txr->tx_bd_chain_tag,
1942 txr->tx_bd_chain[i],
1943 txr->tx_bd_chain_map[i]);
1946 bus_dma_tag_destroy(txr->tx_bd_chain_tag);
1949 /* Destroy the TX mbuf DMA resources. */
1950 if (txr->tx_mbuf_tag != NULL) {
1951 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
1952 /* Must have been unloaded in bce_stop() */
1953 KKASSERT(txr->tx_bufs[i].tx_mbuf_ptr == NULL);
1954 bus_dmamap_destroy(txr->tx_mbuf_tag,
1955 txr->tx_bufs[i].tx_mbuf_map);
1957 bus_dma_tag_destroy(txr->tx_mbuf_tag);
1960 if (txr->tx_bd_chain_map != NULL)
1961 kfree(txr->tx_bd_chain_map, M_DEVBUF);
1962 if (txr->tx_bd_chain != NULL)
1963 kfree(txr->tx_bd_chain, M_DEVBUF);
1964 if (txr->tx_bd_chain_paddr != NULL)
1965 kfree(txr->tx_bd_chain_paddr, M_DEVBUF);
1967 if (txr->tx_bufs != NULL)
1968 kfree(txr->tx_bufs, M_DEVBUF);
1972 bce_destroy_rx_ring(struct bce_rx_ring *rxr)
1976 /* Destroy the RX buffer descriptor DMA resources. */
1977 if (rxr->rx_bd_chain_tag != NULL) {
1978 for (i = 0; i < rxr->rx_pages; i++) {
1979 if (rxr->rx_bd_chain[i] != NULL) {
1980 bus_dmamap_unload(rxr->rx_bd_chain_tag,
1981 rxr->rx_bd_chain_map[i]);
1982 bus_dmamem_free(rxr->rx_bd_chain_tag,
1983 rxr->rx_bd_chain[i],
1984 rxr->rx_bd_chain_map[i]);
1987 bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
1990 /* Destroy the RX mbuf DMA resources. */
1991 if (rxr->rx_mbuf_tag != NULL) {
1992 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
1993 /* Must have been unloaded in bce_stop() */
1994 KKASSERT(rxr->rx_bufs[i].rx_mbuf_ptr == NULL);
1995 bus_dmamap_destroy(rxr->rx_mbuf_tag,
1996 rxr->rx_bufs[i].rx_mbuf_map);
1998 bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
1999 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2002 if (rxr->rx_bd_chain_map != NULL)
2003 kfree(rxr->rx_bd_chain_map, M_DEVBUF);
2004 if (rxr->rx_bd_chain != NULL)
2005 kfree(rxr->rx_bd_chain, M_DEVBUF);
2006 if (rxr->rx_bd_chain_paddr != NULL)
2007 kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);
2009 if (rxr->rx_bufs != NULL)
2010 kfree(rxr->rx_bufs, M_DEVBUF);
2013 /****************************************************************************/
2014 /* Free any DMA memory owned by the driver. */
2016 /* Scans through each data structure that requires DMA memory and frees */
2017 /* the memory if allocated. */
2021 /****************************************************************************/
2023 bce_dma_free(struct bce_softc *sc)
2027 /* Destroy the status block. */
2028 if (sc->status_tag != NULL) {
2029 if (sc->status_block != NULL) {
2030 bus_dmamap_unload(sc->status_tag, sc->status_map);
2031 bus_dmamem_free(sc->status_tag, sc->status_block,
2034 bus_dma_tag_destroy(sc->status_tag);
2037 /* Destroy the statistics block. */
2038 if (sc->stats_tag != NULL) {
2039 if (sc->stats_block != NULL) {
2040 bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2041 bus_dmamem_free(sc->stats_tag, sc->stats_block,
2044 bus_dma_tag_destroy(sc->stats_tag);
2047 /* Destroy the context memory (CTX) DMA resources. */
2048 if (sc->ctx_tag != NULL) {
2049 for (i = 0; i < sc->ctx_pages; i++) {
2050 if (sc->ctx_block[i] != NULL) {
2051 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
2052 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2056 bus_dma_tag_destroy(sc->ctx_tag);
2060 if (sc->tx_rings != NULL) {
2061 for (i = 0; i < sc->tx_ring_cnt; ++i)
2062 bce_destroy_tx_ring(&sc->tx_rings[i]);
2063 kfree(sc->tx_rings, M_DEVBUF);
2067 if (sc->rx_rings != NULL) {
2068 for (i = 0; i < sc->rx_ring_cnt; ++i)
2069 bce_destroy_rx_ring(&sc->rx_rings[i]);
2070 kfree(sc->rx_rings, M_DEVBUF);
2073 /* Destroy the parent tag */
2074 if (sc->parent_tag != NULL)
2075 bus_dma_tag_destroy(sc->parent_tag);
2078 /****************************************************************************/
2079 /* Get DMA memory from the OS. */
2081 /* Validates that the OS has provided DMA buffers in response to a */
2082 /* bus_dmamap_load() call and saves the physical address of those buffers. */
2083 /* The callback receives a single DMA segment and stores that segment's */
2084 /* physical address in the caller-supplied bus_addr_t. Mapping failures */
2085 /* are reported via the error argument and bus_dmamap_load()'s return value. */
2089 /****************************************************************************/
2091 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2093 bus_addr_t *busaddr = arg;
2095 /* Check for an error and signal the caller that an error occurred. */
2099 KASSERT(nseg == 1, ("only one segment is allowed"));
2100 *busaddr = segs->ds_addr;
2104 bce_create_tx_ring(struct bce_tx_ring *txr)
2108 lwkt_serialize_init(&txr->tx_serialize);
2109 txr->tx_wreg = bce_tx_wreg;
2111 pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
2112 if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
2113 device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
2114 pages = TX_PAGES_DEFAULT;
2116 txr->tx_pages = pages;
2118 txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
2119 M_DEVBUF, M_WAITOK | M_ZERO);
2120 txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
2121 M_DEVBUF, M_WAITOK | M_ZERO);
2122 txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
2123 M_DEVBUF, M_WAITOK | M_ZERO);
2125 txr->tx_bufs = kmalloc_cachealign(
2126 sizeof(struct bce_tx_buf) * TOTAL_TX_BD(txr),
2127 M_DEVBUF, M_WAITOK | M_ZERO);
2130 * Create a DMA tag for the TX buffer descriptor chain,
2131 * allocate and clear the memory, and fetch the
2132 * physical address of the block.
2134 rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2135 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2136 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2137 0, &txr->tx_bd_chain_tag);
2139 device_printf(txr->sc->bce_dev, "Could not allocate "
2140 "TX descriptor chain DMA tag!\n");
2144 for (i = 0; i < txr->tx_pages; i++) {
2147 rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
2148 (void **)&txr->tx_bd_chain[i],
2149 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2150 &txr->tx_bd_chain_map[i]);
2152 device_printf(txr->sc->bce_dev,
2153 "Could not allocate %dth TX descriptor "
2154 "chain DMA memory!\n", i);
2158 rc = bus_dmamap_load(txr->tx_bd_chain_tag,
2159 txr->tx_bd_chain_map[i],
2160 txr->tx_bd_chain[i],
2161 BCE_TX_CHAIN_PAGE_SZ,
2162 bce_dma_map_addr, &busaddr,
2165 if (rc == EINPROGRESS) {
2166 panic("%s coherent memory loading "
2167 "is still in progress!",
2168 txr->sc->arpcom.ac_if.if_xname);
2170 device_printf(txr->sc->bce_dev, "Could not map %dth "
2171 "TX descriptor chain DMA memory!\n", i);
2172 bus_dmamem_free(txr->tx_bd_chain_tag,
2173 txr->tx_bd_chain[i],
2174 txr->tx_bd_chain_map[i]);
2175 txr->tx_bd_chain[i] = NULL;
2179 txr->tx_bd_chain_paddr[i] = busaddr;
2182 /* Create a DMA tag for TX mbufs. */
2183 rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
2184 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2185 IP_MAXPACKET + sizeof(struct ether_vlan_header),
2186 BCE_MAX_SEGMENTS, PAGE_SIZE,
2187 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2190 device_printf(txr->sc->bce_dev,
2191 "Could not allocate TX mbuf DMA tag!\n");
2195 /* Create DMA maps for the TX mbuf clusters. */
2196 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
2197 rc = bus_dmamap_create(txr->tx_mbuf_tag,
2198 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2199 &txr->tx_bufs[i].tx_mbuf_map);
2203 for (j = 0; j < i; ++j) {
2204 bus_dmamap_destroy(txr->tx_mbuf_tag,
2205 txr->tx_bufs[j].tx_mbuf_map);
2207 bus_dma_tag_destroy(txr->tx_mbuf_tag);
2208 txr->tx_mbuf_tag = NULL;
2210 device_printf(txr->sc->bce_dev, "Unable to create "
2211 "%dth TX mbuf DMA map!\n", i);
2219 bce_create_rx_ring(struct bce_rx_ring *rxr)
2223 lwkt_serialize_init(&rxr->rx_serialize);
2225 pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
2226 if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
2227 device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
2228 pages = RX_PAGES_DEFAULT;
2230 rxr->rx_pages = pages;
2232 rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
2233 M_DEVBUF, M_WAITOK | M_ZERO);
2234 rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
2235 M_DEVBUF, M_WAITOK | M_ZERO);
2236 rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
2237 M_DEVBUF, M_WAITOK | M_ZERO);
2239 rxr->rx_bufs = kmalloc_cachealign(
2240 sizeof(struct bce_rx_buf) * TOTAL_RX_BD(rxr),
2241 M_DEVBUF, M_WAITOK | M_ZERO);
2244 * Create a DMA tag for the RX buffer descriptor chain,
2245 * allocate and clear the memory, and fetch the physical
2246 * address of the blocks.
2248 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2249 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2250 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2251 0, &rxr->rx_bd_chain_tag);
2253 device_printf(rxr->sc->bce_dev, "Could not allocate "
2254 "RX descriptor chain DMA tag!\n");
2258 for (i = 0; i < rxr->rx_pages; i++) {
2261 rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
2262 (void **)&rxr->rx_bd_chain[i],
2263 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2264 &rxr->rx_bd_chain_map[i]);
2266 device_printf(rxr->sc->bce_dev,
2267 "Could not allocate %dth RX descriptor "
2268 "chain DMA memory!\n", i);
2272 rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
2273 rxr->rx_bd_chain_map[i],
2274 rxr->rx_bd_chain[i],
2275 BCE_RX_CHAIN_PAGE_SZ,
2276 bce_dma_map_addr, &busaddr,
2279 if (rc == EINPROGRESS) {
2280 panic("%s coherent memory loading "
2281 "is still in progress!",
2282 rxr->sc->arpcom.ac_if.if_xname);
2284 device_printf(rxr->sc->bce_dev,
2285 "Could not map %dth RX descriptor "
2286 "chain DMA memory!\n", i);
2287 bus_dmamem_free(rxr->rx_bd_chain_tag,
2288 rxr->rx_bd_chain[i],
2289 rxr->rx_bd_chain_map[i]);
2290 rxr->rx_bd_chain[i] = NULL;
2294 rxr->rx_bd_chain_paddr[i] = busaddr;
2297 /* Create a DMA tag for RX mbufs. */
2298 rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
2299 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2300 MCLBYTES, 1, MCLBYTES,
2301 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
2304 device_printf(rxr->sc->bce_dev,
2305 "Could not allocate RX mbuf DMA tag!\n");
2309 /* Create tmp DMA map for RX mbuf clusters. */
2310 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2311 &rxr->rx_mbuf_tmpmap);
2313 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2314 rxr->rx_mbuf_tag = NULL;
2316 device_printf(rxr->sc->bce_dev,
2317 "Could not create RX mbuf tmp DMA map!\n");
2321 /* Create DMA maps for the RX mbuf clusters. */
2322 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2323 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2324 &rxr->rx_bufs[i].rx_mbuf_map);
2328 for (j = 0; j < i; ++j) {
2329 bus_dmamap_destroy(rxr->rx_mbuf_tag,
2330 rxr->rx_bufs[j].rx_mbuf_map);
2332 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2333 rxr->rx_mbuf_tag = NULL;
2335 device_printf(rxr->sc->bce_dev, "Unable to create "
2336 "%dth RX mbuf DMA map!\n", i);
2343 /****************************************************************************/
2344 /* Allocate any DMA memory needed by the driver. */
2346 /* Allocates DMA memory needed for the various global structures needed by */
2349 /* Memory alignment requirements: */
2350 /* -----------------+----------+----------+----------+----------+ */
2351 /* Data Structure | 5706 | 5708 | 5709 | 5716 | */
2352 /* -----------------+----------+----------+----------+----------+ */
2353 /* Status Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2354 /* Statistics Block | 8 bytes | 8 bytes | 16 bytes | 16 bytes | */
2355 /* RX Buffers | 16 bytes | 16 bytes | 16 bytes | 16 bytes | */
2356 /* PG Buffers | none | none | none | none | */
2357 /* TX Buffers | none | none | none | none | */
2358 /* Chain Pages(1) | 4KiB | 4KiB | 4KiB | 4KiB | */
2359 /* Context Pages(1) | N/A | N/A | 4KiB | 4KiB | */
2360 /* -----------------+----------+----------+----------+----------+ */
2362 /* (1) Must be aligned to the CPU page size (BCM_PAGE_SIZE). */
2365 /* 0 for success, positive value for failure. */
2366 /****************************************************************************/
2368 bce_dma_alloc(struct bce_softc *sc)
2370 struct ifnet *ifp = &sc->arpcom.ac_if;
2372 bus_addr_t busaddr, max_busaddr;
2373 bus_size_t status_align, stats_align, status_size;
2376 * The embedded PCIe to PCI-X bridge (EPB)
2377 * in the 5708 cannot address memory above
2378 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
2380 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
2381 max_busaddr = BCE_BUS_SPACE_MAXADDR;
2383 max_busaddr = BUS_SPACE_MAXADDR;
2386 * The BCM5709 and BCM5716 use host memory as a cache for context memory.
2388 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2389 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2390 sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
2391 if (sc->ctx_pages == 0)
2393 if (sc->ctx_pages > BCE_CTX_PAGES) {
2394 device_printf(sc->bce_dev, "excessive ctx pages %d\n",
2406 * Each MSI-X vector needs a status block; each status block
2407 * consumes 128 bytes and is 128-byte aligned.
2409 if (sc->rx_ring_cnt > 1) {
2410 status_size = BCE_MSIX_MAX * BCE_STATUS_BLK_MSIX_ALIGN;
2411 status_align = BCE_STATUS_BLK_MSIX_ALIGN;
2413 status_size = BCE_STATUS_BLK_SZ;
2417 * Allocate the parent bus DMA tag appropriate for PCI.
2419 rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2420 max_busaddr, BUS_SPACE_MAXADDR,
2422 BUS_SPACE_MAXSIZE_32BIT, 0,
2423 BUS_SPACE_MAXSIZE_32BIT,
2424 0, &sc->parent_tag);
2426 if_printf(ifp, "Could not allocate parent DMA tag!\n");
2431 * Allocate status block.
2433 sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2434 status_align, status_size,
2435 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2436 &sc->status_tag, &sc->status_map,
2437 &sc->status_block_paddr);
2438 if (sc->status_block == NULL) {
2439 if_printf(ifp, "Could not allocate status block!\n");
2444 * Allocate statistics block.
2446 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2447 stats_align, BCE_STATS_BLK_SZ,
2448 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2449 &sc->stats_tag, &sc->stats_map,
2450 &sc->stats_block_paddr);
2451 if (sc->stats_block == NULL) {
2452 if_printf(ifp, "Could not allocate statistics block!\n");
2457 * Allocate context block, if needed
2459 if (sc->ctx_pages != 0) {
2460 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2461 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2463 BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
2466 if_printf(ifp, "Could not allocate "
2467 "context block DMA tag!\n");
2471 for (i = 0; i < sc->ctx_pages; i++) {
2472 rc = bus_dmamem_alloc(sc->ctx_tag,
2473 (void **)&sc->ctx_block[i],
2474 BUS_DMA_WAITOK | BUS_DMA_ZERO |
2478 if_printf(ifp, "Could not allocate %dth context "
2479 "DMA memory!\n", i);
2483 rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
2484 sc->ctx_block[i], BCM_PAGE_SIZE,
2485 bce_dma_map_addr, &busaddr,
2488 if (rc == EINPROGRESS) {
2489 panic("%s coherent memory loading "
2490 "is still in progress!", ifp->if_xname);
2492 if_printf(ifp, "Could not map %dth context "
2493 "DMA memory!\n", i);
2494 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2496 sc->ctx_block[i] = NULL;
2499 sc->ctx_paddr[i] = busaddr;
2503 sc->tx_rings = kmalloc_cachealign(
2504 sizeof(struct bce_tx_ring) * sc->tx_ring_cnt, M_DEVBUF,
2506 for (i = 0; i < sc->tx_ring_cnt; ++i) {
2507 sc->tx_rings[i].sc = sc;
2509 sc->tx_rings[i].tx_cid = TX_CID;
2510 sc->tx_rings[i].tx_hw_cons =
2511 &sc->status_block->status_tx_quick_consumer_index0;
2513 struct status_block_msix *sblk =
2514 (struct status_block_msix *)
2515 (((uint8_t *)(sc->status_block)) +
2516 (i * BCE_STATUS_BLK_MSIX_ALIGN));
2518 sc->tx_rings[i].tx_cid = TX_TSS_CID + i - 1;
2519 sc->tx_rings[i].tx_hw_cons =
2520 &sblk->status_tx_quick_consumer_index;
2523 rc = bce_create_tx_ring(&sc->tx_rings[i]);
2525 device_printf(sc->bce_dev,
2526 "can't create %dth tx ring\n", i);
2531 sc->rx_rings = kmalloc_cachealign(
2532 sizeof(struct bce_rx_ring) * sc->rx_ring_cnt, M_DEVBUF,
2534 for (i = 0; i < sc->rx_ring_cnt; ++i) {
2535 sc->rx_rings[i].sc = sc;
2536 sc->rx_rings[i].idx = i;
2538 sc->rx_rings[i].rx_cid = RX_CID;
2539 sc->rx_rings[i].rx_hw_cons =
2540 &sc->status_block->status_rx_quick_consumer_index0;
2541 sc->rx_rings[i].hw_status_idx =
2542 &sc->status_block->status_idx;
2544 struct status_block_msix *sblk =
2545 (struct status_block_msix *)
2546 (((uint8_t *)(sc->status_block)) +
2547 (i * BCE_STATUS_BLK_MSIX_ALIGN));
2549 sc->rx_rings[i].rx_cid = RX_RSS_CID + i - 1;
2550 sc->rx_rings[i].rx_hw_cons =
2551 &sblk->status_rx_quick_consumer_index;
2552 sc->rx_rings[i].hw_status_idx = &sblk->status_idx;
2555 rc = bce_create_rx_ring(&sc->rx_rings[i]);
2557 device_printf(sc->bce_dev,
2558 "can't create %dth rx ring\n", i);
2566 /****************************************************************************/
2567 /* Firmware synchronization. */
2569 /* Before performing certain events such as a chip reset, synchronize with */
2570 /* the firmware first. */
2573 /* 0 for success, positive value for failure. */
2574 /****************************************************************************/
2576 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2581 /* Don't waste any time if we've timed out before. */
2582 if (sc->bce_fw_timed_out)
2585 /* Increment the message sequence number. */
2586 sc->bce_fw_wr_seq++;
2587 msg_data |= sc->bce_fw_wr_seq;
2589 /* Send the message to the bootcode driver mailbox. */
2590 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2592 /* Wait for the bootcode to acknowledge the message. */
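	/*
	 * The bootcode echoes the low-order sequence bits of the request
	 * back in its own mailbox (BCE_FW_MB); poll until the echoed
	 * sequence matches the one written above.
	 */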
2593 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2594 /* Check for a response in the bootcode firmware mailbox. */
2595 val = bce_shmem_rd(sc, BCE_FW_MB);
2596 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2601 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2602 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2603 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2604 if_printf(&sc->arpcom.ac_if,
2605 "Firmware synchronization timeout! "
2606 "msg_data = 0x%08X\n", msg_data);
2608 msg_data &= ~BCE_DRV_MSG_CODE;
2609 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2611 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2613 sc->bce_fw_timed_out = 1;
2619 /****************************************************************************/
2620 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2624 /****************************************************************************/
2626 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2627 uint32_t rv2p_code_len, uint32_t rv2p_proc)
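	/*
	 * RV2P instructions are 64 bits wide.  Write the two 32-bit halves
	 * of each instruction to the INSTR_HIGH/INSTR_LOW registers, then
	 * latch them into the selected processor at instruction index i / 8.
	 */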
2632 for (i = 0; i < rv2p_code_len; i += 8) {
2633 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2635 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2638 if (rv2p_proc == RV2P_PROC1) {
2639 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2640 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2642 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2643 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2647 /* Reset the processor; the un-stall is done later. */
2648 if (rv2p_proc == RV2P_PROC1)
2649 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2651 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2654 /****************************************************************************/
2655 /* Load RISC processor firmware. */
2657 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */
2658 /* associated with a particular processor. */
2662 /****************************************************************************/
2664 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2670 bce_halt_cpu(sc, cpu_reg);
2672 /* Load the Text area. */
2673 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2675 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2676 REG_WR_IND(sc, offset, fw->text[j]);
2679 /* Load the Data area. */
2680 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2682 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2683 REG_WR_IND(sc, offset, fw->data[j]);
2686 /* Load the SBSS area. */
2687 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2689 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2690 REG_WR_IND(sc, offset, fw->sbss[j]);
2693 /* Load the BSS area. */
2694 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2696 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2697 REG_WR_IND(sc, offset, fw->bss[j]);
2700 /* Load the Read-Only area. */
2701 offset = cpu_reg->spad_base +
2702 (fw->rodata_addr - cpu_reg->mips_view_base);
2704 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2705 REG_WR_IND(sc, offset, fw->rodata[j]);
2708 /* Clear the pre-fetch instruction and set the FW start address. */
2709 REG_WR_IND(sc, cpu_reg->inst, 0);
2710 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2713 /****************************************************************************/
2714 /* Starts the RISC processor. */
2716 /* Assumes the CPU starting address has already been set. */
2720 /****************************************************************************/
2722 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2726 /* Start the CPU. */
2727 val = REG_RD_IND(sc, cpu_reg->mode);
2728 val &= ~cpu_reg->mode_value_halt;
2729 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2730 REG_WR_IND(sc, cpu_reg->mode, val);
2733 /****************************************************************************/
2734 /* Halts the RISC processor. */
2738 /****************************************************************************/
2740 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2745 val = REG_RD_IND(sc, cpu_reg->mode);
2746 val |= cpu_reg->mode_value_halt;
2747 REG_WR_IND(sc, cpu_reg->mode, val);
2748 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2751 /****************************************************************************/
2752 /* Start the RX CPU. */
2756 /****************************************************************************/
2758 bce_start_rxp_cpu(struct bce_softc *sc)
2760 struct cpu_reg cpu_reg;
2762 cpu_reg.mode = BCE_RXP_CPU_MODE;
2763 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2764 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2765 cpu_reg.state = BCE_RXP_CPU_STATE;
2766 cpu_reg.state_value_clear = 0xffffff;
2767 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2768 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2769 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2770 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2771 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2772 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2773 cpu_reg.mips_view_base = 0x8000000;
2775 bce_start_cpu(sc, &cpu_reg);
2778 /****************************************************************************/
2779 /* Initialize the RX CPU. */
2783 /****************************************************************************/
2785 bce_init_rxp_cpu(struct bce_softc *sc)
2787 struct cpu_reg cpu_reg;
2790 cpu_reg.mode = BCE_RXP_CPU_MODE;
2791 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2792 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2793 cpu_reg.state = BCE_RXP_CPU_STATE;
2794 cpu_reg.state_value_clear = 0xffffff;
2795 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2796 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2797 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2798 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2799 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2800 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2801 cpu_reg.mips_view_base = 0x8000000;
2803 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2804 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2805 fw.ver_major = bce_RXP_b09FwReleaseMajor;
2806 fw.ver_minor = bce_RXP_b09FwReleaseMinor;
2807 fw.ver_fix = bce_RXP_b09FwReleaseFix;
2808 fw.start_addr = bce_RXP_b09FwStartAddr;
2810 fw.text_addr = bce_RXP_b09FwTextAddr;
2811 fw.text_len = bce_RXP_b09FwTextLen;
2813 fw.text = bce_RXP_b09FwText;
2815 fw.data_addr = bce_RXP_b09FwDataAddr;
2816 fw.data_len = bce_RXP_b09FwDataLen;
2818 fw.data = bce_RXP_b09FwData;
2820 fw.sbss_addr = bce_RXP_b09FwSbssAddr;
2821 fw.sbss_len = bce_RXP_b09FwSbssLen;
2823 fw.sbss = bce_RXP_b09FwSbss;
2825 fw.bss_addr = bce_RXP_b09FwBssAddr;
2826 fw.bss_len = bce_RXP_b09FwBssLen;
2828 fw.bss = bce_RXP_b09FwBss;
2830 fw.rodata_addr = bce_RXP_b09FwRodataAddr;
2831 fw.rodata_len = bce_RXP_b09FwRodataLen;
2832 fw.rodata_index = 0;
2833 fw.rodata = bce_RXP_b09FwRodata;
2835 fw.ver_major = bce_RXP_b06FwReleaseMajor;
2836 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2837 fw.ver_fix = bce_RXP_b06FwReleaseFix;
2838 fw.start_addr = bce_RXP_b06FwStartAddr;
2840 fw.text_addr = bce_RXP_b06FwTextAddr;
2841 fw.text_len = bce_RXP_b06FwTextLen;
2843 fw.text = bce_RXP_b06FwText;
2845 fw.data_addr = bce_RXP_b06FwDataAddr;
2846 fw.data_len = bce_RXP_b06FwDataLen;
2848 fw.data = bce_RXP_b06FwData;
2850 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2851 fw.sbss_len = bce_RXP_b06FwSbssLen;
2853 fw.sbss = bce_RXP_b06FwSbss;
2855 fw.bss_addr = bce_RXP_b06FwBssAddr;
2856 fw.bss_len = bce_RXP_b06FwBssLen;
2858 fw.bss = bce_RXP_b06FwBss;
2860 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2861 fw.rodata_len = bce_RXP_b06FwRodataLen;
2862 fw.rodata_index = 0;
2863 fw.rodata = bce_RXP_b06FwRodata;
2866 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2867 /* Delay RXP start until initialization is complete. */
2870 /****************************************************************************/
2871 /* Initialize the TX CPU. */
2875 /****************************************************************************/
2877 bce_init_txp_cpu(struct bce_softc *sc)
2879 struct cpu_reg cpu_reg;
2882 cpu_reg.mode = BCE_TXP_CPU_MODE;
2883 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2884 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2885 cpu_reg.state = BCE_TXP_CPU_STATE;
2886 cpu_reg.state_value_clear = 0xffffff;
2887 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2888 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2889 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2890 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2891 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2892 cpu_reg.spad_base = BCE_TXP_SCRATCH;
2893 cpu_reg.mips_view_base = 0x8000000;
2895 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2896 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2897 fw.ver_major = bce_TXP_b09FwReleaseMajor;
2898 fw.ver_minor = bce_TXP_b09FwReleaseMinor;
2899 fw.ver_fix = bce_TXP_b09FwReleaseFix;
2900 fw.start_addr = bce_TXP_b09FwStartAddr;
2902 fw.text_addr = bce_TXP_b09FwTextAddr;
2903 fw.text_len = bce_TXP_b09FwTextLen;
2905 fw.text = bce_TXP_b09FwText;
2907 fw.data_addr = bce_TXP_b09FwDataAddr;
2908 fw.data_len = bce_TXP_b09FwDataLen;
2910 fw.data = bce_TXP_b09FwData;
2912 fw.sbss_addr = bce_TXP_b09FwSbssAddr;
2913 fw.sbss_len = bce_TXP_b09FwSbssLen;
2915 fw.sbss = bce_TXP_b09FwSbss;
2917 fw.bss_addr = bce_TXP_b09FwBssAddr;
2918 fw.bss_len = bce_TXP_b09FwBssLen;
2920 fw.bss = bce_TXP_b09FwBss;
2922 fw.rodata_addr = bce_TXP_b09FwRodataAddr;
2923 fw.rodata_len = bce_TXP_b09FwRodataLen;
2924 fw.rodata_index = 0;
2925 fw.rodata = bce_TXP_b09FwRodata;
2927 fw.ver_major = bce_TXP_b06FwReleaseMajor;
2928 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2929 fw.ver_fix = bce_TXP_b06FwReleaseFix;
2930 fw.start_addr = bce_TXP_b06FwStartAddr;
2932 fw.text_addr = bce_TXP_b06FwTextAddr;
2933 fw.text_len = bce_TXP_b06FwTextLen;
2935 fw.text = bce_TXP_b06FwText;
2937 fw.data_addr = bce_TXP_b06FwDataAddr;
2938 fw.data_len = bce_TXP_b06FwDataLen;
2940 fw.data = bce_TXP_b06FwData;
2942 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2943 fw.sbss_len = bce_TXP_b06FwSbssLen;
2945 fw.sbss = bce_TXP_b06FwSbss;
2947 fw.bss_addr = bce_TXP_b06FwBssAddr;
2948 fw.bss_len = bce_TXP_b06FwBssLen;
2950 fw.bss = bce_TXP_b06FwBss;
2952 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2953 fw.rodata_len = bce_TXP_b06FwRodataLen;
2954 fw.rodata_index = 0;
2955 fw.rodata = bce_TXP_b06FwRodata;
2958 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2959 bce_start_cpu(sc, &cpu_reg);
2962 /****************************************************************************/
2963 /* Initialize the TPAT CPU. */
2967 /****************************************************************************/
2969 bce_init_tpat_cpu(struct bce_softc *sc)
2971 struct cpu_reg cpu_reg;
2974 cpu_reg.mode = BCE_TPAT_CPU_MODE;
2975 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2976 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2977 cpu_reg.state = BCE_TPAT_CPU_STATE;
2978 cpu_reg.state_value_clear = 0xffffff;
2979 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2980 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2981 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2982 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2983 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2984 cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2985 cpu_reg.mips_view_base = 0x8000000;
2987 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2988 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2989 fw.ver_major = bce_TPAT_b09FwReleaseMajor;
2990 fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
2991 fw.ver_fix = bce_TPAT_b09FwReleaseFix;
2992 fw.start_addr = bce_TPAT_b09FwStartAddr;
2994 fw.text_addr = bce_TPAT_b09FwTextAddr;
2995 fw.text_len = bce_TPAT_b09FwTextLen;
2997 fw.text = bce_TPAT_b09FwText;
2999 fw.data_addr = bce_TPAT_b09FwDataAddr;
3000 fw.data_len = bce_TPAT_b09FwDataLen;
3002 fw.data = bce_TPAT_b09FwData;
3004 fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
3005 fw.sbss_len = bce_TPAT_b09FwSbssLen;
3007 fw.sbss = bce_TPAT_b09FwSbss;
3009 fw.bss_addr = bce_TPAT_b09FwBssAddr;
3010 fw.bss_len = bce_TPAT_b09FwBssLen;
3012 fw.bss = bce_TPAT_b09FwBss;
3014 fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
3015 fw.rodata_len = bce_TPAT_b09FwRodataLen;
3016 fw.rodata_index = 0;
3017 fw.rodata = bce_TPAT_b09FwRodata;
3019 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3020 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3021 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3022 fw.start_addr = bce_TPAT_b06FwStartAddr;
3024 fw.text_addr = bce_TPAT_b06FwTextAddr;
3025 fw.text_len = bce_TPAT_b06FwTextLen;
3027 fw.text = bce_TPAT_b06FwText;
3029 fw.data_addr = bce_TPAT_b06FwDataAddr;
3030 fw.data_len = bce_TPAT_b06FwDataLen;
3032 fw.data = bce_TPAT_b06FwData;
3034 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3035 fw.sbss_len = bce_TPAT_b06FwSbssLen;
3037 fw.sbss = bce_TPAT_b06FwSbss;
3039 fw.bss_addr = bce_TPAT_b06FwBssAddr;
3040 fw.bss_len = bce_TPAT_b06FwBssLen;
3042 fw.bss = bce_TPAT_b06FwBss;
3044 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3045 fw.rodata_len = bce_TPAT_b06FwRodataLen;
3046 fw.rodata_index = 0;
3047 fw.rodata = bce_TPAT_b06FwRodata;
3050 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3051 bce_start_cpu(sc, &cpu_reg);
3054 /****************************************************************************/
3055 /* Initialize the CP CPU. */
3059 /****************************************************************************/
3061 bce_init_cp_cpu(struct bce_softc *sc)
3063 struct cpu_reg cpu_reg;
3066 cpu_reg.mode = BCE_CP_CPU_MODE;
3067 cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3068 cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3069 cpu_reg.state = BCE_CP_CPU_STATE;
3070 cpu_reg.state_value_clear = 0xffffff;
3071 cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3072 cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3073 cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3074 cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3075 cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3076 cpu_reg.spad_base = BCE_CP_SCRATCH;
3077 cpu_reg.mips_view_base = 0x8000000;
3079 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3080 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3081 fw.ver_major = bce_CP_b09FwReleaseMajor;
3082 fw.ver_minor = bce_CP_b09FwReleaseMinor;
3083 fw.ver_fix = bce_CP_b09FwReleaseFix;
3084 fw.start_addr = bce_CP_b09FwStartAddr;
3086 fw.text_addr = bce_CP_b09FwTextAddr;
3087 fw.text_len = bce_CP_b09FwTextLen;
3089 fw.text = bce_CP_b09FwText;
3091 fw.data_addr = bce_CP_b09FwDataAddr;
3092 fw.data_len = bce_CP_b09FwDataLen;
3094 fw.data = bce_CP_b09FwData;
3096 fw.sbss_addr = bce_CP_b09FwSbssAddr;
3097 fw.sbss_len = bce_CP_b09FwSbssLen;
3099 fw.sbss = bce_CP_b09FwSbss;
3101 fw.bss_addr = bce_CP_b09FwBssAddr;
3102 fw.bss_len = bce_CP_b09FwBssLen;
3104 fw.bss = bce_CP_b09FwBss;
3106 fw.rodata_addr = bce_CP_b09FwRodataAddr;
3107 fw.rodata_len = bce_CP_b09FwRodataLen;
3108 fw.rodata_index = 0;
3109 fw.rodata = bce_CP_b09FwRodata;
3111 fw.ver_major = bce_CP_b06FwReleaseMajor;
3112 fw.ver_minor = bce_CP_b06FwReleaseMinor;
3113 fw.ver_fix = bce_CP_b06FwReleaseFix;
3114 fw.start_addr = bce_CP_b06FwStartAddr;
3116 fw.text_addr = bce_CP_b06FwTextAddr;
3117 fw.text_len = bce_CP_b06FwTextLen;
3119 fw.text = bce_CP_b06FwText;
3121 fw.data_addr = bce_CP_b06FwDataAddr;
3122 fw.data_len = bce_CP_b06FwDataLen;
3124 fw.data = bce_CP_b06FwData;
3126 fw.sbss_addr = bce_CP_b06FwSbssAddr;
3127 fw.sbss_len = bce_CP_b06FwSbssLen;
3129 fw.sbss = bce_CP_b06FwSbss;
3131 fw.bss_addr = bce_CP_b06FwBssAddr;
3132 fw.bss_len = bce_CP_b06FwBssLen;
3134 fw.bss = bce_CP_b06FwBss;
3136 fw.rodata_addr = bce_CP_b06FwRodataAddr;
3137 fw.rodata_len = bce_CP_b06FwRodataLen;
3138 fw.rodata_index = 0;
3139 fw.rodata = bce_CP_b06FwRodata;
3142 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3143 bce_start_cpu(sc, &cpu_reg);
3146 /****************************************************************************/
3147 /* Initialize the COM CPU. */
3151 /****************************************************************************/
3153 bce_init_com_cpu(struct bce_softc *sc)
3155 struct cpu_reg cpu_reg;
3158 cpu_reg.mode = BCE_COM_CPU_MODE;
3159 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3160 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3161 cpu_reg.state = BCE_COM_CPU_STATE;
3162 cpu_reg.state_value_clear = 0xffffff;
3163 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3164 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3165 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3166 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3167 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3168 cpu_reg.spad_base = BCE_COM_SCRATCH;
3169 cpu_reg.mips_view_base = 0x8000000;
3171 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3172 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3173 fw.ver_major = bce_COM_b09FwReleaseMajor;
3174 fw.ver_minor = bce_COM_b09FwReleaseMinor;
3175 fw.ver_fix = bce_COM_b09FwReleaseFix;
3176 fw.start_addr = bce_COM_b09FwStartAddr;
3178 fw.text_addr = bce_COM_b09FwTextAddr;
3179 fw.text_len = bce_COM_b09FwTextLen;
3181 fw.text = bce_COM_b09FwText;
3183 fw.data_addr = bce_COM_b09FwDataAddr;
3184 fw.data_len = bce_COM_b09FwDataLen;
3186 fw.data = bce_COM_b09FwData;
3188 fw.sbss_addr = bce_COM_b09FwSbssAddr;
3189 fw.sbss_len = bce_COM_b09FwSbssLen;
3191 fw.sbss = bce_COM_b09FwSbss;
3193 fw.bss_addr = bce_COM_b09FwBssAddr;
3194 fw.bss_len = bce_COM_b09FwBssLen;
3196 fw.bss = bce_COM_b09FwBss;
3198 fw.rodata_addr = bce_COM_b09FwRodataAddr;
3199 fw.rodata_len = bce_COM_b09FwRodataLen;
3200 fw.rodata_index = 0;
3201 fw.rodata = bce_COM_b09FwRodata;
3203 fw.ver_major = bce_COM_b06FwReleaseMajor;
3204 fw.ver_minor = bce_COM_b06FwReleaseMinor;
3205 fw.ver_fix = bce_COM_b06FwReleaseFix;
3206 fw.start_addr = bce_COM_b06FwStartAddr;
3208 fw.text_addr = bce_COM_b06FwTextAddr;
3209 fw.text_len = bce_COM_b06FwTextLen;
3211 fw.text = bce_COM_b06FwText;
3213 fw.data_addr = bce_COM_b06FwDataAddr;
3214 fw.data_len = bce_COM_b06FwDataLen;
3216 fw.data = bce_COM_b06FwData;
3218 fw.sbss_addr = bce_COM_b06FwSbssAddr;
3219 fw.sbss_len = bce_COM_b06FwSbssLen;
3221 fw.sbss = bce_COM_b06FwSbss;
3223 fw.bss_addr = bce_COM_b06FwBssAddr;
3224 fw.bss_len = bce_COM_b06FwBssLen;
3226 fw.bss = bce_COM_b06FwBss;
3228 fw.rodata_addr = bce_COM_b06FwRodataAddr;
3229 fw.rodata_len = bce_COM_b06FwRodataLen;
3230 fw.rodata_index = 0;
3231 fw.rodata = bce_COM_b06FwRodata;
3234 bce_load_cpu_fw(sc, &cpu_reg, &fw);
3235 bce_start_cpu(sc, &cpu_reg);
3238 /****************************************************************************/
3239 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs. */
3241 /* Loads the firmware for each CPU and starts the CPU. */
3245 /****************************************************************************/
3247 bce_init_cpus(struct bce_softc *sc)
3249 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3250 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3251 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3252 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3253 sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3254 bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3255 sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3257 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3258 sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3259 bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3260 sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3263 bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3264 sizeof(bce_rv2p_proc1), RV2P_PROC1);
3265 bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3266 sizeof(bce_rv2p_proc2), RV2P_PROC2);
3269 bce_init_rxp_cpu(sc);
3270 bce_init_txp_cpu(sc);
3271 bce_init_tpat_cpu(sc);
3272 bce_init_com_cpu(sc);
3273 bce_init_cp_cpu(sc);
3276 /****************************************************************************/
3277 /* Initialize context memory. */
3279 /* Clears the memory associated with each Context ID (CID). */
3283 /****************************************************************************/
3285 bce_init_ctx(struct bce_softc *sc)
3287 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3288 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3289 /* DRC: Replace this constant value with a #define. */
3290 int i, retry_cnt = 10;
3294 * BCM5709 context memory may be cached
3295 * in host memory so prepare the host memory
3298 val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
3300 val |= (BCM_PAGE_BITS - 8) << 16;
3301 REG_WR(sc, BCE_CTX_COMMAND, val);
3303 /* Wait for mem init command to complete. */
3304 for (i = 0; i < retry_cnt; i++) {
3305 val = REG_RD(sc, BCE_CTX_COMMAND);
3306 if (!(val & BCE_CTX_COMMAND_MEM_INIT))
3310 if (i == retry_cnt) {
3311 device_printf(sc->bce_dev,
3312 "Context memory initialization failed!\n");
3316 for (i = 0; i < sc->ctx_pages; i++) {
3320 * Set the physical address of the context
3323 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
3324 BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
3325 BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
3326 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
3327 BCE_ADDR_HI(sc->ctx_paddr[i]));
3328 REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
3329 i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3332 * Verify that the context memory write was successful.
3334 for (j = 0; j < retry_cnt; j++) {
3335 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
3337 BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3341 if (j == retry_cnt) {
3342 device_printf(sc->bce_dev,
3343 "Failed to initialize context page!\n");
3348 uint32_t vcid_addr, offset;
3351 * For the 5706/5708, context memory is local to
3352 * the controller, so initialize the controller
3356 vcid_addr = GET_CID_ADDR(96);
3358 vcid_addr -= PHY_CTX_SIZE;
3360 REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
3361 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3363 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3364 CTX_WR(sc, 0x00, offset, 0);
3366 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3367 REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3373 /****************************************************************************/
3374 /* Fetch the permanent MAC address of the controller. */
3378 /****************************************************************************/
3380 bce_get_mac_addr(struct bce_softc *sc)
3382 uint32_t mac_lo = 0, mac_hi = 0;
3385 * The NetXtreme II bootcode populates various NIC
3386 * power-on and runtime configuration items in a
3387 * shared memory area. The factory configured MAC
3388 * address is available from both NVRAM and the
3389 * shared memory area so we'll read the value from
3390 * shared memory for speed.
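 *
 * For example (illustrative address only), a factory MAC of
 * 00:10:18:36:24:12 would be stored as mac_hi = 0x00000010 and
 * mac_lo = 0x18362412; the byte extraction below unpacks those words
 * into sc->eaddr[0] through sc->eaddr[5].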
3393 mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
3394 mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
3396 if (mac_lo == 0 && mac_hi == 0) {
3397 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
3399 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3400 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3401 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3402 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3403 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3404 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3408 /****************************************************************************/
3409 /* Program the MAC address. */
3413 /****************************************************************************/
3415 bce_set_mac_addr(struct bce_softc *sc)
3417 const uint8_t *mac_addr = sc->eaddr;
3420 val = (mac_addr[0] << 8) | mac_addr[1];
3421 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3423 val = (mac_addr[2] << 24) |
3424 (mac_addr[3] << 16) |
3425 (mac_addr[4] << 8) |
3427 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3430 /****************************************************************************/
3431 /* Stop the controller. */
3435 /****************************************************************************/
3437 bce_stop(struct bce_softc *sc)
3439 struct ifnet *ifp = &sc->arpcom.ac_if;
3442 ASSERT_IFNET_SERIALIZED_ALL(ifp);
3444 callout_stop(&sc->bce_tick_callout);
3446 /* Disable the transmit/receive blocks. */
3447 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
3448 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3451 bce_disable_intr(sc);
3453 ifp->if_flags &= ~IFF_RUNNING;
3454 for (i = 0; i < sc->tx_ring_cnt; ++i) {
3455 ifsq_clr_oactive(sc->tx_rings[i].ifsq);
3456 ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
3459 /* Free the RX lists. */
3460 for (i = 0; i < sc->rx_ring_cnt; ++i)
3461 bce_free_rx_chain(&sc->rx_rings[i]);
3463 /* Free TX buffers. */
3464 for (i = 0; i < sc->tx_ring_cnt; ++i)
3465 bce_free_tx_chain(&sc->tx_rings[i]);
3468 sc->bce_coalchg_mask = 0;
3472 bce_reset(struct bce_softc *sc, uint32_t reset_code)
3477 /* Wait for pending PCI transactions to complete. */
3478 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3479 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3480 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3481 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3482 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3483 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3487 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3488 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3489 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3490 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3491 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3494 /* Assume bootcode is running. */
3495 sc->bce_fw_timed_out = 0;
3496 sc->bce_drv_cardiac_arrest = 0;
3498 /* Give the firmware a chance to prepare for the reset. */
3499 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3501 if_printf(&sc->arpcom.ac_if,
3502 "Firmware is not ready for reset\n");
3506 /* Set a firmware reminder that this is a soft reset. */
3507 bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
3508 BCE_DRV_RESET_SIGNATURE_MAGIC);
3510 /* Dummy read to force the chip to complete all current transactions. */
3511 val = REG_RD(sc, BCE_MISC_ID);
3514 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3515 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3516 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
3517 REG_RD(sc, BCE_MISC_COMMAND);
3520 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3521 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3523 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
3525 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3526 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3527 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3528 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3530 /* Allow up to 30us for reset to complete. */
3531 for (i = 0; i < 10; i++) {
3532 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3533 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3534 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3539 /* Check that reset completed successfully. */
3540 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3541 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3542 if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
3547 /* Make sure byte swapping is properly configured. */
3548 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3549 if (val != 0x01020304) {
3550 if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
3554 /* Just completed a reset, assume that firmware is running again. */
3555 sc->bce_fw_timed_out = 0;
3556 sc->bce_drv_cardiac_arrest = 0;
3558 /* Wait for the firmware to finish its initialization. */
3559 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3561 if_printf(&sc->arpcom.ac_if,
3562 "Firmware did not complete initialization!\n");
3565 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3566 bce_setup_msix_table(sc);
3567 /* Prevent MSI-X table reads and writes from timing out. */
3568 REG_WR(sc, BCE_MISC_ECO_HW_CTL,
3569 BCE_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
3576 bce_chipinit(struct bce_softc *sc)
3581 /* Make sure the interrupt is not active. */
3582 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3583 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
3586 * Initialize DMA byte/word swapping, configure the number of DMA
3587 * channels and PCI clock compensation delay.
3589 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3590 BCE_DMA_CONFIG_DATA_WORD_SWAP |
3591 #if BYTE_ORDER == BIG_ENDIAN
3592 BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3594 BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3595 DMA_READ_CHANS << 12 |
3596 DMA_WRITE_CHANS << 16;
3598 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3600 if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3601 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3604 * This setting resolves a problem observed on certain Intel PCI
3605 * chipsets that cannot handle multiple outstanding DMA operations.
3606 * See errata E9_5706A1_65.
3608 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3609 BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3610 !(sc->bce_flags & BCE_PCIX_FLAG))
3611 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3613 REG_WR(sc, BCE_DMA_CONFIG, val);
3615 /* Enable the RX_V2P and Context state machines before access. */
3616 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3617 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3618 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3619 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3621 /* Initialize context mapping and zero out the quick contexts. */
3622 rc = bce_init_ctx(sc);
3626 /* Initialize the on-board CPUs. */
3629 /* Enable management frames (NC-SI) to flow to the MCP. */
3630 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3631 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
3632 BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3633 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3636 /* Prepare NVRAM for access. */
3637 rc = bce_init_nvram(sc);
3641 /* Set the kernel bypass block size */
3642 val = REG_RD(sc, BCE_MQ_CONFIG);
3643 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3644 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3646 /* Enable bins used on the 5709/5716. */
3647 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3648 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3649 val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
3650 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
3651 val |= BCE_MQ_CONFIG_HALT_DIS;
3654 REG_WR(sc, BCE_MQ_CONFIG, val);
3656 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3657 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3658 REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3660 /* Set the page size and clear the RV2P processor stall bits. */
3661 val = (BCM_PAGE_BITS - 8) << 24;
3662 REG_WR(sc, BCE_RV2P_CONFIG, val);
3664 /* Configure page size. */
3665 val = REG_RD(sc, BCE_TBDR_CONFIG);
3666 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3667 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3668 REG_WR(sc, BCE_TBDR_CONFIG, val);
3670 /* Set the perfect match control register to default. */
3671 REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
3676 /****************************************************************************/
3677 /* Initialize the controller in preparation to send/receive traffic. */
3680 /* 0 for success, positive value for failure. */
3681 /****************************************************************************/
3683 bce_blockinit(struct bce_softc *sc)
3688 /* Load the hardware default MAC address. */
3689 bce_set_mac_addr(sc);
3691 /* Set the Ethernet backoff seed value */
3692 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3693 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3694 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3696 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3698 /* Set up link change interrupt generation. */
3699 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3701 /* Program the physical address of the status block. */
3702 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3703 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3705 /* Program the physical address of the statistics block. */
3706 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3707 BCE_ADDR_LO(sc->stats_block_paddr));
3708 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3709 BCE_ADDR_HI(sc->stats_block_paddr));
3711 /* Program various host coalescing parameters. */
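	/*
	 * The quick-consumer-trip and ticks registers below pack the
	 * interrupt-time (_int) value in the upper 16 bits and the normal
	 * value in the lower 16 bits.
	 */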
3712 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3713 (sc->bce_tx_quick_cons_trip_int << 16) |
3714 sc->bce_tx_quick_cons_trip);
3715 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3716 (sc->bce_rx_quick_cons_trip_int << 16) |
3717 sc->bce_rx_quick_cons_trip);
3718 REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3719 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3720 REG_WR(sc, BCE_HC_TX_TICKS,
3721 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3722 REG_WR(sc, BCE_HC_RX_TICKS,
3723 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3724 REG_WR(sc, BCE_HC_COM_TICKS,
3725 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3726 REG_WR(sc, BCE_HC_CMD_TICKS,
3727 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3728 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3729 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3731 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3732 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
3734 val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
3735 if ((sc->bce_flags & BCE_ONESHOT_MSI_FLAG) ||
3736 sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3738 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3739 if_printf(&sc->arpcom.ac_if,
3742 if_printf(&sc->arpcom.ac_if,
3743 "using oneshot MSI\n");
3746 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
3747 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3748 val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
3750 REG_WR(sc, BCE_HC_CONFIG, val);
3752 for (i = 1; i < sc->rx_ring_cnt; ++i) {
3755 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1;
3756 KKASSERT(base <= BCE_HC_SB_CONFIG_8);
3759 BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
3760 /* BCE_HC_SB_CONFIG_1_RX_TMR_MODE | */
3761 BCE_HC_SB_CONFIG_1_ONE_SHOT);
3763 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
3764 (sc->bce_tx_quick_cons_trip_int << 16) |
3765 sc->bce_tx_quick_cons_trip);
3766 REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
3767 (sc->bce_rx_quick_cons_trip_int << 16) |
3768 sc->bce_rx_quick_cons_trip);
3769 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
3770 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3771 REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
3772 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3775 /* Clear the internal statistics counters. */
3776 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3778 /* Verify that bootcode is running. */
3779 reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
3781 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3782 BCE_DEV_INFO_SIGNATURE_MAGIC) {
3783 if_printf(&sc->arpcom.ac_if,
3784 "Bootcode not running! Found: 0x%08X, "
3785 "Expected: 08%08X\n",
3786 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3787 BCE_DEV_INFO_SIGNATURE_MAGIC);
3792 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3793 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3794 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3795 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3796 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3799 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3800 bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3802 /* Enable link state change interrupt generation. */
3803 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3805 /* Enable the RXP. */
3806 bce_start_rxp_cpu(sc);
3808 /* Disable management frames (NC-SI) from flowing to the MCP. */
3809 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3810 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
3811 ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3812 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3815 /* Enable all remaining blocks in the MAC. */
3816 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3817 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3818 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3819 BCE_MISC_ENABLE_DEFAULT_XI);
3821 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
3823 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3826 /* Save the current host coalescing block settings. */
3827 sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
3832 /****************************************************************************/
3833 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3835 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3836 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3840 /* 0 for success, positive value for failure. */
3841 /****************************************************************************/
3843 bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t chain_prod,
3844 uint32_t *prod_bseq, int init)
3846 struct bce_rx_buf *rx_buf;
3848 bus_dma_segment_t seg;
3852 /* This is a new mbuf allocation. */
3853 m_new = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR);
3857 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3859 /* Map the mbuf cluster into DMA-accessible memory. */
3860 error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
3861 rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
3865 if_printf(&rxr->sc->arpcom.ac_if,
3866 "Error mapping mbuf into RX chain!\n");
3871 rx_buf = &rxr->rx_bufs[chain_prod];
3872 if (rx_buf->rx_mbuf_ptr != NULL)
3873 bus_dmamap_unload(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map);
3875 map = rx_buf->rx_mbuf_map;
3876 rx_buf->rx_mbuf_map = rxr->rx_mbuf_tmpmap;
3877 rxr->rx_mbuf_tmpmap = map;
3879 /* Save the mbuf and update our counter. */
3880 rx_buf->rx_mbuf_ptr = m_new;
3881 rx_buf->rx_mbuf_paddr = seg.ds_addr;
3884 bce_setup_rxdesc_std(rxr, chain_prod, prod_bseq);
3890 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
3891 uint32_t *prod_bseq)
3893 const struct bce_rx_buf *rx_buf;
3898 rx_buf = &rxr->rx_bufs[chain_prod];
3899 paddr = rx_buf->rx_mbuf_paddr;
3900 len = rx_buf->rx_mbuf_ptr->m_len;
3902 /* Setup the rx_bd for the first segment. */
3903 rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3905 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3906 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3907 rxbd->rx_bd_len = htole32(len);
3908 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3911 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3914 /****************************************************************************/
3915 /* Initialize the TX context memory. */
3919 /****************************************************************************/
3921 bce_init_tx_context(struct bce_tx_ring *txr)
3925 /* Initialize the context ID for an L2 TX chain. */
3926 if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
3927 BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
3928 /* Set the CID type to support an L2 connection. */
3929 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3930 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3931 BCE_L2CTX_TX_TYPE_XI, val);
3932 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3933 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3934 BCE_L2CTX_TX_CMD_TYPE_XI, val);
3936 /* Point the hardware to the first page in the chain. */
3937 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3938 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3939 BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
3940 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3941 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3942 BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
3944 /* Set the CID type to support an L2 connection. */
3945 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3946 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3947 BCE_L2CTX_TX_TYPE, val);
3948 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3949 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3950 BCE_L2CTX_TX_CMD_TYPE, val);
3952 /* Point the hardware to the first page in the chain. */
3953 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3954 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3955 BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
3956 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3957 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3958 BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
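/*
 * Both branches program the same information (connection type and the
 * physical address of the first tx_bd chain page); only the context
 * register offsets differ, hence the _XI suffixes used for the
 * 5709/5716 controllers above.
 */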
3962 /****************************************************************************/
3963 /* Allocate memory and initialize the TX data structures. */
3966 /* 0 for success, positive value for failure. */
3967 /****************************************************************************/
3969 bce_init_tx_chain(struct bce_tx_ring *txr)
3974 /* Set the initial TX producer/consumer indices. */
3977 txr->tx_prod_bseq = 0;
3978 txr->used_tx_bd = 0;
3979 txr->max_tx_bd = USABLE_TX_BD(txr);
3982 * The NetXtreme II supports a linked-list structure called
3983 * a Buffer Descriptor Chain (or BD chain). A BD chain
3984 * consists of a series of 1 or more chain pages, each of which
3985 * consists of a fixed number of BD entries.
3986 * The last BD entry on each page is a pointer to the next page
3987 * in the chain, and the last pointer in the BD chain
3988 * points back to the beginning of the chain.
3991 /* Set the TX next pointer chain entries. */
3992 for (i = 0; i < txr->tx_pages; i++) {
3995 txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3997 /* Check if we've reached the last page. */
3998 if (i == (txr->tx_pages - 1))
4003 txbd->tx_bd_haddr_hi =
4004 htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
4005 txbd->tx_bd_haddr_lo =
4006 htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
4008 bce_init_tx_context(txr);
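/*
 * For example, with two chain pages the loop above leaves the ring laid
 * out roughly like this (the last entry of each page is the next-page
 * pointer, not a usable descriptor):
 *
 *   page 0: tx_bd[0] ... tx_bd[USABLE_TX_BD_PER_PAGE - 1], [next] -> page 1
 *   page 1: tx_bd[0] ... tx_bd[USABLE_TX_BD_PER_PAGE - 1], [next] -> page 0
 */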
4013 /****************************************************************************/
4014 /* Free memory and clear the TX data structures. */
4018 /****************************************************************************/
4020 bce_free_tx_chain(struct bce_tx_ring *txr)
4024 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4025 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
4026 struct bce_tx_buf *tx_buf = &txr->tx_bufs[i];
4028 if (tx_buf->tx_mbuf_ptr != NULL) {
4029 bus_dmamap_unload(txr->tx_mbuf_tag,
4030 tx_buf->tx_mbuf_map);
4031 m_freem(tx_buf->tx_mbuf_ptr);
4032 tx_buf->tx_mbuf_ptr = NULL;
4036 /* Clear each TX chain page. */
4037 for (i = 0; i < txr->tx_pages; i++)
4038 bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
4039 txr->used_tx_bd = 0;
4042 /****************************************************************************/
4043 /* Initialize the RX context memory. */
4047 /****************************************************************************/
4049 bce_init_rx_context(struct bce_rx_ring *rxr)
4053 /* Initialize the context ID for an L2 RX chain. */
4054 val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4055 BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4058 * Set the level for generating pause frames
4059 * when the number of available rx_bd's gets
4060 * too low (the low watermark) and the level
4061 * when pause frames can be stopped (the high
4064 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4065 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4066 uint32_t lo_water, hi_water;
4068 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
4069 hi_water = USABLE_RX_BD(rxr) / 4;
4071 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
4072 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
4076 else if (hi_water == 0)
4079 (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
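/*
 * Both marks are scaled down to the units the context memory expects
 * and then packed into the context-type word alongside the CID type
 * set above.
 */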
4082 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4083 BCE_L2CTX_RX_CTX_TYPE, val);
4085 /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
4086 if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4087 BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4088 val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
4089 REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
4092 /* Point the hardware to the first page in the chain. */
4093 val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
4094 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4095 BCE_L2CTX_RX_NX_BDHADDR_HI, val);
4096 val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
4097 CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4098 BCE_L2CTX_RX_NX_BDHADDR_LO, val);
4101 /****************************************************************************/
4102 /* Allocate memory and initialize the RX data structures. */
4105 /* 0 for success, positive value for failure. */
4106 /****************************************************************************/
4108 bce_init_rx_chain(struct bce_rx_ring *rxr)
4112 uint16_t prod, chain_prod;
4115 /* Initialize the RX producer and consumer indices. */
4118 rxr->rx_prod_bseq = 0;
4119 rxr->free_rx_bd = USABLE_RX_BD(rxr);
4120 rxr->max_rx_bd = USABLE_RX_BD(rxr);
4122 /* Clear the cached status index. */
4123 rxr->last_status_idx = 0;
4125 /* Initialize the RX next pointer chain entries. */
4126 for (i = 0; i < rxr->rx_pages; i++) {
4129 rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4131 /* Check if we've reached the last page. */
4132 if (i == (rxr->rx_pages - 1))
4137 /* Setup the chain page pointers. */
4138 rxbd->rx_bd_haddr_hi =
4139 htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
4140 rxbd->rx_bd_haddr_lo =
4141 htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
4144 /* Allocate mbuf clusters for the rx_bd chain. */
4145 prod = prod_bseq = 0;
4146 while (prod < TOTAL_RX_BD(rxr)) {
4147 chain_prod = RX_CHAIN_IDX(rxr, prod);
4148 if (bce_newbuf_std(rxr, &prod, chain_prod, &prod_bseq, 1)) {
4149 if_printf(&rxr->sc->arpcom.ac_if,
4150 "Error filling RX chain: rx_bd[0x%04X]!\n",
4155 prod = NEXT_RX_BD(prod);
4158 /* Save the RX chain producer index. */
4159 rxr->rx_prod = prod;
4160 rxr->rx_prod_bseq = prod_bseq;
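/*
 * prod_bseq accumulates the length of every buffer posted to the ring;
 * both it and the producer index are handed to the chip through the
 * mailbox registers below.
 */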
4162 /* Tell the chip about the waiting rx_bd's. */
4163 REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4165 REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4168 bce_init_rx_context(rxr);
4173 /****************************************************************************/
4174 /* Free memory and clear the RX data structures. */
4178 /****************************************************************************/
4180 bce_free_rx_chain(struct bce_rx_ring *rxr)
4184 /* Free any mbufs still in the RX mbuf chain. */
4185 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
4186 struct bce_rx_buf *rx_buf = &rxr->rx_bufs[i];
4188 if (rx_buf->rx_mbuf_ptr != NULL) {
4189 bus_dmamap_unload(rxr->rx_mbuf_tag,
4190 rx_buf->rx_mbuf_map);
4191 m_freem(rx_buf->rx_mbuf_ptr);
4192 rx_buf->rx_mbuf_ptr = NULL;
4196 /* Clear each RX chain page. */
4197 for (i = 0; i < rxr->rx_pages; i++)
4198 bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4201 /****************************************************************************/
4202 /* Set media options. */
4205 /* 0 for success, positive value for failure. */
4206 /****************************************************************************/
4208 bce_ifmedia_upd(struct ifnet *ifp)
4210 struct bce_softc *sc = ifp->if_softc;
4211 struct mii_data *mii = device_get_softc(sc->bce_miibus);
4215 * 'mii' will be NULL when this function is called on the following
4216 * code path: bce_attach() -> bce_mgmt_init()
4219 /* Make sure the MII bus has been enumerated. */
4221 if (mii->mii_instance) {
4222 struct mii_softc *miisc;
4224 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4225 mii_phy_reset(miisc);
4227 error = mii_mediachg(mii);
4232 /****************************************************************************/
4233 /* Reports current media status. */
4237 /****************************************************************************/
4239 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4241 struct bce_softc *sc = ifp->if_softc;
4242 struct mii_data *mii = device_get_softc(sc->bce_miibus);
4245 ifmr->ifm_active = mii->mii_media_active;
4246 ifmr->ifm_status = mii->mii_media_status;
4249 /****************************************************************************/
4250 /* Handles PHY generated interrupt events. */
4254 /****************************************************************************/
4256 bce_phy_intr(struct bce_softc *sc)
4258 uint32_t new_link_state, old_link_state;
4259 struct ifnet *ifp = &sc->arpcom.ac_if;
4261 ASSERT_SERIALIZED(&sc->main_serialize);
4263 new_link_state = sc->status_block->status_attn_bits &
4264 STATUS_ATTN_BITS_LINK_STATE;
4265 old_link_state = sc->status_block->status_attn_bits_ack &
4266 STATUS_ATTN_BITS_LINK_STATE;
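/*
 * status_attn_bits carries the current attention state, while
 * status_attn_bits_ack holds the state the driver last acknowledged;
 * comparing the two isolates link transitions that have not been
 * handled yet.
 */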
4268 /* Handle any changes if the link state has changed. */
4269 if (new_link_state != old_link_state) { /* XXX redundant? */
4270 /* Update the status_attn_bits_ack field in the status block. */
4271 if (new_link_state) {
4272 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4273 STATUS_ATTN_BITS_LINK_STATE);
4275 if_printf(ifp, "Link is now UP.\n");
4277 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4278 STATUS_ATTN_BITS_LINK_STATE);
4280 if_printf(ifp, "Link is now DOWN.\n");
4284 * Assume link is down and allow tick routine to
4285 * update the state based on the actual media state.
4288 callout_stop(&sc->bce_tick_callout);
4289 bce_tick_serialized(sc);
4292 /* Acknowledge the link change interrupt. */
4293 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4296 /****************************************************************************/
4297 /* Reads the receive consumer value from the status block (skipping over */
4298 /* chain page pointer if necessary). */
4302 /****************************************************************************/
4303 static __inline uint16_t
4304 bce_get_hw_rx_cons(struct bce_rx_ring *rxr)
4306 uint16_t hw_cons = *rxr->rx_hw_cons;
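/*
 * The last entry of every chain page is the next-page pointer, not a
 * real descriptor, so when the hardware index lands on that slot it is
 * advanced past it.
 */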
4308 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4313 /****************************************************************************/
4314 /* Handles received frame interrupt events. */
4318 /****************************************************************************/
4320 bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
4322 struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
4323 uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4324 uint32_t sw_prod_bseq;
4325 int cpuid = mycpuid;
4327 ASSERT_SERIALIZED(&rxr->rx_serialize);
4329 /* Get working copies of the driver's view of the RX indices. */
4330 sw_cons = rxr->rx_cons;
4331 sw_prod = rxr->rx_prod;
4332 sw_prod_bseq = rxr->rx_prod_bseq;
4334 /* Scan through the receive chain as long as there is work to do. */
4335 while (sw_cons != hw_cons) {
4336 struct pktinfo pi0, *pi = NULL;
4337 struct bce_rx_buf *rx_buf;
4338 struct mbuf *m = NULL;
4339 struct l2_fhdr *l2fhdr = NULL;
4341 uint32_t status = 0;
4343 #ifdef IFPOLL_ENABLE
4344 if (count >= 0 && count-- == 0)
4349 * Convert the producer/consumer indices
4350 * to an actual rx_bd index.
4352 sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
4353 sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
4354 rx_buf = &rxr->rx_bufs[sw_chain_cons];
4358 /* The mbuf is stored with the last rx_bd entry of a packet. */
4359 if (rx_buf->rx_mbuf_ptr != NULL) {
4360 if (sw_chain_cons != sw_chain_prod) {
4361 if_printf(ifp, "RX cons(%d) != prod(%d), "
4362 "drop!\n", sw_chain_cons, sw_chain_prod);
4363 IFNET_STAT_INC(ifp, ierrors, 1);
4365 bce_setup_rxdesc_std(rxr, sw_chain_cons,
4368 goto bce_rx_int_next_rx;
4371 /* Sync the mbuf so the CPU sees the DMA'd data. */
4372 bus_dmamap_sync(rxr->rx_mbuf_tag, rx_buf->rx_mbuf_map,
4373 BUS_DMASYNC_POSTREAD);
4375 /* Save the mbuf from the driver's chain. */
4376 m = rx_buf->rx_mbuf_ptr;
4379 * Frames received on the NetXtreme II are prepended
4380 * with an l2_fhdr structure which provides status
4381 * information about the received frame (including
4382 * VLAN tags and checksum info). The frames are also
4383 * automatically adjusted to align the IP header
4384 * (i.e. two null bytes are inserted before the
4385 * Ethernet header). As a result the data DMA'd by
4386 * the controller into the mbuf is as follows:
4388 * +---------+-----+---------------------+-----+
4389 * | l2_fhdr | pad | packet data | FCS |
4390 * +---------+-----+---------------------+-----+
4392 * The l2_fhdr needs to be checked and skipped and the
4393 * FCS needs to be stripped before sending the packet
4396 l2fhdr = mtod(m, struct l2_fhdr *);
4398 len = l2fhdr->l2_fhdr_pkt_len;
4399 status = l2fhdr->l2_fhdr_status;
4401 len -= ETHER_CRC_LEN;
4403 /* Check the received frame for errors. */
4404 if (status & (L2_FHDR_ERRORS_BAD_CRC |
4405 L2_FHDR_ERRORS_PHY_DECODE |
4406 L2_FHDR_ERRORS_ALIGNMENT |
4407 L2_FHDR_ERRORS_TOO_SHORT |
4408 L2_FHDR_ERRORS_GIANT_FRAME)) {
4409 IFNET_STAT_INC(ifp, ierrors, 1);
4411 /* Reuse the mbuf for a new frame. */
4412 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4415 goto bce_rx_int_next_rx;
4419 * Get a new mbuf for the rx_bd. If no new
4420 * mbufs are available then reuse the current mbuf,
4421 * log an ierror on the interface, and generate
4422 * an error in the system log.
4424 if (bce_newbuf_std(rxr, &sw_prod, sw_chain_prod,
4425 &sw_prod_bseq, 0)) {
4426 IFNET_STAT_INC(ifp, ierrors, 1);
4428 /* Try to reuse the existing mbuf. */
4429 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4432 goto bce_rx_int_next_rx;
4436 * Skip over the l2_fhdr when passing
4437 * the data up the stack.
4439 m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
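/*
 * m_adj() drops the l2_fhdr and the 2-byte alignment pad from the
 * front of the mbuf; the trailing FCS disappears when the length is
 * set below, since len was already reduced by ETHER_CRC_LEN.
 */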
4441 m->m_pkthdr.len = m->m_len = len;
4442 m->m_pkthdr.rcvif = ifp;
4444 /* Validate the checksum if offload is enabled. */
4445 if (ifp->if_capenable & IFCAP_RXCSUM) {
4446 /* Check for an IP datagram. */
4447 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4448 m->m_pkthdr.csum_flags |=
4451 /* Check if the IP checksum is valid. */
4452 if ((l2fhdr->l2_fhdr_ip_xsum ^
4454 m->m_pkthdr.csum_flags |=
4459 /* Check for a valid TCP/UDP frame. */
4460 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4461 L2_FHDR_STATUS_UDP_DATAGRAM)) {
4463 /* Check for a good TCP/UDP checksum. */
4465 (L2_FHDR_ERRORS_TCP_XSUM |
4466 L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4467 m->m_pkthdr.csum_data =
4468 l2fhdr->l2_fhdr_tcp_udp_xsum;
4469 m->m_pkthdr.csum_flags |=
4475 if (ifp->if_capenable & IFCAP_RSS) {
4476 pi = bce_rss_pktinfo(&pi0, status, l2fhdr);
4478 (status & L2_FHDR_STATUS_RSS_HASH)) {
4480 toeplitz_hash(l2fhdr->l2_fhdr_hash));
4484 IFNET_STAT_INC(ifp, ipackets, 1);
4486 sw_prod = NEXT_RX_BD(sw_prod);
4489 sw_cons = NEXT_RX_BD(sw_cons);
4491 /* If we have a packet, pass it up the stack */
4493 if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4494 m->m_flags |= M_VLANTAG;
4495 m->m_pkthdr.ether_vlantag =
4496 l2fhdr->l2_fhdr_vlan_tag;
4498 ifp->if_input(ifp, m, pi, cpuid);
4499 #ifdef BCE_RSS_DEBUG
4505 rxr->rx_cons = sw_cons;
4506 rxr->rx_prod = sw_prod;
4507 rxr->rx_prod_bseq = sw_prod_bseq;
4509 REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4511 REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4515 /****************************************************************************/
4516 /* Reads the transmit consumer value from the status block (skipping over */
4517 /* chain page pointer if necessary). */
4521 /****************************************************************************/
4522 static __inline uint16_t
4523 bce_get_hw_tx_cons(struct bce_tx_ring *txr)
4525 uint16_t hw_cons = *txr->tx_hw_cons;
4527 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4532 /****************************************************************************/
4533 /* Handles transmit completion interrupt events. */
4537 /****************************************************************************/
4539 bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
4541 struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4542 uint16_t sw_tx_cons, sw_tx_chain_cons;
4544 ASSERT_SERIALIZED(&txr->tx_serialize);
4546 /* Get the driver's view of the TX consumer index. */
4547 sw_tx_cons = txr->tx_cons;
4549 /* Cycle through any completed TX chain page entries. */
4550 while (sw_tx_cons != hw_tx_cons) {
4551 struct bce_tx_buf *tx_buf;
4553 sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
4554 tx_buf = &txr->tx_bufs[sw_tx_chain_cons];
4557 * Free the associated mbuf. Remember
4558 * that only the last tx_bd of a packet
4559 * has an mbuf pointer and DMA map.
4561 if (tx_buf->tx_mbuf_ptr != NULL) {
4562 /* Unmap the mbuf. */
4563 bus_dmamap_unload(txr->tx_mbuf_tag,
4564 tx_buf->tx_mbuf_map);
4566 /* Free the mbuf. */
4567 m_freem(tx_buf->tx_mbuf_ptr);
4568 tx_buf->tx_mbuf_ptr = NULL;
4570 IFNET_STAT_INC(ifp, opackets, 1);
4571 #ifdef BCE_TSS_DEBUG
4577 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4580 if (txr->used_tx_bd == 0) {
4581 /* Clear the TX timeout timer. */
4582 txr->tx_watchdog.wd_timer = 0;
4585 /* Clear the TX queue full (oactive) flag. */
4586 if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
4587 ifsq_clr_oactive(txr->ifsq);
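/*
 * Reopening only once BCE_TX_SPARE_SPACE descriptors are free avoids
 * unblocking the queue just to stall again on the next worst-case
 * frame.
 */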
4588 txr->tx_cons = sw_tx_cons;
4591 /****************************************************************************/
4592 /* Disables interrupt generation. */
4596 /****************************************************************************/
4598 bce_disable_intr(struct bce_softc *sc)
4602 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4603 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4604 (sc->rx_rings[i].idx << 24) |
4605 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4607 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
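/* Read back to flush the mask writes before continuing. */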
4609 callout_stop(&sc->bce_ckmsi_callout);
4610 sc->bce_msi_maylose = FALSE;
4611 sc->bce_check_rx_cons = 0;
4612 sc->bce_check_tx_cons = 0;
4613 sc->bce_check_status_idx = 0xffff;
4615 for (i = 0; i < sc->rx_ring_cnt; ++i)
4616 lwkt_serialize_handler_disable(sc->bce_msix[i].msix_serialize);
4619 /****************************************************************************/
4620 /* Enables interrupt generation. */
4624 /****************************************************************************/
4626 bce_enable_intr(struct bce_softc *sc)
4630 for (i = 0; i < sc->rx_ring_cnt; ++i)
4631 lwkt_serialize_handler_enable(sc->bce_msix[i].msix_serialize);
4633 for (i = 0; i < sc->rx_ring_cnt; ++i) {
4634 struct bce_rx_ring *rxr = &sc->rx_rings[i];
4636 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4637 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4638 BCE_PCICFG_INT_ACK_CMD_MASK_INT |
4639 rxr->last_status_idx);
4640 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4641 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4642 rxr->last_status_idx);
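/*
 * The two writes above first acknowledge events up to the last seen
 * status index with the vector still masked, then repeat the index
 * without the mask bit, re-enabling the vector.
 */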
4644 REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
4646 if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
4647 sc->bce_msi_maylose = FALSE;
4648 sc->bce_check_rx_cons = 0;
4649 sc->bce_check_tx_cons = 0;
4650 sc->bce_check_status_idx = 0xffff;
4653 if_printf(&sc->arpcom.ac_if, "check msi\n");
4655 callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
4656 bce_check_msi, sc, sc->bce_msix[0].msix_cpuid);
4660 /****************************************************************************/
4661 /* Reenables interrupt generation during interrupt handling. */
4665 /****************************************************************************/
4667 bce_reenable_intr(struct bce_rx_ring *rxr)
4669 REG_WR(rxr->sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4670 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | rxr->last_status_idx);
4673 /****************************************************************************/
4674 /* Handles controller initialization. */
4678 /****************************************************************************/
4682 struct bce_softc *sc = xsc;
4683 struct ifnet *ifp = &sc->arpcom.ac_if;
4688 ASSERT_IFNET_SERIALIZED_ALL(ifp);
4690 /* Check if the driver is still running and bail out if it is. */
4691 if (ifp->if_flags & IFF_RUNNING)
4696 error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4698 if_printf(ifp, "Controller reset failed!\n");
4702 error = bce_chipinit(sc);
4704 if_printf(ifp, "Controller initialization failed!\n");
4708 error = bce_blockinit(sc);
4710 if_printf(ifp, "Block initialization failed!\n");
4714 /* Load our MAC address. */
4715 bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4716 bce_set_mac_addr(sc);
4718 /* Calculate and program the Ethernet MTU size. */
4719 ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4722 * Program the mtu, enabling jumbo frame
4723 * support if necessary. Also set the mbuf
4724 * allocation count for RX frames.
4726 if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4728 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4729 min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4730 BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4732 panic("jumbo buffer is not supported yet");
4735 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4738 /* Program appropriate promiscuous/multicast filtering. */
4739 bce_set_rx_mode(sc);
4742 * Init RX buffer descriptor chain.
4744 REG_WR(sc, BCE_RLUP_RSS_CONFIG, 0);
4745 bce_reg_wr_ind(sc, BCE_RXP_SCRATCH_RSS_TBL_SZ, 0);
4747 for (i = 0; i < sc->rx_ring_cnt; ++i)
4748 bce_init_rx_chain(&sc->rx_rings[i]); /* XXX return value */
4750 if (sc->rx_ring_cnt > 1)
4754 * Init TX buffer descriptor chain.
4756 REG_WR(sc, BCE_TSCH_TSS_CFG, 0);
4758 for (i = 0; i < sc->tx_ring_cnt; ++i)
4759 bce_init_tx_chain(&sc->tx_rings[i]);
4761 if (sc->tx_ring_cnt > 1) {
4762 REG_WR(sc, BCE_TSCH_TSS_CFG,
4763 ((sc->tx_ring_cnt - 1) << 24) | (TX_TSS_CID << 7));
4767 #ifdef IFPOLL_ENABLE
4768 if (ifp->if_flags & IFF_NPOLLING)
4773 /* Disable interrupts if we are polling. */
4774 bce_disable_intr(sc);
4776 /* Change coalesce parameters */
4777 bce_npoll_coal_change(sc);
4779 /* Enable host interrupts. */
4780 bce_enable_intr(sc);
4782 bce_set_timer_cpuid(sc, polling);
4784 bce_ifmedia_upd(ifp);
4786 ifp->if_flags |= IFF_RUNNING;
4787 for (i = 0; i < sc->tx_ring_cnt; ++i) {
4788 ifsq_clr_oactive(sc->tx_rings[i].ifsq);
4789 ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
4792 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
4793 sc->bce_timer_cpuid);
4799 /****************************************************************************/
4800 /* Initialize the controller just enough so that any management firmware */
4801 /* running on the device will continue to operate correctly. */
4805 /****************************************************************************/
4807 bce_mgmt_init(struct bce_softc *sc)
4809 struct ifnet *ifp = &sc->arpcom.ac_if;
4811 /* Bail out if management firmware is not running. */
4812 if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4815 /* Enable all critical blocks in the MAC. */
4816 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
4817 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
4818 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4819 BCE_MISC_ENABLE_DEFAULT_XI);
4821 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4823 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4826 bce_ifmedia_upd(ifp);
4829 /****************************************************************************/
4830 /* Encapsulates an mbuf cluster into the tx_bd chain structure and makes the */
4831 /* memory visible to the controller. */
4834 /* 0 for success, positive value for failure. */
4835 /****************************************************************************/
4837 bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
4839 bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4840 bus_dmamap_t map, tmp_map;
4841 struct mbuf *m0 = *m_head;
4842 struct tx_bd *txbd = NULL;
4843 uint16_t vlan_tag = 0, flags = 0, mss = 0;
4844 uint16_t chain_prod, chain_prod_start, prod;
4846 int i, error, maxsegs, nsegs;
4848 /* Transfer any checksum offload flags to the bd. */
4849 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
4850 error = bce_tso_setup(txr, m_head, &flags, &mss);
4854 } else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
4855 if (m0->m_pkthdr.csum_flags & CSUM_IP)
4856 flags |= TX_BD_FLAGS_IP_CKSUM;
4857 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4858 flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4861 /* Transfer any VLAN tags to the bd. */
4862 if (m0->m_flags & M_VLANTAG) {
4863 flags |= TX_BD_FLAGS_VLAN_TAG;
4864 vlan_tag = m0->m_pkthdr.ether_vlantag;
4867 prod = txr->tx_prod;
4868 chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);
4870 /* Map the mbuf into DMAable memory. */
4871 map = txr->tx_bufs[chain_prod_start].tx_mbuf_map;
4873 maxsegs = txr->max_tx_bd - txr->used_tx_bd;
4874 KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4875 ("not enough segments %d", maxsegs));
4876 if (maxsegs > BCE_MAX_SEGMENTS)
4877 maxsegs = BCE_MAX_SEGMENTS;
4879 /* Map the mbuf into our DMA address space. */
4880 error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
4881 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
4884 bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
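/*
 * bus_dmamap_load_mbuf_defrag() may collapse the chain into a newly
 * allocated mbuf when it has too many segments, which is why m_head is
 * passed by reference; the PREWRITE sync makes the frame data visible
 * to the device before the descriptors referencing it are built.
 */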
4886 *nsegs_used += nsegs;
4891 /* prod points to an empty tx_bd at this point. */
4892 prod_bseq = txr->tx_prod_bseq;
4895 * Cycle through each mbuf segment that makes up
4896 * the outgoing frame, gathering the mapping info
4897 * for that segment and creating a tx_bd for
4900 for (i = 0; i < nsegs; i++) {
4901 chain_prod = TX_CHAIN_IDX(txr, prod);
4903 &txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4905 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4906 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4907 txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
4908 htole16(segs[i].ds_len);
4909 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4910 txbd->tx_bd_flags = htole16(flags);
4912 prod_bseq += segs[i].ds_len;
4914 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4915 prod = NEXT_TX_BD(prod);
4918 /* Set the END flag on the last TX buffer descriptor. */
4919 txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4922 * Ensure that the mbuf pointer for this transmission
4923 * is placed at the array index of the last
4924 * descriptor in this chain. This is done
4925 * because a single map is used for all
4926 * segments of the mbuf and we don't want to
4927 * unload the map before all of the segments
4930 txr->tx_bufs[chain_prod].tx_mbuf_ptr = m0;
4932 tmp_map = txr->tx_bufs[chain_prod].tx_mbuf_map;
4933 txr->tx_bufs[chain_prod].tx_mbuf_map = map;
4934 txr->tx_bufs[chain_prod_start].tx_mbuf_map = tmp_map;
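/*
 * The map borrowed from the first descriptor's slot ends up stored at
 * the last descriptor, next to the mbuf pointer, and the last slot's
 * idle map is handed back to the first slot, so every slot always owns
 * exactly one DMA map.
 */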
4936 txr->used_tx_bd += nsegs;
4938 /* prod points to the next free tx_bd at this point. */
4939 txr->tx_prod = prod;
4940 txr->tx_prod_bseq = prod_bseq;
4950 bce_xmit(struct bce_tx_ring *txr)
4952 /* Start the transmit. */
4953 REG_WR16(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BIDX,
4955 REG_WR(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BSEQ,
4959 /****************************************************************************/
4960 /* Main transmit routine when called from another routine with a lock. */
4964 /****************************************************************************/
4966 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4968 struct bce_softc *sc = ifp->if_softc;
4969 struct bce_tx_ring *txr = ifsq_get_priv(ifsq);
4972 KKASSERT(txr->ifsq == ifsq);
4973 ASSERT_SERIALIZED(&txr->tx_serialize);
4975 /* If there's no link or the transmit queue is empty then just exit. */
4976 if (!sc->bce_link) {
4981 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
4985 struct mbuf *m_head;
4988 * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4991 if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
4992 ifsq_set_oactive(ifsq);
4996 /* Check for any frames to send. */
4997 m_head = ifsq_dequeue(ifsq);
5002 * Pack the data into the transmit ring. If we
5003 * don't have room, place the mbuf back at the
5004 * head of the queue and set the OACTIVE flag
5005 * to wait for the NIC to drain the chain.
5007 if (bce_encap(txr, &m_head, &count)) {
5008 IFNET_STAT_INC(ifp, oerrors, 1);
5009 if (txr->used_tx_bd == 0) {
5012 ifsq_set_oactive(ifsq);
5017 if (count >= txr->tx_wreg) {
5022 /* Send a copy of the frame to any BPF listeners. */
5023 ETHER_BPF_MTAP(ifp, m_head);
5025 /* Set the tx timeout. */
5026 txr->tx_watchdog.wd_timer = BCE_TX_TIMEOUT;
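/* bce_tx_intr() resets this timer to zero once the TX chain fully drains. */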
5032 /****************************************************************************/
5033 /* Handles any IOCTL calls from the operating system. */
5036 /* 0 for success, positive value for failure. */
5037 /****************************************************************************/
5039 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
5041 struct bce_softc *sc = ifp->if_softc;
5042 struct ifreq *ifr = (struct ifreq *)data;
5043 struct mii_data *mii;
5044 int mask, error = 0;
5046 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5050 /* Check that the MTU setting is supported. */
5051 if (ifr->ifr_mtu < BCE_MIN_MTU ||
5053 ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
5055 ifr->ifr_mtu > ETHERMTU
5062 ifp->if_mtu = ifr->ifr_mtu;
5063 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */
5068 if (ifp->if_flags & IFF_UP) {
5069 if (ifp->if_flags & IFF_RUNNING) {
5070 mask = ifp->if_flags ^ sc->bce_if_flags;
5072 if (mask & (IFF_PROMISC | IFF_ALLMULTI))
5073 bce_set_rx_mode(sc);
5077 } else if (ifp->if_flags & IFF_RUNNING) {
5080 /* If MFW is running, give the controller a partial restart. */
5081 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5082 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5087 sc->bce_if_flags = ifp->if_flags;
5092 if (ifp->if_flags & IFF_RUNNING)
5093 bce_set_rx_mode(sc);
5098 mii = device_get_softc(sc->bce_miibus);
5099 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5103 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5104 if (mask & IFCAP_HWCSUM) {
5105 ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
5106 if (ifp->if_capenable & IFCAP_TXCSUM)
5107 ifp->if_hwassist |= BCE_CSUM_FEATURES;
5109 ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
5111 if (mask & IFCAP_TSO) {
5112 ifp->if_capenable ^= IFCAP_TSO;
5113 if (ifp->if_capenable & IFCAP_TSO)
5114 ifp->if_hwassist |= CSUM_TSO;
5116 ifp->if_hwassist &= ~CSUM_TSO;
5118 if (mask & IFCAP_RSS)
5119 ifp->if_capenable ^= IFCAP_RSS;
5123 error = ether_ioctl(ifp, command, data);
5129 /****************************************************************************/
5130 /* Transmit timeout handler. */
5134 /****************************************************************************/
5136 bce_watchdog(struct ifaltq_subque *ifsq)
5138 struct ifnet *ifp = ifsq_get_ifp(ifsq);
5139 struct bce_softc *sc = ifp->if_softc;
5142 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5145 * If we are in this routine because of pause frames, then
5146 * don't reset the hardware.
5148 if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED)
5151 if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
5153 ifp->if_flags &= ~IFF_RUNNING; /* Force reinitialize */
5156 IFNET_STAT_INC(ifp, oerrors, 1);
5158 for (i = 0; i < sc->tx_ring_cnt; ++i)
5159 ifsq_devstart_sched(sc->tx_rings[i].ifsq);
5162 #ifdef IFPOLL_ENABLE
5165 bce_npoll_status(struct ifnet *ifp)
5167 struct bce_softc *sc = ifp->if_softc;
5168 struct status_block *sblk = sc->status_block;
5169 uint32_t status_attn_bits;
5171 ASSERT_SERIALIZED(&sc->main_serialize);
5173 status_attn_bits = sblk->status_attn_bits;
5175 /* Was it a link change interrupt? */
5176 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5177 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5181 * Clear any transient status updates during link state change.
5183 REG_WR(sc, BCE_HC_COMMAND,
5184 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5185 REG_RD(sc, BCE_HC_COMMAND);
5189 * If any other attention is asserted then the chip is toast.
5191 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5192 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5193 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5194 sblk->status_attn_bits);
5195 bce_serialize_skipmain(sc);
5197 bce_deserialize_skipmain(sc);
5202 bce_npoll_rx(struct ifnet *ifp, void *arg, int count)
5204 struct bce_rx_ring *rxr = arg;
5205 uint16_t hw_rx_cons;
5207 ASSERT_SERIALIZED(&rxr->rx_serialize);
5210 * Save the status block index value for use when enabling
5213 rxr->last_status_idx = *rxr->hw_status_idx;
5215 /* Make sure status index is extracted before RX/TX cons */
5218 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5220 /* Check for any completed RX frames. */
5221 if (hw_rx_cons != rxr->rx_cons)
5222 bce_rx_intr(rxr, count, hw_rx_cons);
5226 bce_npoll_rx_pack(struct ifnet *ifp, void *arg, int count)
5228 struct bce_rx_ring *rxr = arg;
5230 KASSERT(rxr->idx == 0, ("not the first RX ring, but %d", rxr->idx));
5231 bce_npoll_rx(ifp, rxr, count);
5233 KASSERT(rxr->sc->rx_ring_cnt != rxr->sc->rx_ring_cnt2,
5234 ("RX ring count %d, count2 %d", rxr->sc->rx_ring_cnt,
5235 rxr->sc->rx_ring_cnt2));
5237 /* Last ring carries packets whose masked hash is 0 */
5238 rxr = &rxr->sc->rx_rings[rxr->sc->rx_ring_cnt - 1];
5240 lwkt_serialize_enter(&rxr->rx_serialize);
5241 bce_npoll_rx(ifp, rxr, count);
5242 lwkt_serialize_exit(&rxr->rx_serialize);
5246 bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused)
5248 struct bce_tx_ring *txr = arg;
5249 uint16_t hw_tx_cons;
5251 ASSERT_SERIALIZED(&txr->tx_serialize);
5253 hw_tx_cons = bce_get_hw_tx_cons(txr);
5255 /* Check for any completed TX frames. */
5256 if (hw_tx_cons != txr->tx_cons) {
5257 bce_tx_intr(txr, hw_tx_cons);
5258 if (!ifsq_is_empty(txr->ifsq))
5259 ifsq_devstart(txr->ifsq);
5264 bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
5266 struct bce_softc *sc = ifp->if_softc;
5269 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5274 info->ifpi_status.status_func = bce_npoll_status;
5275 info->ifpi_status.serializer = &sc->main_serialize;
5277 for (i = 0; i < sc->tx_ring_cnt; ++i) {
5278 struct bce_tx_ring *txr = &sc->tx_rings[i];
5280 cpu = if_ringmap_cpumap(sc->tx_rmap, i);
5281 KKASSERT(cpu < netisr_ncpus);
5282 info->ifpi_tx[cpu].poll_func = bce_npoll_tx;
5283 info->ifpi_tx[cpu].arg = txr;
5284 info->ifpi_tx[cpu].serializer = &txr->tx_serialize;
5285 ifsq_set_cpuid(txr->ifsq, cpu);
5288 for (i = 0; i < sc->rx_ring_cnt2; ++i) {
5289 struct bce_rx_ring *rxr = &sc->rx_rings[i];
5291 cpu = if_ringmap_cpumap(sc->rx_rmap, i);
5292 KKASSERT(cpu < netisr_ncpus);
5293 if (i == 0 && sc->rx_ring_cnt2 != sc->rx_ring_cnt) {
5295 * If RSS is enabled, the packets whose
5296 * masked hash are 0 are queued to the
5297 * last RX ring; piggyback the last RX
5298 * ring's processing in the first RX
5299 * polling handler. (see also: comment
5300 * in bce_setup_ring_cnt())
5303 if_printf(ifp, "npoll pack last "
5304 "RX ring on cpu%d\n", cpu);
5306 info->ifpi_rx[cpu].poll_func =
5309 info->ifpi_rx[cpu].poll_func = bce_npoll_rx;
5311 info->ifpi_rx[cpu].arg = rxr;
5312 info->ifpi_rx[cpu].serializer = &rxr->rx_serialize;
5315 if (ifp->if_flags & IFF_RUNNING) {
5316 bce_set_timer_cpuid(sc, TRUE);
5317 bce_disable_intr(sc);
5318 bce_npoll_coal_change(sc);
5321 for (i = 0; i < sc->tx_ring_cnt; ++i) {
5322 ifsq_set_cpuid(sc->tx_rings[i].ifsq,
5323 sc->bce_msix[i].msix_cpuid);
5326 if (ifp->if_flags & IFF_RUNNING) {
5327 bce_set_timer_cpuid(sc, FALSE);
5328 bce_enable_intr(sc);
5330 sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
5331 BCE_COALMASK_RX_BDS_INT;
5332 bce_coal_change(sc);
5337 #endif /* IFPOLL_ENABLE */
5340 * Interrupt handler.
5342 /****************************************************************************/
5343 /* Main interrupt entry point. Verifies that the controller generated the */
5344 /* interrupt and then calls a separate routine to handle the various */
5345 /* interrupt causes (PHY, TX, RX). */
5348 /* 0 for success, positive value for failure. */
5349 /****************************************************************************/
5351 bce_intr(struct bce_softc *sc)
5353 struct ifnet *ifp = &sc->arpcom.ac_if;
5354 struct status_block *sblk;
5355 uint16_t hw_rx_cons, hw_tx_cons;
5356 uint32_t status_attn_bits;
5357 struct bce_tx_ring *txr = &sc->tx_rings[0];
5358 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5360 ASSERT_SERIALIZED(&sc->main_serialize);
5362 sblk = sc->status_block;
5365 * Save the status block index value for use during
5366 * the next interrupt.
5368 rxr->last_status_idx = *rxr->hw_status_idx;
5370 /* Make sure status index is extracted before RX/TX cons */
5373 /* Check if the hardware has finished any work. */
5374 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5375 hw_tx_cons = bce_get_hw_tx_cons(txr);
5377 status_attn_bits = sblk->status_attn_bits;
5379 /* Was it a link change interrupt? */
5380 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5381 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5385 * Clear any transient status updates during link state
5388 REG_WR(sc, BCE_HC_COMMAND,
5389 sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5390 REG_RD(sc, BCE_HC_COMMAND);
5394 * If any other attention is asserted then
5395 * the chip is toast.
5397 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5398 (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5399 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5400 sblk->status_attn_bits);
5401 bce_serialize_skipmain(sc);
5403 bce_deserialize_skipmain(sc);
5407 /* Check for any completed RX frames. */
5408 lwkt_serialize_enter(&rxr->rx_serialize);
5409 if (hw_rx_cons != rxr->rx_cons)
5410 bce_rx_intr(rxr, -1, hw_rx_cons);
5411 lwkt_serialize_exit(&rxr->rx_serialize);
5413 /* Check for any completed TX frames. */
5414 lwkt_serialize_enter(&txr->tx_serialize);
5415 if (hw_tx_cons != txr->tx_cons) {
5416 bce_tx_intr(txr, hw_tx_cons);
5417 if (!ifsq_is_empty(txr->ifsq))
5418 ifsq_devstart(txr->ifsq);
5420 lwkt_serialize_exit(&txr->tx_serialize);
5424 bce_intr_legacy(void *xsc)
5426 struct bce_softc *sc = xsc;
5427 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5428 struct status_block *sblk;
5430 sblk = sc->status_block;
5433 * If the hardware status block index matches the last value
5434 * read by the driver and we haven't asserted our interrupt
5435 * then there's nothing to do.
5437 if (sblk->status_idx == rxr->last_status_idx &&
5438 (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
5439 BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5442 /* Ack the interrupt and stop others from occurring. */
5443 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5444 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5445 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5448 * Read back to deassert IRQ immediately to avoid too
5449 * many spurious interrupts.
5451 REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
5455 /* Re-enable interrupts. */
5456 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5457 BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
5458 BCE_PCICFG_INT_ACK_CMD_MASK_INT | rxr->last_status_idx);
5459 bce_reenable_intr(rxr);
5463 bce_intr_msi(void *xsc)
5465 struct bce_softc *sc = xsc;
5467 /* Ack the interrupt and stop others from occurring. */
5468 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5469 BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5470 BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5474 /* Re-enable interrupts */
5475 bce_reenable_intr(&sc->rx_rings[0]);
5479 bce_intr_msi_oneshot(void *xsc)
5481 struct bce_softc *sc = xsc;
5485 /* Re-enable interrupts */
5486 bce_reenable_intr(&sc->rx_rings[0]);
5490 bce_intr_msix_rxtx(void *xrxr)
5492 struct bce_rx_ring *rxr = xrxr;
5493 struct bce_tx_ring *txr;
5494 uint16_t hw_rx_cons, hw_tx_cons;
5496 ASSERT_SERIALIZED(&rxr->rx_serialize);
5498 KKASSERT(rxr->idx < rxr->sc->tx_ring_cnt);
5499 txr = &rxr->sc->tx_rings[rxr->idx];
5502 * Save the status block index value for use during
5503 * the next interrupt.
5505 rxr->last_status_idx = *rxr->hw_status_idx;
5507 /* Make sure status index is extracted before RX/TX cons */
5510 /* Check if the hardware has finished any work. */
5511 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5512 if (hw_rx_cons != rxr->rx_cons)
5513 bce_rx_intr(rxr, -1, hw_rx_cons);
5515 /* Check for any completed TX frames. */
5516 hw_tx_cons = bce_get_hw_tx_cons(txr);
5517 lwkt_serialize_enter(&txr->tx_serialize);
5518 if (hw_tx_cons != txr->tx_cons) {
5519 bce_tx_intr(txr, hw_tx_cons);
5520 if (!ifsq_is_empty(txr->ifsq))
5521 ifsq_devstart(txr->ifsq);
5523 lwkt_serialize_exit(&txr->tx_serialize);
5525 /* Re-enable interrupts */
5526 bce_reenable_intr(rxr);
5530 bce_intr_msix_rx(void *xrxr)
5532 struct bce_rx_ring *rxr = xrxr;
5533 uint16_t hw_rx_cons;
5535 ASSERT_SERIALIZED(&rxr->rx_serialize);
5538 * Save the status block index value for use during
5539 * the next interrupt.
5541 rxr->last_status_idx = *rxr->hw_status_idx;
5543 /* Make sure status index is extracted before RX cons */
5546 /* Check if the hardware has finished any work. */
5547 hw_rx_cons = bce_get_hw_rx_cons(rxr);
5548 if (hw_rx_cons != rxr->rx_cons)
5549 bce_rx_intr(rxr, -1, hw_rx_cons);
5551 /* Re-enable interrupts */
5552 bce_reenable_intr(rxr);
5555 /****************************************************************************/
5556 /* Programs the various packet receive modes (broadcast and multicast). */
5560 /****************************************************************************/
5562 bce_set_rx_mode(struct bce_softc *sc)
5564 struct ifnet *ifp = &sc->arpcom.ac_if;
5565 struct ifmultiaddr *ifma;
5566 uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5567 uint32_t rx_mode, sort_mode;
5570 ASSERT_IFNET_SERIALIZED_ALL(ifp);
5572 /* Initialize receive mode default settings. */
5573 rx_mode = sc->rx_mode &
5574 ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5575 BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5576 sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5579 * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5582 if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5583 !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5584 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5587 * Check for promiscuous, all multicast, or selected
5588 * multicast address filtering.
5590 if (ifp->if_flags & IFF_PROMISC) {
5591 /* Enable promiscuous mode. */
5592 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5593 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5594 } else if (ifp->if_flags & IFF_ALLMULTI) {
5595 /* Enable all multicast addresses. */
5596 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5597 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5600 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5602 /* Accept one or more multicast(s). */
5603 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5604 if (ifma->ifma_addr->sa_family != AF_LINK)
5607 LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5608 ETHER_ADDR_LEN) & 0xFF;
5609 hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
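/*
 * The hash is folded to a single byte: bits 7-5 select one of the
 * eight 32-bit hash registers and bits 4-0 select the bit within it.
 */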
5612 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5613 REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5616 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5619 /* Only make changes if the receive mode has actually changed. */
5620 if (rx_mode != sc->rx_mode) {
5621 sc->rx_mode = rx_mode;
5622 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5625 /* Disable and clear the existing sort before enabling a new sort. */
5626 REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5627 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5628 REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5631 /****************************************************************************/
5632 /* Called periodically to update statistics from the controller's */
5633 /* statistics block. */
5637 /****************************************************************************/
5639 bce_stats_update(struct bce_softc *sc)
5641 struct ifnet *ifp = &sc->arpcom.ac_if;
5642 struct statistics_block *stats = sc->stats_block;
5644 ASSERT_SERIALIZED(&sc->main_serialize);
5647 * Certain controllers don't report carrier sense errors correctly.
5648 * See errata E11_5708CA0_1165.
5650 if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5651 !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5652 IFNET_STAT_INC(ifp, oerrors,
5653 (u_long)stats->stat_Dot3StatsCarrierSenseErrors);
5657 * Update the sysctl statistics from the hardware statistics.
5659 sc->stat_IfHCInOctets =
5660 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5661 (uint64_t)stats->stat_IfHCInOctets_lo;
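/*
 * The hardware keeps each 64-bit counter as separate hi/lo 32-bit
 * words; they are reassembled here (and for the counters below) before
 * being exported through sysctl.
 */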
5663 sc->stat_IfHCInBadOctets =
5664 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5665 (uint64_t)stats->stat_IfHCInBadOctets_lo;
5667 sc->stat_IfHCOutOctets =
5668 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5669 (uint64_t)stats->stat_IfHCOutOctets_lo;
5671 sc->stat_IfHCOutBadOctets =
5672 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5673 (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5675 sc->stat_IfHCInUcastPkts =
5676 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5677 (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5679 sc->stat_IfHCInMulticastPkts =
5680 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5681 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5683 sc->stat_IfHCInBroadcastPkts =
5684 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5685 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5687 sc->stat_IfHCOutUcastPkts =
5688 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5689 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5691 sc->stat_IfHCOutMulticastPkts =
5692 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5693 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5695 sc->stat_IfHCOutBroadcastPkts =
5696 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5697 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5699 sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5700 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5702 sc->stat_Dot3StatsCarrierSenseErrors =
5703 stats->stat_Dot3StatsCarrierSenseErrors;
5705 sc->stat_Dot3StatsFCSErrors =
5706 stats->stat_Dot3StatsFCSErrors;
5708 sc->stat_Dot3StatsAlignmentErrors =
5709 stats->stat_Dot3StatsAlignmentErrors;
5711 sc->stat_Dot3StatsSingleCollisionFrames =
5712 stats->stat_Dot3StatsSingleCollisionFrames;
5714 sc->stat_Dot3StatsMultipleCollisionFrames =
5715 stats->stat_Dot3StatsMultipleCollisionFrames;
5717 sc->stat_Dot3StatsDeferredTransmissions =
5718 stats->stat_Dot3StatsDeferredTransmissions;
5720 sc->stat_Dot3StatsExcessiveCollisions =
5721 stats->stat_Dot3StatsExcessiveCollisions;
5723 sc->stat_Dot3StatsLateCollisions =
5724 stats->stat_Dot3StatsLateCollisions;
5726 sc->stat_EtherStatsCollisions =
5727 stats->stat_EtherStatsCollisions;
5729 sc->stat_EtherStatsFragments =
5730 stats->stat_EtherStatsFragments;
5732 sc->stat_EtherStatsJabbers =
5733 stats->stat_EtherStatsJabbers;
5735 sc->stat_EtherStatsUndersizePkts =
5736 stats->stat_EtherStatsUndersizePkts;
5738 sc->stat_EtherStatsOverrsizePkts =
5739 stats->stat_EtherStatsOverrsizePkts;
5741 sc->stat_EtherStatsPktsRx64Octets =
5742 stats->stat_EtherStatsPktsRx64Octets;
5744 sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5745 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5747 sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5748 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5750 sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5751 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5753 sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5754 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5756 sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5757 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5759 sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5760 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5762 sc->stat_EtherStatsPktsTx64Octets =
5763 stats->stat_EtherStatsPktsTx64Octets;
5765 sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5766 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5768 sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5769 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5771 sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5772 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5774 sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5775 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5777 sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5778 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5780 sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5781 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5783 sc->stat_XonPauseFramesReceived =
5784 stats->stat_XonPauseFramesReceived;
5786 sc->stat_XoffPauseFramesReceived =
5787 stats->stat_XoffPauseFramesReceived;
5789 sc->stat_OutXonSent =
5790 stats->stat_OutXonSent;
5792 sc->stat_OutXoffSent =
5793 stats->stat_OutXoffSent;
5795 sc->stat_FlowControlDone =
5796 stats->stat_FlowControlDone;
5798 sc->stat_MacControlFramesReceived =
5799 stats->stat_MacControlFramesReceived;
5801 sc->stat_XoffStateEntered =
5802 stats->stat_XoffStateEntered;
5804 sc->stat_IfInFramesL2FilterDiscards =
5805 stats->stat_IfInFramesL2FilterDiscards;
5807 sc->stat_IfInRuleCheckerDiscards =
5808 stats->stat_IfInRuleCheckerDiscards;
5810 sc->stat_IfInFTQDiscards =
5811 stats->stat_IfInFTQDiscards;
5813 sc->stat_IfInMBUFDiscards =
5814 stats->stat_IfInMBUFDiscards;
5816 sc->stat_IfInRuleCheckerP4Hit =
5817 stats->stat_IfInRuleCheckerP4Hit;
5819 sc->stat_CatchupInRuleCheckerDiscards =
5820 stats->stat_CatchupInRuleCheckerDiscards;
5822 sc->stat_CatchupInFTQDiscards =
5823 stats->stat_CatchupInFTQDiscards;
5825 sc->stat_CatchupInMBUFDiscards =
5826 stats->stat_CatchupInMBUFDiscards;
5828 sc->stat_CatchupInRuleCheckerP4Hit =
5829 stats->stat_CatchupInRuleCheckerP4Hit;
5831 sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5834 * Update the interface statistics from the
5835 * hardware statistics.
5837 IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions);
5839 IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts +
5840 (u_long)sc->stat_EtherStatsOverrsizePkts +
5841 (u_long)sc->stat_IfInMBUFDiscards +
5842 (u_long)sc->stat_Dot3StatsAlignmentErrors +
5843 (u_long)sc->stat_Dot3StatsFCSErrors +
5844 (u_long)sc->stat_IfInRuleCheckerDiscards +
5845 (u_long)sc->stat_IfInFTQDiscards +
5846 (u_long)sc->com_no_buffers);
5848 IFNET_STAT_SET(ifp, oerrors,
5849 (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5850 (u_long)sc->stat_Dot3StatsExcessiveCollisions +
5851 (u_long)sc->stat_Dot3StatsLateCollisions);
5854 /****************************************************************************/
5855 /* Periodic function to notify the bootcode that the driver is still */
5860 /****************************************************************************/
5862 bce_pulse(void *xsc)
5864 struct bce_softc *sc = xsc;
5865 struct ifnet *ifp = &sc->arpcom.ac_if;
5868 lwkt_serialize_enter(&sc->main_serialize);
5870 /* Tell the firmware that the driver is still running. */
5871 msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5872 bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
5874 /* Update the bootcode condition. */
5875 sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
5877 /* Report whether the bootcode still knows the driver is running. */
5878 if (!sc->bce_drv_cardiac_arrest) {
5879 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
5880 sc->bce_drv_cardiac_arrest = 1;
5881 if_printf(ifp, "Bootcode lost the driver pulse! "
5882 "(bc_state = 0x%08X)\n", sc->bc_state);
5886 * Not supported by all bootcode versions.
5887 * (v5.0.11+ and v5.2.1+) Older bootcode
5888 * will require the driver to reset the
5889 * controller to clear this condition.
5891 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
5892 sc->bce_drv_cardiac_arrest = 0;
5893 if_printf(ifp, "Bootcode found the driver pulse! "
5894 "(bc_state = 0x%08X)\n", sc->bc_state);
5898 /* Schedule the next pulse. */
5899 callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
5900 sc->bce_timer_cpuid);
5902 lwkt_serialize_exit(&sc->main_serialize);
5905 /****************************************************************************/
5906 /* Periodic function to check whether MSI is lost */
5910 /****************************************************************************/
5912 bce_check_msi(void *xsc)
5914 struct bce_softc *sc = xsc;
5915 struct ifnet *ifp = &sc->arpcom.ac_if;
5916 struct status_block *sblk = sc->status_block;
5917 struct bce_tx_ring *txr = &sc->tx_rings[0];
5918 struct bce_rx_ring *rxr = &sc->rx_rings[0];
5920 lwkt_serialize_enter(&sc->main_serialize);
5922 KKASSERT(mycpuid == sc->bce_msix[0].msix_cpuid);
5924 if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
5925 lwkt_serialize_exit(&sc->main_serialize);
5929 if (bce_get_hw_rx_cons(rxr) != rxr->rx_cons ||
5930 bce_get_hw_tx_cons(txr) != txr->tx_cons ||
5931 (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5932 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5933 if (sc->bce_check_rx_cons == rxr->rx_cons &&
5934 sc->bce_check_tx_cons == txr->tx_cons &&
5935 sc->bce_check_status_idx == rxr->last_status_idx) {
5938 if (!sc->bce_msi_maylose) {
5939 sc->bce_msi_maylose = TRUE;
5943 msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
5944 if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) {
5946 if_printf(ifp, "lost MSI\n");
5948 REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
5949 msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
5950 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);
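/*
 * Toggling the MSI enable bit off and back on appears to be enough to
 * nudge the chip into regenerating the lost message.
 */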
5953 } else if (bootverbose) {
5954 if_printf(ifp, "MSI may be lost\n");
5958 sc->bce_msi_maylose = FALSE;
5959 sc->bce_check_rx_cons = rxr->rx_cons;
5960 sc->bce_check_tx_cons = txr->tx_cons;
5961 sc->bce_check_status_idx = rxr->last_status_idx;
5964 callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
5966 lwkt_serialize_exit(&sc->main_serialize);
5969 /****************************************************************************/
5970 /* Periodic function to perform maintenance tasks. */
5974 /****************************************************************************/
5976 bce_tick_serialized(struct bce_softc *sc)
5978 struct mii_data *mii;
5980 ASSERT_SERIALIZED(&sc->main_serialize);
5982 /* Update the statistics from the hardware statistics block. */
5983 bce_stats_update(sc);
5985 /* Schedule the next tick. */
5986 callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
5987 sc->bce_timer_cpuid);
5989 /* If link is already up then we're done. */
5993 mii = device_get_softc(sc->bce_miibus);
5996 /* Check if the link has come up. */
5997 if ((mii->mii_media_status & IFM_ACTIVE) &&
5998 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
6002 /* Now that link is up, handle any outstanding TX traffic. */
6003 for (i = 0; i < sc->tx_ring_cnt; ++i)
6004 ifsq_devstart_sched(sc->tx_rings[i].ifsq);
6011 struct bce_softc *sc = xsc;
6013 lwkt_serialize_enter(&sc->main_serialize);
6014 bce_tick_serialized(sc);
6015 lwkt_serialize_exit(&sc->main_serialize);
6018 /****************************************************************************/
6019 /* Adds any sysctl parameters for tuning or debugging purposes. */
6022 /* 0 for success, positive value for failure. */
6023 /****************************************************************************/
6025 bce_add_sysctls(struct bce_softc *sc)
6027 struct sysctl_ctx_list *ctx;
6028 struct sysctl_oid_list *children;
6029 #if defined(BCE_TSS_DEBUG) || defined(BCE_RSS_DEBUG)
6034 ctx = device_get_sysctl_ctx(sc->bce_dev);
6035 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bce_dev));
6037 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
6038 CTLTYPE_INT | CTLFLAG_RW,
6039 sc, 0, bce_sysctl_tx_bds_int, "I",
6040 "Send max coalesced BD count during interrupt");
6041 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
6042 CTLTYPE_INT | CTLFLAG_RW,
6043 sc, 0, bce_sysctl_tx_bds, "I",
6044 "Send max coalesced BD count");
6045 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
6046 CTLTYPE_INT | CTLFLAG_RW,
6047 sc, 0, bce_sysctl_tx_ticks_int, "I",
6048 "Send coalescing ticks during interrupt");
6049 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
6050 CTLTYPE_INT | CTLFLAG_RW,
6051 sc, 0, bce_sysctl_tx_ticks, "I",
6052 "Send coalescing ticks");
6054 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
6055 CTLTYPE_INT | CTLFLAG_RW,
6056 sc, 0, bce_sysctl_rx_bds_int, "I",
6057 "Receive max coalesced BD count during interrupt");
6058 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
6059 CTLTYPE_INT | CTLFLAG_RW,
6060 sc, 0, bce_sysctl_rx_bds, "I",
6061 "Receive max coalesced BD count");
6062 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
6063 CTLTYPE_INT | CTLFLAG_RW,
6064 sc, 0, bce_sysctl_rx_ticks_int, "I",
6065 "Receive coalescing ticks during interrupt");
6066 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
6067 CTLTYPE_INT | CTLFLAG_RW,
6068 sc, 0, bce_sysctl_rx_ticks, "I",
6069 "Receive coalescing ticks");
6071 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_rings",
6072 CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
6073 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
6074 CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages");
6076 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_rings",
6077 CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
6078 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
6079 CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages");
6081 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
6082 CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0,
6083 "# segments before write to hardware registers");
6085 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
6086 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_cpumap",
6087 CTLTYPE_OPAQUE | CTLFLAG_RD, sc->tx_rmap, 0,
6088 if_ringmap_cpumap_sysctl, "I", "TX ring CPU map");
6089 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_cpumap",
6090 CTLTYPE_OPAQUE | CTLFLAG_RD, sc->rx_rmap, 0,
6091 if_ringmap_cpumap_sysctl, "I", "RX ring CPU map");
6093 #ifdef IFPOLL_ENABLE
6094 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_poll_cpumap",
6095 CTLTYPE_OPAQUE | CTLFLAG_RD, sc->tx_rmap, 0,
6096 if_ringmap_cpumap_sysctl, "I", "TX poll CPU map");
6097 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_poll_cpumap",
6098 CTLTYPE_OPAQUE | CTLFLAG_RD, sc->rx_rmap, 0,
6099 if_ringmap_cpumap_sysctl, "I", "RX poll CPU map");
6103 #ifdef BCE_RSS_DEBUG
6104 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rss_debug",
6105 CTLFLAG_RW, &sc->rss_debug, 0, "RSS debug level");
6106 for (i = 0; i < sc->rx_ring_cnt; ++i) {
6107 ksnprintf(node, sizeof(node), "rx%d_pkt", i);
6108 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6109 CTLFLAG_RW, &sc->rx_rings[i].rx_pkts,
6114 #ifdef BCE_TSS_DEBUG
6115 for (i = 0; i < sc->tx_ring_cnt; ++i) {
6116 ksnprintf(node, sizeof(node), "tx%d_pkt", i);
6117 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6118 CTLFLAG_RW, &sc->tx_rings[i].tx_pkts,
6123 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6124 "stat_IfHCInOctets",
6125 CTLFLAG_RD, &sc->stat_IfHCInOctets,
6128 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6129 "stat_IfHCInBadOctets",
6130 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6131 "Bad bytes received");
6133 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6134 "stat_IfHCOutOctets",
6135 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6138 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6139 "stat_IfHCOutBadOctets",
6140 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6143 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6144 "stat_IfHCInUcastPkts",
6145 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6146 "Unicast packets received");
6148 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6149 "stat_IfHCInMulticastPkts",
6150 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6151 "Multicast packets received");
6153 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6154 "stat_IfHCInBroadcastPkts",
6155 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6156 "Broadcast packets received");
6158 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6159 "stat_IfHCOutUcastPkts",
6160 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6161 "Unicast packets sent");
6163 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6164 "stat_IfHCOutMulticastPkts",
6165 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6166 "Multicast packets sent");
6168 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO,
6169 "stat_IfHCOutBroadcastPkts",
6170 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6171 "Broadcast packets sent");
6173 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6174 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6175 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6176 0, "Internal MAC transmit errors");
6178 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6179 "stat_Dot3StatsCarrierSenseErrors",
6180 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6181 0, "Carrier sense errors");
6183 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6184 "stat_Dot3StatsFCSErrors",
6185 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6186 0, "Frame check sequence errors");
6188 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6189 "stat_Dot3StatsAlignmentErrors",
6190 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6191 0, "Alignment errors");
6193 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6194 "stat_Dot3StatsSingleCollisionFrames",
6195 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6196 0, "Single Collision Frames");
6198 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6199 "stat_Dot3StatsMultipleCollisionFrames",
6200 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6201 0, "Multiple Collision Frames");
6203 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6204 "stat_Dot3StatsDeferredTransmissions",
6205 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6206 0, "Deferred Transmissions");
6208 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6209 "stat_Dot3StatsExcessiveCollisions",
6210 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6211 0, "Excessive Collisions");
6213 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6214 "stat_Dot3StatsLateCollisions",
6215 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6216 0, "Late Collisions");
6218 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6219 "stat_EtherStatsCollisions",
6220 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6223 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6224 "stat_EtherStatsFragments",
6225 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6228 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6229 "stat_EtherStatsJabbers",
6230 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6233 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6234 "stat_EtherStatsUndersizePkts",
6235 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6236 0, "Undersize packets");
6238 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6239 "stat_EtherStatsOverrsizePkts",
6240 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6241 0, "stat_EtherStatsOverrsizePkts");
6243 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6244 "stat_EtherStatsPktsRx64Octets",
6245 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6246 0, "Bytes received in 64 byte packets");
6248 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6249 "stat_EtherStatsPktsRx65Octetsto127Octets",
6250 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6251 0, "Bytes received in 65 to 127 byte packets");
6253 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6254 "stat_EtherStatsPktsRx128Octetsto255Octets",
6255 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6256 0, "Bytes received in 128 to 255 byte packets");
6258 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6259 "stat_EtherStatsPktsRx256Octetsto511Octets",
6260 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6261 0, "Bytes received in 256 to 511 byte packets");
6263 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6264 "stat_EtherStatsPktsRx512Octetsto1023Octets",
6265 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6266 0, "Bytes received in 512 to 1023 byte packets");
6268 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6269 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
6270 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6271 0, "Bytes received in 1024 t0 1522 byte packets");
6273 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6274 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
6275 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6276 0, "Bytes received in 1523 to 9022 byte packets");
6278 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6279 "stat_EtherStatsPktsTx64Octets",
6280 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6281 0, "Bytes sent in 64 byte packets");
6283 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6284 "stat_EtherStatsPktsTx65Octetsto127Octets",
6285 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6286 0, "Bytes sent in 65 to 127 byte packets");
6288 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6289 "stat_EtherStatsPktsTx128Octetsto255Octets",
6290 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6291 0, "Bytes sent in 128 to 255 byte packets");
6293 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6294 "stat_EtherStatsPktsTx256Octetsto511Octets",
6295 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6296 0, "Bytes sent in 256 to 511 byte packets");
6298 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6299 "stat_EtherStatsPktsTx512Octetsto1023Octets",
6300 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6301 0, "Bytes sent in 512 to 1023 byte packets");
6303 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6304 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
6305 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6306 0, "Bytes sent in 1024 to 1522 byte packets");
6308 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6309 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
6310 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6311 0, "Bytes sent in 1523 to 9022 byte packets");
6313 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6314 "stat_XonPauseFramesReceived",
6315 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6316 0, "XON pause frames receved");
6318 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6319 "stat_XoffPauseFramesReceived",
6320 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6321 0, "XOFF pause frames received");
6323 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6325 CTLFLAG_RD, &sc->stat_OutXonSent,
6326 0, "XON pause frames sent");
6328 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6330 CTLFLAG_RD, &sc->stat_OutXoffSent,
6331 0, "XOFF pause frames sent");
6333 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6334 "stat_FlowControlDone",
6335 CTLFLAG_RD, &sc->stat_FlowControlDone,
6336 0, "Flow control done");
6338 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6339 "stat_MacControlFramesReceived",
6340 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6341 0, "MAC control frames received");
6343 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6344 "stat_XoffStateEntered",
6345 CTLFLAG_RD, &sc->stat_XoffStateEntered,
6346 0, "XOFF state entered");
6348 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6349 "stat_IfInFramesL2FilterDiscards",
6350 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6351 0, "Received L2 packets discarded");
6353 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6354 "stat_IfInRuleCheckerDiscards",
6355 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6356 0, "Received packets discarded by rule");
6358 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6359 "stat_IfInFTQDiscards",
6360 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6361 0, "Received packet FTQ discards");
6363 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6364 "stat_IfInMBUFDiscards",
6365 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6366 0, "Received packets discarded due to lack of controller buffer memory");
6368 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6369 "stat_IfInRuleCheckerP4Hit",
6370 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6371 0, "Received packets rule checker hits");
6373 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6374 "stat_CatchupInRuleCheckerDiscards",
6375 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6376 0, "Received packets discarded in Catchup path");
6378 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6379 "stat_CatchupInFTQDiscards",
6380 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6381 0, "Received packets discarded in FTQ in Catchup path");
6383 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6384 "stat_CatchupInMBUFDiscards",
6385 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6386 0, "Received packets discarded in controller buffer memory in Catchup path");
6388 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6389 "stat_CatchupInRuleCheckerP4Hit",
6390 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6391 0, "Received packets rule checker hits in Catchup path");
6393 SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
6395 CTLFLAG_RD, &sc->com_no_buffers,
6396 0, "Valid packets received but no RX buffers available");
6400 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
6402 struct bce_softc *sc = arg1;
6404 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6405 &sc->bce_tx_quick_cons_trip_int,
6406 BCE_COALMASK_TX_BDS_INT);
6410 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
6412 struct bce_softc *sc = arg1;
6414 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6415 &sc->bce_tx_quick_cons_trip,
6416 BCE_COALMASK_TX_BDS);
6420 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
6422 struct bce_softc *sc = arg1;
6424 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6425 &sc->bce_tx_ticks_int,
6426 BCE_COALMASK_TX_TICKS_INT);
6430 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
6432 struct bce_softc *sc = arg1;
6434 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6436 BCE_COALMASK_TX_TICKS);
6440 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
6442 struct bce_softc *sc = arg1;
6444 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6445 &sc->bce_rx_quick_cons_trip_int,
6446 BCE_COALMASK_RX_BDS_INT);
6450 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
6452 struct bce_softc *sc = arg1;
6454 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6455 &sc->bce_rx_quick_cons_trip,
6456 BCE_COALMASK_RX_BDS);
6460 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
6462 struct bce_softc *sc = arg1;
6464 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6465 &sc->bce_rx_ticks_int,
6466 BCE_COALMASK_RX_TICKS_INT);
6470 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
6472 struct bce_softc *sc = arg1;
6474 return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6476 BCE_COALMASK_RX_TICKS);
6480 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
6481 uint32_t coalchg_mask)
6483 struct bce_softc *sc = arg1;
6484 struct ifnet *ifp = &sc->arpcom.ac_if;
6487 ifnet_serialize_all(ifp);
6490 error = sysctl_handle_int(oidp, &v, 0, req);
6491 if (!error && req->newptr != NULL) {
6496 sc->bce_coalchg_mask |= coalchg_mask;
6498 /* Commit changes */
6499 bce_coal_change(sc);
6503 ifnet_deserialize_all(ifp);
6508 bce_coal_change(struct bce_softc *sc)
6510 struct ifnet *ifp = &sc->arpcom.ac_if;
6513 ASSERT_SERIALIZED(&sc->main_serialize);
6515 if ((ifp->if_flags & IFF_RUNNING) == 0) {
6516 sc->bce_coalchg_mask = 0;
6520 if (sc->bce_coalchg_mask &
6521 (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
6522 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
6523 (sc->bce_tx_quick_cons_trip_int << 16) |
6524 sc->bce_tx_quick_cons_trip);
6525 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6528 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6530 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
6531 (sc->bce_tx_quick_cons_trip_int << 16) |
6532 sc->bce_tx_quick_cons_trip);
6535 if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
6536 sc->bce_tx_quick_cons_trip,
6537 sc->bce_tx_quick_cons_trip_int);
6541 if (sc->bce_coalchg_mask &
6542 (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
6543 REG_WR(sc, BCE_HC_TX_TICKS,
6544 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6545 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6548 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6550 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
6551 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6554 if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
6555 sc->bce_tx_ticks, sc->bce_tx_ticks_int);
6559 if (sc->bce_coalchg_mask &
6560 (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
6561 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
6562 (sc->bce_rx_quick_cons_trip_int << 16) |
6563 sc->bce_rx_quick_cons_trip);
6564 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6567 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6569 REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
6570 (sc->bce_rx_quick_cons_trip_int << 16) |
6571 sc->bce_rx_quick_cons_trip);
6574 if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
6575 sc->bce_rx_quick_cons_trip,
6576 sc->bce_rx_quick_cons_trip_int);
6580 if (sc->bce_coalchg_mask &
6581 (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
6582 REG_WR(sc, BCE_HC_RX_TICKS,
6583 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6584 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6587 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6589 REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
6590 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6593 if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
6594 sc->bce_rx_ticks, sc->bce_rx_ticks_int);
6598 sc->bce_coalchg_mask = 0;
6602 bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
6603 uint16_t *flags0, uint16_t *mss0)
6607 int thoff, iphlen, hoff;
6610 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
6612 hoff = m->m_pkthdr.csum_lhlen;
6613 iphlen = m->m_pkthdr.csum_iphlen;
6614 thoff = m->m_pkthdr.csum_thlen;
6616 KASSERT(hoff >= sizeof(struct ether_header),
6617 ("invalid ether header len %d", hoff));
6618 KASSERT(iphlen >= sizeof(struct ip),
6619 ("invalid ip header len %d", iphlen));
6620 KASSERT(thoff >= sizeof(struct tcphdr),
6621 ("invalid tcp header len %d", thoff));
6623 if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
6624 m = m_pullup(m, hoff + iphlen + thoff);
6632 /* Set the LSO flag in the TX BD */
6633 flags = TX_BD_FLAGS_SW_LSO;
6635 /* Set the length of IP + TCP options (in 32 bit words) */
6636 flags |= (((iphlen + thoff -
6637 sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);
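/*
 * Worked example: with no IP options (iphlen == 20) and 12 bytes of
 * TCP options (thoff == 32), the expression above yields
 * (20 + 32 - 20 - 20) >> 2 == 3 32-bit words, i.e. 3 << 8 is merged
 * into the BD flags.
 */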
6639 *mss0 = htole16(m->m_pkthdr.tso_segsz);
6646 bce_setup_serialize(struct bce_softc *sc)
6651 * Allocate serializer array
6654 /* Main + TX + RX */
6655 sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt;
6658 kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
6659 M_DEVBUF, M_WAITOK | M_ZERO);
6664 * NOTE: Order is critical
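/*
 * The main serializer must occupy the first slot:
 * bce_serialize_skipmain() and bce_deserialize_skipmain() below
 * enter/exit the array starting past that first entry.  The RX
 * serializers follow, then the TX serializers.
 */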
6669 KKASSERT(i < sc->serialize_cnt);
6670 sc->serializes[i++] = &sc->main_serialize;
6672 for (j = 0; j < sc->rx_ring_cnt; ++j) {
6673 KKASSERT(i < sc->serialize_cnt);
6674 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
6677 for (j = 0; j < sc->tx_ring_cnt; ++j) {
6678 KKASSERT(i < sc->serialize_cnt);
6679 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
6682 KKASSERT(i == sc->serialize_cnt);
6686 bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
6688 struct bce_softc *sc = ifp->if_softc;
6690 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, slz);
6694 bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6696 struct bce_softc *sc = ifp->if_softc;
6698 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, slz);
6702 bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6704 struct bce_softc *sc = ifp->if_softc;
6706 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
6713 bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
6714 boolean_t serialized)
6716 struct bce_softc *sc = ifp->if_softc;
6718 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
6722 #endif /* INVARIANTS */
6725 bce_serialize_skipmain(struct bce_softc *sc)
6727 lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
6731 bce_deserialize_skipmain(struct bce_softc *sc)
6733 lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
6737 bce_set_timer_cpuid(struct bce_softc *sc, boolean_t polling)
6740 sc->bce_timer_cpuid = 0; /* XXX */
6742 sc->bce_timer_cpuid = sc->bce_msix[0].msix_cpuid;
6746 bce_alloc_intr(struct bce_softc *sc)
6750 bce_try_alloc_msix(sc);
6751 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
6754 sc->bce_irq_type = pci_alloc_1intr(sc->bce_dev, bce_msi_enable,
6755 &sc->bce_irq_rid, &irq_flags);
6757 sc->bce_res_irq = bus_alloc_resource_any(sc->bce_dev, SYS_RES_IRQ,
6758 &sc->bce_irq_rid, irq_flags);
6759 if (sc->bce_res_irq == NULL) {
6760 device_printf(sc->bce_dev, "PCI map interrupt failed\n");
6763 sc->bce_msix[0].msix_cpuid = rman_get_cpuid(sc->bce_res_irq);
6764 sc->bce_msix[0].msix_serialize = &sc->main_serialize;
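/*
 * Even for legacy INTx or single-vector MSI, slot 0 of bce_msix[] is
 * filled in above, so that code such as bce_set_timer_cpuid() can look
 * up the interrupt cpuid and serializer without checking the interrupt
 * type.
 */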
6770 bce_try_alloc_msix(struct bce_softc *sc)
6772 struct bce_msix_data *msix;
6774 boolean_t setup = FALSE;
6776 if (sc->rx_ring_cnt == 1)
6779 msix = &sc->bce_msix[0];
6780 msix->msix_serialize = &sc->main_serialize;
6781 msix->msix_func = bce_intr_msi_oneshot;
6782 msix->msix_arg = sc;
6783 msix->msix_cpuid = if_ringmap_cpumap(sc->rx_rmap, 0);
6784 KKASSERT(msix->msix_cpuid < netisr_ncpus);
6785 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s combo",
6786 device_get_nameunit(sc->bce_dev));
6788 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6789 struct bce_rx_ring *rxr = &sc->rx_rings[i];
6791 msix = &sc->bce_msix[i];
6793 msix->msix_serialize = &rxr->rx_serialize;
6794 msix->msix_arg = rxr;
6795 msix->msix_cpuid = if_ringmap_cpumap(sc->rx_rmap,
6796 i % sc->rx_ring_cnt2);
6797 KKASSERT(msix->msix_cpuid < netisr_ncpus);
6799 if (i < sc->tx_ring_cnt) {
6800 msix->msix_func = bce_intr_msix_rxtx;
6801 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
6802 "%s rxtx%d", device_get_nameunit(sc->bce_dev), i);
6804 msix->msix_func = bce_intr_msix_rx;
6805 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
6806 "%s rx%d", device_get_nameunit(sc->bce_dev), i);
6813 bce_setup_msix_table(sc);
6814 REG_WR(sc, BCE_PCI_MSIX_CONTROL, BCE_MSIX_MAX - 1);
6815 REG_WR(sc, BCE_PCI_MSIX_TBL_OFF_BIR, BCE_PCI_GRC_WINDOW2_BASE);
6816 REG_WR(sc, BCE_PCI_MSIX_PBA_OFF_BIT, BCE_PCI_GRC_WINDOW3_BASE);
6818 REG_RD(sc, BCE_PCI_MSIX_CONTROL);
6820 error = pci_setup_msix(sc->bce_dev);
6822 device_printf(sc->bce_dev, "Setup MSI-X failed\n");
6827 for (i = 0; i < sc->rx_ring_cnt; ++i) {
6828 msix = &sc->bce_msix[i];
6830 error = pci_alloc_msix_vector(sc->bce_dev, i, &msix->msix_rid,
6833 device_printf(sc->bce_dev,
6834 "Unable to allocate MSI-X %d on cpu%d\n",
6835 i, msix->msix_cpuid);
6839 msix->msix_res = bus_alloc_resource_any(sc->bce_dev,
6840 SYS_RES_IRQ, &msix->msix_rid, RF_ACTIVE);
6841 if (msix->msix_res == NULL) {
6842 device_printf(sc->bce_dev,
6843 "Unable to allocate MSI-X %d resource\n", i);
6849 pci_enable_msix(sc->bce_dev);
6850 sc->bce_irq_type = PCI_INTR_TYPE_MSIX;
6853 bce_free_msix(sc, setup);
6857 bce_setup_ring_cnt(struct bce_softc *sc)
6859 int msix_enable, msix_cnt, msix_ring;
6860 int ring_max, ring_cnt;
6862 sc->rx_rmap = if_ringmap_alloc(sc->bce_dev, 1, 1);
6864 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709 &&
6865 BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5716)
6868 msix_enable = device_getenv_int(sc->bce_dev, "msix.enable",
6873 if (netisr_ncpus == 1)
6877 * One extra RX ring will be needed (see below), so make sure
6878 * that there are enough MSI-X vectors.
6880 msix_cnt = pci_msix_count(sc->bce_dev);
6883 msix_ring = msix_cnt - 1;
6886 * Setup RX ring count
6888 ring_max = BCE_RX_RING_MAX;
6889 if (ring_max > msix_ring)
6890 ring_max = msix_ring;
6891 ring_cnt = device_getenv_int(sc->bce_dev, "rx_rings", bce_rx_rings);
6893 if_ringmap_free(sc->rx_rmap);
6894 sc->rx_rmap = if_ringmap_alloc(sc->bce_dev, ring_cnt, ring_max);
6897 sc->rx_ring_cnt2 = if_ringmap_count(sc->rx_rmap);
6900 * Setup TX ring count
6903 * The TX ring count must not exceed the effective RSS RX ring
6904 * count, since the RX ring software data struct is used to save
6905 * the status index and various other MSI-X related state.
6907 ring_max = BCE_TX_RING_MAX;
6908 if (ring_max > sc->rx_ring_cnt2)
6909 ring_max = sc->rx_ring_cnt2;
6910 ring_cnt = device_getenv_int(sc->bce_dev, "tx_rings", bce_tx_rings);
6912 sc->tx_rmap = if_ringmap_alloc(sc->bce_dev, ring_cnt, ring_max);
6913 if_ringmap_align(sc->bce_dev, sc->rx_rmap, sc->tx_rmap);
6915 sc->tx_ring_cnt = if_ringmap_count(sc->tx_rmap);
6917 if (sc->rx_ring_cnt2 == 1) {
6919 * Don't use MSI-X if the effective RX ring count is 1.
6920 * If the effective RX ring count is 1, the TX ring count will
6921 * also be 1.  That RX ring and the TX ring must be bundled
6922 * into one MSI-X vector, so the hot path would be exactly the
6923 * same as using MSI.  Besides, the first RX ring would still
6924 * have to be fully populated even though it only accepts
6925 * packets whose RSS hash can't be calculated, e.g. ARP
6926 * packets, which is at the least a waste of resources.
6928 sc->rx_ring_cnt = 1;
6931 * One extra RX ring is allocated, since the first RX ring
6932 * could not be used for RSS hashed packets whose masked
6933 * hash is 0. The first RX ring is only used for packets
6934 * whose RSS hash could not be calculated, e.g. ARP packets.
6935 * This extra RX ring will be used for packets whose masked
6936 * hash is 0. The effective RX ring count involved in RSS
6937 * is still sc->rx_ring_cnt2.
6939 sc->rx_ring_cnt = sc->rx_ring_cnt2 + 1;
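/*
 * Example: rx_ring_cnt2 == 4 yields rx_ring_cnt == 5.  Ring 0 only
 * receives packets whose RSS hash could not be computed (e.g. ARP),
 * the remaining rings carry the RSS hashed traffic, and the last of
 * them also absorbs packets whose masked hash is 0.
 */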
6944 bce_free_msix(struct bce_softc *sc, boolean_t setup)
6948 KKASSERT(sc->rx_ring_cnt > 1);
6950 for (i = 0; i < sc->rx_ring_cnt; ++i) {
6951 struct bce_msix_data *msix = &sc->bce_msix[i];
6953 if (msix->msix_res != NULL) {
6954 bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
6955 msix->msix_rid, msix->msix_res);
6957 if (msix->msix_rid >= 0)
6958 pci_release_msix_vector(sc->bce_dev, msix->msix_rid);
6961 pci_teardown_msix(sc->bce_dev);
6965 bce_free_intr(struct bce_softc *sc)
6967 if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) {
6968 if (sc->bce_res_irq != NULL) {
6969 bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
6970 sc->bce_irq_rid, sc->bce_res_irq);
6972 if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
6973 pci_release_msi(sc->bce_dev);
6975 bce_free_msix(sc, TRUE);
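/*
 * Point PCI GRC windows 2 and 3 at the MSI-X vector table and PBA in
 * chip memory; bce_try_alloc_msix() then programs the MSI-X table/PBA
 * offsets (BCE_PCI_MSIX_TBL_OFF_BIR / BCE_PCI_MSIX_PBA_OFF_BIT) to
 * refer to those windows.
 */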
6980 bce_setup_msix_table(struct bce_softc *sc)
6982 REG_WR(sc, BCE_PCI_GRC_WINDOW_ADDR, BCE_PCI_GRC_WINDOW_ADDR_SEP_WIN);
6983 REG_WR(sc, BCE_PCI_GRC_WINDOW2_ADDR, BCE_MSIX_TABLE_ADDR);
6984 REG_WR(sc, BCE_PCI_GRC_WINDOW3_ADDR, BCE_MSIX_PBA_ADDR);
6988 bce_setup_intr(struct bce_softc *sc)
6990 void (*irq_handle)(void *);
6993 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
6994 return bce_setup_msix(sc);
6996 if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
6997 irq_handle = bce_intr_legacy;
6998 } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
6999 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
7000 BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
7001 irq_handle = bce_intr_msi_oneshot;
7002 sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
7004 irq_handle = bce_intr_msi;
7005 sc->bce_flags |= BCE_CHECK_MSI_FLAG;
7008 panic("%s: unsupported intr type %d",
7009 device_get_nameunit(sc->bce_dev), sc->bce_irq_type);
7012 error = bus_setup_intr(sc->bce_dev, sc->bce_res_irq, INTR_MPSAFE,
7013 irq_handle, sc, &sc->bce_intrhand, &sc->main_serialize);
7015 device_printf(sc->bce_dev, "Failed to setup IRQ!\n");
7023 bce_teardown_intr(struct bce_softc *sc)
7025 if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX)
7026 bus_teardown_intr(sc->bce_dev, sc->bce_res_irq, sc->bce_intrhand);
7028 bce_teardown_msix(sc, sc->rx_ring_cnt);
7032 bce_setup_msix(struct bce_softc *sc)
7036 for (i = 0; i < sc->rx_ring_cnt; ++i) {
7037 struct bce_msix_data *msix = &sc->bce_msix[i];
7040 error = bus_setup_intr_descr(sc->bce_dev, msix->msix_res,
7041 INTR_MPSAFE, msix->msix_func, msix->msix_arg,
7042 &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
7044 device_printf(sc->bce_dev, "could not set up %s "
7045 "interrupt handler.\n", msix->msix_desc);
7046 bce_teardown_msix(sc, i);
7054 bce_teardown_msix(struct bce_softc *sc, int msix_cnt)
7058 for (i = 0; i < msix_cnt; ++i) {
7059 struct bce_msix_data *msix = &sc->bce_msix[i];
7061 bus_teardown_intr(sc->bce_dev, msix->msix_res,
7067 bce_init_rss(struct bce_softc *sc)
7069 uint8_t key[BCE_RLUP_RSS_KEY_CNT * BCE_RLUP_RSS_KEY_SIZE];
7073 KKASSERT(sc->rx_ring_cnt > 2);
7076 * Configure RSS keys
7078 toeplitz_get_key(key, sizeof(key));
7079 for (i = 0; i < BCE_RLUP_RSS_KEY_CNT; ++i) {
7082 rss_key = BCE_RLUP_RSS_KEYVAL(key, i);
7083 BCE_RSS_DPRINTF(sc, 1, "rss_key%d 0x%08x\n", i, rss_key);
7085 REG_WR(sc, BCE_RLUP_RSS_KEY(i), rss_key);
7089 * Configure the redirect table
7092 * - The "queue ID" in redirect table is the software RX ring's
7093 * index _minus_ one.
7094 * - The last RX ring, whose "queue ID" is (sc->rx_ring_cnt - 2)
7095 * will be used for packets whose masked hash is 0.
7096 * (see also: comment in bce_setup_ring_cnt())
7098 if_ringmap_rdrtable(sc->rx_rmap, sc->rdr_table,
7099 BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
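/*
 * Each 32-bit word written to BCE_RLUP_RSS_DATA packs eight 4-bit
 * queue IDs (shift = (i % 8) * 4); a write command is issued through
 * BCE_RLUP_RSS_COMMAND once per group of eight entries, with (i >> 3)
 * selecting the group.
 */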
7100 for (i = 0; i < BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
7101 int shift = (i % 8) << 2, qid;
7103 qid = sc->rdr_table[i];
7104 KKASSERT(qid >= 0 && qid < sc->rx_ring_cnt2);
7108 qid = sc->rx_ring_cnt - 2;
7109 KKASSERT(qid < (sc->rx_ring_cnt - 1));
7111 tbl |= qid << shift;
7113 BCE_RSS_DPRINTF(sc, 1, "tbl 0x%08x\n", tbl);
7114 REG_WR(sc, BCE_RLUP_RSS_DATA, tbl);
7115 REG_WR(sc, BCE_RLUP_RSS_COMMAND, (i >> 3) |
7116 BCE_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
7117 BCE_RLUP_RSS_COMMAND_WRITE |
7118 BCE_RLUP_RSS_COMMAND_HASH_MASK);
7122 REG_WR(sc, BCE_RLUP_RSS_CONFIG,
7123 BCE_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI);
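/*
 * When polling takes over interrupt processing, the interrupt-path
 * quick consumer trips are temporarily forced down to 1 BD and
 * committed, so the status block is updated as soon as work is
 * available; the saved softc values are put back afterwards (only
 * the hardware keeps the 1-BD setting).
 */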
7127 bce_npoll_coal_change(struct bce_softc *sc)
7129 uint32_t old_rx_cons, old_tx_cons;
7131 old_rx_cons = sc->bce_rx_quick_cons_trip_int;
7132 old_tx_cons = sc->bce_tx_quick_cons_trip_int;
7133 sc->bce_rx_quick_cons_trip_int = 1;
7134 sc->bce_tx_quick_cons_trip_int = 1;
7136 sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
7137 BCE_COALMASK_RX_BDS_INT;
7138 bce_coal_change(sc);
7140 sc->bce_rx_quick_cons_trip_int = old_rx_cons;
7141 sc->bce_tx_quick_cons_trip_int = old_tx_cons;
7144 static struct pktinfo *
7145 bce_rss_pktinfo(struct pktinfo *pi, uint32_t status,
7146 const struct l2_fhdr *l2fhdr)
7148 /* Check for an IP datagram. */
7149 if ((status & L2_FHDR_STATUS_IP_DATAGRAM) == 0)
7152 /* Check if the IP checksum is valid. */
7153 if (l2fhdr->l2_fhdr_ip_xsum != 0xffff)
7156 /* Check for a valid TCP/UDP frame. */
7157 if (status & L2_FHDR_STATUS_TCP_SEGMENT) {
7158 if (status & L2_FHDR_ERRORS_TCP_XSUM)
7160 if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7162 pi->pi_l3proto = IPPROTO_TCP;
7163 } else if (status & L2_FHDR_STATUS_UDP_DATAGRAM) {
7164 if (status & L2_FHDR_ERRORS_UDP_XSUM)
7166 if (l2fhdr->l2_fhdr_tcp_udp_xsum != 0xffff)
7168 pi->pi_l3proto = IPPROTO_UDP;
7172 pi->pi_netisr = NETISR_IP;