2 * Copyright (c) 2006-2007 Broadcom Corporation
3 * David Christensen <davidch@broadcom.com>. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written consent.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
30 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
34 * The following controllers are supported by this driver:
38 * The following controllers are not supported by this driver:
40 * BCM5706S A0, A1, A2, A3
42 * BCM5708S A0, B0, B1, B2
46 #include "opt_polling.h"
48 #include <sys/param.h>
50 #include <sys/endian.h>
51 #include <sys/kernel.h>
52 #include <sys/interrupt.h>
54 #include <sys/malloc.h>
55 #include <sys/queue.h>
57 #include <sys/random.h>
60 #include <sys/serialize.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/sysctl.h>
66 #include <net/ethernet.h>
68 #include <net/if_arp.h>
69 #include <net/if_dl.h>
70 #include <net/if_media.h>
71 #include <net/if_types.h>
72 #include <net/ifq_var.h>
73 #include <net/vlan/if_vlan_var.h>
74 #include <net/vlan/if_vlan_ether.h>
76 #include <dev/netif/mii_layer/mii.h>
77 #include <dev/netif/mii_layer/miivar.h>
79 #include <bus/pci/pcireg.h>
80 #include <bus/pci/pcivar.h>
82 #include "miibus_if.h"
84 #include <dev/netif/bce/if_bcereg.h>
85 #include <dev/netif/bce/if_bcefw.h>
87 /****************************************************************************/
88 /* BCE Debug Options */
89 /****************************************************************************/
92 static uint32_t bce_debug = BCE_WARN;
96 * 1 = 1 in 2,147,483,648
97 * 256 = 1 in 8,388,608
98 * 2048 = 1 in 1,048,576
100 * 1048576 = 1 in 2,048
103 * 1073741824 = 1 in 2
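 *
 * For example (reading off the table above), setting
 * bce_debug_mbuf_allocation_failure to 2048 would make roughly one mbuf
 * allocation in every 1,048,576 attempts fail on purpose.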
105 * bce_debug_l2fhdr_status_check:
106 * How often the l2_fhdr frame error check will fail.
108 * bce_debug_unexpected_attention:
109 * How often the unexpected attention check will fail.
111 * bce_debug_mbuf_allocation_failure:
112 * How often to simulate an mbuf allocation failure.
114 * bce_debug_dma_map_addr_failure:
115 * How often to simulate a DMA mapping failure.
117 * bce_debug_bootcode_running_failure:
118 * How often to simulate a bootcode failure.
120 static int bce_debug_l2fhdr_status_check = 0;
121 static int bce_debug_unexpected_attention = 0;
122 static int bce_debug_mbuf_allocation_failure = 0;
123 static int bce_debug_dma_map_addr_failure = 0;
124 static int bce_debug_bootcode_running_failure = 0;
126 #endif /* BCE_DEBUG */
129 /****************************************************************************/
130 /* PCI Device ID Table */
132 /* Used by bce_probe() to identify the devices supported by this driver. */
133 /****************************************************************************/
134 #define BCE_DEVDESC_MAX 64
136 static struct bce_type bce_devs[] = {
137 /* BCM5706C Controllers and OEM boards. */
138 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
139 "HP NC370T Multifunction Gigabit Server Adapter" },
140 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
141 "HP NC370i Multifunction Gigabit Server Adapter" },
142 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID, PCI_ANY_ID,
143 "Broadcom NetXtreme II BCM5706 1000Base-T" },
145 /* BCM5706S controllers and OEM boards. */
146 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
147 "HP NC370F Multifunction Gigabit Server Adapter" },
148 { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
149 "Broadcom NetXtreme II BCM5706 1000Base-SX" },
151 /* BCM5708C controllers and OEM boards. */
152 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
153 "Broadcom NetXtreme II BCM5708 1000Base-T" },
155 /* BCM5708S controllers and OEM boards. */
156 { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
157 "Broadcom NetXtreme II BCM5708S 1000Base-T" },
162 /****************************************************************************/
163 /* Supported Flash NVRAM device data. */
164 /****************************************************************************/
165 static const struct flash_spec flash_table[] =
168 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
169 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
170 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
172 /* Expansion entry 0001 */
173 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
174 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
175 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
177 /* Saifun SA25F010 (non-buffered flash) */
178 /* strap, cfg1, & write1 need updates */
179 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
180 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
181 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
182 "Non-buffered flash (128kB)"},
183 /* Saifun SA25F020 (non-buffered flash) */
184 /* strap, cfg1, & write1 need updates */
185 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
186 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
187 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
188 "Non-buffered flash (256kB)"},
189 /* Expansion entry 0100 */
190 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
191 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
192 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
194 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
195 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
196 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
197 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
198 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
199 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
200 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
201 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
202 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
203 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
204 /* Saifun SA25F005 (non-buffered flash) */
205 /* strap, cfg1, & write1 need updates */
206 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
207 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
208 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
209 "Non-buffered flash (64kB)"},
211 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
212 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
213 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
215 /* Expansion entry 1001 */
216 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
217 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
218 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 /* Expansion entry 1010 */
221 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
222 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
223 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 /* ATMEL AT45DB011B (buffered flash) */
226 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
227 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
228 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
229 "Buffered flash (128kB)"},
230 /* Expansion entry 1100 */
231 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
232 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
233 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
235 /* Expansion entry 1101 */
236 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
237 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
238 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
240 /* Atmel Expansion entry 1110 */
241 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
242 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
243 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
244 "Entry 1110 (Atmel)"},
245 /* ATMEL AT45DB021B (buffered flash) */
246 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
247 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
248 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
249 "Buffered flash (256kB)"},
253 /****************************************************************************/
254 /* DragonFly device entry points. */
255 /****************************************************************************/
256 static int bce_probe(device_t);
257 static int bce_attach(device_t);
258 static int bce_detach(device_t);
259 static void bce_shutdown(device_t);
261 /****************************************************************************/
262 /* BCE Debug Data Structure Dump Routines */
263 /****************************************************************************/
265 static void bce_dump_mbuf(struct bce_softc *, struct mbuf *);
266 static void bce_dump_tx_mbuf_chain(struct bce_softc *, int, int);
267 static void bce_dump_rx_mbuf_chain(struct bce_softc *, int, int);
268 static void bce_dump_txbd(struct bce_softc *, int, struct tx_bd *);
269 static void bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *);
270 static void bce_dump_l2fhdr(struct bce_softc *, int,
271 struct l2_fhdr *) __unused;
272 static void bce_dump_tx_chain(struct bce_softc *, int, int);
273 static void bce_dump_rx_chain(struct bce_softc *, int, int);
274 static void bce_dump_status_block(struct bce_softc *);
275 static void bce_dump_driver_state(struct bce_softc *);
276 static void bce_dump_stats_block(struct bce_softc *) __unused;
277 static void bce_dump_hw_state(struct bce_softc *);
278 static void bce_dump_txp_state(struct bce_softc *);
279 static void bce_dump_rxp_state(struct bce_softc *) __unused;
280 static void bce_dump_tpat_state(struct bce_softc *) __unused;
281 static void bce_freeze_controller(struct bce_softc *) __unused;
282 static void bce_unfreeze_controller(struct bce_softc *) __unused;
283 static void bce_breakpoint(struct bce_softc *);
284 #endif /* BCE_DEBUG */
287 /****************************************************************************/
288 /* BCE Register/Memory Access Routines */
289 /****************************************************************************/
290 static uint32_t bce_reg_rd_ind(struct bce_softc *, uint32_t);
291 static void bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
292 static void bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
293 static int bce_miibus_read_reg(device_t, int, int);
294 static int bce_miibus_write_reg(device_t, int, int, int);
295 static void bce_miibus_statchg(device_t);
298 /****************************************************************************/
299 /* BCE NVRAM Access Routines */
300 /****************************************************************************/
301 static int bce_acquire_nvram_lock(struct bce_softc *);
302 static int bce_release_nvram_lock(struct bce_softc *);
303 static void bce_enable_nvram_access(struct bce_softc *);
304 static void bce_disable_nvram_access(struct bce_softc *);
305 static int bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
307 static int bce_init_nvram(struct bce_softc *);
308 static int bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
309 static int bce_nvram_test(struct bce_softc *);
310 #ifdef BCE_NVRAM_WRITE_SUPPORT
311 static int bce_enable_nvram_write(struct bce_softc *);
312 static void bce_disable_nvram_write(struct bce_softc *);
313 static int bce_nvram_erase_page(struct bce_softc *, uint32_t);
314 static int bce_nvram_write_dword(struct bce_softc *, uint32_t, uint8_t *,
316 static int bce_nvram_write(struct bce_softc *, uint32_t, uint8_t *,
320 /****************************************************************************/
321 /* BCE DMA Allocate/Free Routines */
322 /****************************************************************************/
323 static int bce_dma_alloc(struct bce_softc *);
324 static void bce_dma_free(struct bce_softc *);
325 static void bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
327 /****************************************************************************/
328 /* BCE Firmware Synchronization and Load */
329 /****************************************************************************/
330 static int bce_fw_sync(struct bce_softc *, uint32_t);
331 static void bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
333 static void bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
335 static void bce_init_cpus(struct bce_softc *);
337 static void bce_stop(struct bce_softc *);
338 static int bce_reset(struct bce_softc *, uint32_t);
339 static int bce_chipinit(struct bce_softc *);
340 static int bce_blockinit(struct bce_softc *);
341 static int bce_newbuf_std(struct bce_softc *, uint16_t *, uint16_t *,
343 static void bce_setup_rxdesc_std(struct bce_softc *, uint16_t, uint32_t *);
345 static int bce_init_tx_chain(struct bce_softc *);
346 static int bce_init_rx_chain(struct bce_softc *);
347 static void bce_free_rx_chain(struct bce_softc *);
348 static void bce_free_tx_chain(struct bce_softc *);
350 static int bce_encap(struct bce_softc *, struct mbuf **);
351 static void bce_start(struct ifnet *);
352 static int bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
353 static void bce_watchdog(struct ifnet *);
354 static int bce_ifmedia_upd(struct ifnet *);
355 static void bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
356 static void bce_init(void *);
357 static void bce_mgmt_init(struct bce_softc *);
359 static void bce_init_ctx(struct bce_softc *);
360 static void bce_get_mac_addr(struct bce_softc *);
361 static void bce_set_mac_addr(struct bce_softc *);
362 static void bce_phy_intr(struct bce_softc *);
363 static void bce_rx_intr(struct bce_softc *, int);
364 static void bce_tx_intr(struct bce_softc *);
365 static void bce_disable_intr(struct bce_softc *);
366 static void bce_enable_intr(struct bce_softc *);
368 #ifdef DEVICE_POLLING
369 static void bce_poll(struct ifnet *, enum poll_cmd, int);
371 static void bce_intr(void *);
372 static void bce_set_rx_mode(struct bce_softc *);
373 static void bce_stats_update(struct bce_softc *);
374 static void bce_tick(void *);
375 static void bce_tick_serialized(struct bce_softc *);
376 static void bce_add_sysctls(struct bce_softc *);
378 static void bce_coal_change(struct bce_softc *);
379 static int bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
380 static int bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
381 static int bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
382 static int bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
383 static int bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
384 static int bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
385 static int bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
386 static int bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
387 static int bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
388 uint32_t *, uint32_t);
392 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023. Linux's bnx2
393 * takes 1023 as the TX ticks limit. However, using 1023 will
394 * cause 5708(B2) to generate extra interrupts (~2000/s) even when
395 * there is _no_ network activity on the NIC.
397 static uint32_t bce_tx_bds_int = 255; /* bcm: 20 */
398 static uint32_t bce_tx_bds = 255; /* bcm: 20 */
399 static uint32_t bce_tx_ticks_int = 1022; /* bcm: 80 */
400 static uint32_t bce_tx_ticks = 1022; /* bcm: 80 */
401 static uint32_t bce_rx_bds_int = 128; /* bcm: 6 */
402 static uint32_t bce_rx_bds = 128; /* bcm: 6 */
403 static uint32_t bce_rx_ticks_int = 125; /* bcm: 18 */
404 static uint32_t bce_rx_ticks = 125; /* bcm: 18 */
406 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
407 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
408 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
409 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
410 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
411 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
412 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
413 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
415 /****************************************************************************/
416 /* DragonFly device dispatch table. */
417 /****************************************************************************/
418 static device_method_t bce_methods[] = {
419 /* Device interface */
420 DEVMETHOD(device_probe, bce_probe),
421 DEVMETHOD(device_attach, bce_attach),
422 DEVMETHOD(device_detach, bce_detach),
423 DEVMETHOD(device_shutdown, bce_shutdown),
426 DEVMETHOD(bus_print_child, bus_generic_print_child),
427 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
430 DEVMETHOD(miibus_readreg, bce_miibus_read_reg),
431 DEVMETHOD(miibus_writereg, bce_miibus_write_reg),
432 DEVMETHOD(miibus_statchg, bce_miibus_statchg),
437 static driver_t bce_driver = {
440 sizeof(struct bce_softc)
443 static devclass_t bce_devclass;
446 DECLARE_DUMMY_MODULE(if_bce);
447 MODULE_DEPEND(bce, miibus, 1, 1, 1);
448 DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, 0, 0);
449 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
452 /****************************************************************************/
453 /* Device probe function. */
455 /* Compares the device to the driver's list of supported devices and */
456 /* reports back to the OS whether this is the right driver for the device. */
459 /* BUS_PROBE_DEFAULT on success, positive value on failure. */
460 /****************************************************************************/
462 bce_probe(device_t dev)
465 uint16_t vid, did, svid, sdid;
467 /* Get the data for the device to be probed. */
468 vid = pci_get_vendor(dev);
469 did = pci_get_device(dev);
470 svid = pci_get_subvendor(dev);
471 sdid = pci_get_subdevice(dev);
473 /* Look through the list of known devices for a match. */
474 for (t = bce_devs; t->bce_name != NULL; ++t) {
475 if (vid == t->bce_vid && did == t->bce_did &&
476 (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
477 (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
478 uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
481 descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
483 /* Print out the device identity. */
484 ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
486 ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
488 device_set_desc_copy(dev, descbuf);
489 kfree(descbuf, M_TEMP);
497 /****************************************************************************/
498 /* Device attach function. */
500 /* Allocates device resources, performs secondary chip identification, */
501 /* resets and initializes the hardware, and initializes driver instance */
505 /* 0 on success, positive value on failure. */
506 /****************************************************************************/
508 bce_attach(device_t dev)
510 struct bce_softc *sc = device_get_softc(dev);
511 struct ifnet *ifp = &sc->arpcom.ac_if;
519 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
521 pci_enable_busmaster(dev);
523 /* Allocate PCI memory resources. */
525 sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
526 RF_ACTIVE | PCI_RF_DENSE);
527 if (sc->bce_res_mem == NULL) {
528 device_printf(dev, "PCI memory allocation failed\n");
531 sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
532 sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
534 /* Allocate PCI IRQ resources. */
536 count = pci_msi_count(dev);
537 if (count == 1 && pci_alloc_msi(dev, &count) == 0) {
539 sc->bce_flags |= BCE_USING_MSI_FLAG;
543 sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
544 RF_SHAREABLE | RF_ACTIVE);
545 if (sc->bce_res_irq == NULL) {
546 device_printf(dev, "PCI map interrupt failed\n");
552 * Configure byte swap and enable indirect register access.
553 * Rely on CPU to do target byte swapping on big endian systems.
554 * Access to registers outside of PCI configuration space is not
555 * valid until this is done.
557 pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
558 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
559 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
561 /* Save ASIC revision info. */
562 sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
564 /* Weed out any non-production controller revisions. */
565 switch(BCE_CHIP_ID(sc)) {
566 case BCE_CHIP_ID_5706_A0:
567 case BCE_CHIP_ID_5706_A1:
568 case BCE_CHIP_ID_5708_A0:
569 case BCE_CHIP_ID_5708_B0:
570 device_printf(dev, "Unsupported chip id 0x%08x!\n",
577 * The embedded PCIe to PCI-X bridge (EPB)
578 * in the 5708 cannot address memory above
579 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
581 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
582 sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
584 sc->max_bus_addr = BUS_SPACE_MAXADDR;
587 * Find the base address for shared memory access.
588 * Newer versions of bootcode use a signature and offset
589 * while older versions use a fixed address.
591 val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
592 if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
593 sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
595 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
597 DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);
599 /* Get PCI bus information (speed and type). */
600 val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
601 if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
604 sc->bce_flags |= BCE_PCIX_FLAG;
606 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
607 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
609 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
610 sc->bus_speed_mhz = 133;
613 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
614 sc->bus_speed_mhz = 100;
617 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
618 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
619 sc->bus_speed_mhz = 66;
622 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
623 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
624 sc->bus_speed_mhz = 50;
627 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
628 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
629 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
630 sc->bus_speed_mhz = 33;
634 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
635 sc->bus_speed_mhz = 66;
637 sc->bus_speed_mhz = 33;
640 if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
641 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
643 device_printf(dev, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
645 ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
646 (BCE_CHIP_ID(sc) & 0x0ff0) >> 4,
647 (sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : "",
648 (sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
649 "32-bit" : "64-bit", sc->bus_speed_mhz);
651 /* Reset the controller. */
652 rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
656 /* Initialize the controller. */
657 rc = bce_chipinit(sc);
659 device_printf(dev, "Controller initialization failed!\n");
663 /* Perform NVRAM test. */
664 rc = bce_nvram_test(sc);
666 device_printf(dev, "NVRAM test failed!\n");
670 /* Fetch the permanent Ethernet MAC address. */
671 bce_get_mac_addr(sc);
674 * Trip points control how many BDs
675 * should be ready before generating an
676 * interrupt while ticks control how long
677 * a BD can sit in the chain before
678 * generating an interrupt. Set the default
679 * values for the RX and TX rings.
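	 * For instance, with the defaults above an RX interrupt is generated
	 * once bce_rx_bds (128) receive BDs have completed or once a completed
	 * BD has waited bce_rx_ticks (125) ticks, whichever happens first.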
683 /* Force more frequent interrupts. */
684 sc->bce_tx_quick_cons_trip_int = 1;
685 sc->bce_tx_quick_cons_trip = 1;
686 sc->bce_tx_ticks_int = 0;
687 sc->bce_tx_ticks = 0;
689 sc->bce_rx_quick_cons_trip_int = 1;
690 sc->bce_rx_quick_cons_trip = 1;
691 sc->bce_rx_ticks_int = 0;
692 sc->bce_rx_ticks = 0;
694 sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
695 sc->bce_tx_quick_cons_trip = bce_tx_bds;
696 sc->bce_tx_ticks_int = bce_tx_ticks_int;
697 sc->bce_tx_ticks = bce_tx_ticks;
699 sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
700 sc->bce_rx_quick_cons_trip = bce_rx_bds;
701 sc->bce_rx_ticks_int = bce_rx_ticks_int;
702 sc->bce_rx_ticks = bce_rx_ticks;
705 /* Update statistics once every second. */
706 sc->bce_stats_ticks = 1000000 & 0xffff00;
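	/*
	 * The low byte is masked off, presumably because the low 8 bits of
	 * the statistics ticks register must be zero.
	 */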
709 * The copper based NetXtreme II controllers
710 * use an integrated PHY at address 1 while
711 * the SerDes controllers use a PHY at
714 sc->bce_phy_addr = 1;
716 if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
717 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
718 sc->bce_flags |= BCE_NO_WOL_FLAG;
719 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
720 sc->bce_phy_addr = 2;
721 val = REG_RD_IND(sc, sc->bce_shmem_base +
722 BCE_SHARED_HW_CFG_CONFIG);
723 if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
724 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
728 /* Allocate DMA memory resources. */
729 rc = bce_dma_alloc(sc);
731 device_printf(dev, "DMA resource allocation failed!\n");
735 /* Initialize the ifnet interface. */
737 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
738 ifp->if_ioctl = bce_ioctl;
739 ifp->if_start = bce_start;
740 ifp->if_init = bce_init;
741 ifp->if_watchdog = bce_watchdog;
742 #ifdef DEVICE_POLLING
743 ifp->if_poll = bce_poll;
745 ifp->if_mtu = ETHERMTU;
746 ifp->if_hwassist = BCE_IF_HWASSIST;
747 ifp->if_capabilities = BCE_IF_CAPABILITIES;
748 ifp->if_capenable = ifp->if_capabilities;
749 ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD);
750 ifq_set_ready(&ifp->if_snd);
752 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
753 ifp->if_baudrate = IF_Gbps(2.5);
755 ifp->if_baudrate = IF_Gbps(1);
757 /* Assume a standard 1500 byte MTU size for mbuf allocations. */
758 sc->mbuf_alloc_size = MCLBYTES;
760 /* Look for our PHY. */
761 rc = mii_phy_probe(dev, &sc->bce_miibus,
762 bce_ifmedia_upd, bce_ifmedia_sts);
764 device_printf(dev, "PHY probe failed!\n");
768 /* Attach to the Ethernet interface list. */
769 ether_ifattach(ifp, sc->eaddr, NULL);
771 callout_init(&sc->bce_stat_ch);
773 /* Hookup IRQ last. */
774 rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, bce_intr, sc,
775 &sc->bce_intrhand, ifp->if_serializer);
777 device_printf(dev, "Failed to setup IRQ!\n");
782 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bce_res_irq));
783 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
785 /* Print some important debugging info. */
786 DBRUN(BCE_INFO, bce_dump_driver_state(sc));
788 /* Add the supported sysctls to the kernel. */
791 /* Get the firmware running so IPMI still works */
801 /****************************************************************************/
802 /* Device detach function. */
804 /* Stops the controller, resets the controller, and releases resources. */
807 /* 0 on success, positive value on failure. */
808 /****************************************************************************/
810 bce_detach(device_t dev)
812 struct bce_softc *sc = device_get_softc(dev);
814 if (device_is_attached(dev)) {
815 struct ifnet *ifp = &sc->arpcom.ac_if;
817 /* Stop and reset the controller. */
818 lwkt_serialize_enter(ifp->if_serializer);
820 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
821 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
822 lwkt_serialize_exit(ifp->if_serializer);
827 /* If we have a child device on the MII bus remove it too. */
829 device_delete_child(dev, sc->bce_miibus);
830 bus_generic_detach(dev);
832 if (sc->bce_res_irq != NULL) {
833 bus_release_resource(dev, SYS_RES_IRQ,
834 sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
839 if (sc->bce_flags & BCE_USING_MSI_FLAG)
840 pci_release_msi(dev);
843 if (sc->bce_res_mem != NULL) {
844 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
850 if (sc->bce_sysctl_tree != NULL)
851 sysctl_ctx_free(&sc->bce_sysctl_ctx);
857 /****************************************************************************/
858 /* Device shutdown function. */
860 /* Stops and resets the controller. */
864 /****************************************************************************/
866 bce_shutdown(device_t dev)
868 struct bce_softc *sc = device_get_softc(dev);
869 struct ifnet *ifp = &sc->arpcom.ac_if;
871 lwkt_serialize_enter(ifp->if_serializer);
873 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
874 lwkt_serialize_exit(ifp->if_serializer);
878 /****************************************************************************/
879 /* Indirect register read. */
881 /* Reads NetXtreme II registers using an index/data register pair in PCI */
882 /* configuration space. Using this mechanism avoids issues with posted */
883 /* reads but is much slower than memory-mapped I/O. */
886 /* The value of the register. */
887 /****************************************************************************/
889 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
891 device_t dev = sc->bce_dev;
893 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
897 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
898 DBPRINT(sc, BCE_EXCESSIVE,
899 "%s(); offset = 0x%08X, val = 0x%08X\n",
900 __func__, offset, val);
904 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
909 /****************************************************************************/
910 /* Indirect register write. */
912 /* Writes NetXtreme II registers using an index/data register pair in PCI */
913 /* configuration space. Using this mechanism avoids issues with posted */
914 /* writes but is much slower than memory-mapped I/O. */
918 /****************************************************************************/
920 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
922 device_t dev = sc->bce_dev;
924 DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
925 __func__, offset, val);
927 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
928 pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
932 /****************************************************************************/
933 /* Context memory write. */
935 /* The NetXtreme II controller uses context memory to track connection */
936 /* information for L2 and higher network protocols. */
940 /****************************************************************************/
942 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t offset,
945 DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
946 "val = 0x%08X\n", __func__, cid_addr, offset, val);
949 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
950 REG_WR(sc, BCE_CTX_DATA, val);
954 /****************************************************************************/
955 /* PHY register read. */
957 /* Implements register reads on the MII bus. */
960 /* The value of the register. */
961 /****************************************************************************/
963 bce_miibus_read_reg(device_t dev, int phy, int reg)
965 struct bce_softc *sc = device_get_softc(dev);
969 /* Make sure we are accessing the correct PHY address. */
970 if (phy != sc->bce_phy_addr) {
971 DBPRINT(sc, BCE_VERBOSE,
972 "Invalid PHY address %d for PHY read!\n", phy);
976 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
977 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
978 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
980 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
981 REG_RD(sc, BCE_EMAC_MDIO_MODE);
986 val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
987 BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
988 BCE_EMAC_MDIO_COMM_START_BUSY;
989 REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
991 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
994 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
995 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
998 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
999 val &= BCE_EMAC_MDIO_COMM_DATA;
1004 if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1005 if_printf(&sc->arpcom.ac_if,
1006 "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1010 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1013 DBPRINT(sc, BCE_EXCESSIVE,
1014 "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1015 __func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff);
1017 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1018 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1019 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1021 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1022 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1026 return (val & 0xffff);
1030 /****************************************************************************/
1031 /* PHY register write. */
1033 /* Implements register writes on the MII bus. */
1036 /* The value of the register. */
1037 /****************************************************************************/
1039 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1041 struct bce_softc *sc = device_get_softc(dev);
1045 /* Make sure we are accessing the correct PHY address. */
1046 if (phy != sc->bce_phy_addr) {
1047 DBPRINT(sc, BCE_WARN,
1048 "Invalid PHY address %d for PHY write!\n", phy);
1052 DBPRINT(sc, BCE_EXCESSIVE,
1053 "%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
1054 __func__, phy, (uint16_t)(reg & 0xffff),
1055 (uint16_t)(val & 0xffff));
1057 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1058 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1059 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1061 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1062 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1067 val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1068 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1069 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1070 REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1072 for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1075 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1076 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1082 if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1083 if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
1085 if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1086 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1087 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1089 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1090 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1098 /****************************************************************************/
1099 /* MII bus status change. */
1101 /* Called by the MII bus driver when the PHY establishes link to set the */
1102 /* MAC interface registers. */
1106 /****************************************************************************/
1108 bce_miibus_statchg(device_t dev)
1110 struct bce_softc *sc = device_get_softc(dev);
1111 struct mii_data *mii = device_get_softc(sc->bce_miibus);
1113 DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n",
1114 mii->mii_media_active);
1117 /* Decode the interface media flags. */
1118 if_printf(&sc->arpcom.ac_if, "Media: ( ");
1119 switch(IFM_TYPE(mii->mii_media_active)) {
1121 kprintf("Ethernet )");
1124 kprintf("Unknown )");
1128 kprintf(" Media Options: ( ");
1129 switch(IFM_SUBTYPE(mii->mii_media_active)) {
1131 kprintf("Autoselect )");
1134 kprintf("Manual )");
1140 kprintf("10Base-T )");
1143 kprintf("100Base-TX )");
1146 kprintf("1000Base-SX )");
1149 kprintf("1000Base-T )");
1156 kprintf(" Global Options: (");
1157 if (mii->mii_media_active & IFM_FDX)
1158 kprintf(" FullDuplex");
1159 if (mii->mii_media_active & IFM_HDX)
1160 kprintf(" HalfDuplex");
1161 if (mii->mii_media_active & IFM_LOOP)
1162 kprintf(" Loopback");
1163 if (mii->mii_media_active & IFM_FLAG0)
1165 if (mii->mii_media_active & IFM_FLAG1)
1167 if (mii->mii_media_active & IFM_FLAG2)
1172 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1175 * Set MII or GMII interface based on the speed negotiated
1178 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
1179 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1180 DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
1181 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1183 DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
1184 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1188 * Set half or full duplex based on the duplex mode negotiated
1191 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1192 DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
1193 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1195 DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
1196 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1201 /****************************************************************************/
1202 /* Acquire NVRAM lock. */
1204 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
1205 /* Lock 0 is reserved, lock 1 is used by firmware, and lock 2 is */
1206 /* used by the driver. */
1209 /* 0 on success, positive value on failure. */
1210 /****************************************************************************/
1212 bce_acquire_nvram_lock(struct bce_softc *sc)
1217 DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1219 /* Request access to the flash interface. */
1220 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1221 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1222 val = REG_RD(sc, BCE_NVM_SW_ARB);
1223 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1229 if (j >= NVRAM_TIMEOUT_COUNT) {
1230 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1237 /****************************************************************************/
1238 /* Release NVRAM lock. */
1240 /* When the caller is finished accessing NVRAM the lock must be released. */
1241 /* Lock 0 is reserved, lock 1 is used by firmware, and lock 2 is */
1242 /* used by the driver. */
1245 /* 0 on success, positive value on failure. */
1246 /****************************************************************************/
1248 bce_release_nvram_lock(struct bce_softc *sc)
1253 DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1256 * Relinquish nvram interface.
1258 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1260 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1261 val = REG_RD(sc, BCE_NVM_SW_ARB);
1262 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1268 if (j >= NVRAM_TIMEOUT_COUNT) {
1269 DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n");
1276 #ifdef BCE_NVRAM_WRITE_SUPPORT
1277 /****************************************************************************/
1278 /* Enable NVRAM write access. */
1280 /* Before writing to NVRAM the caller must enable NVRAM writes. */
1283 /* 0 on success, positive value on failure. */
1284 /****************************************************************************/
1286 bce_enable_nvram_write(struct bce_softc *sc)
1290 DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1292 val = REG_RD(sc, BCE_MISC_CFG);
1293 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1295 if (!sc->bce_flash_info->buffered) {
1298 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1299 REG_WR(sc, BCE_NVM_COMMAND,
1300 BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1302 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1305 val = REG_RD(sc, BCE_NVM_COMMAND);
1306 if (val & BCE_NVM_COMMAND_DONE)
1310 if (j >= NVRAM_TIMEOUT_COUNT) {
1311 DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1319 /****************************************************************************/
1320 /* Disable NVRAM write access. */
1322 /* When the caller is finished writing to NVRAM write access must be */
1327 /****************************************************************************/
1329 bce_disable_nvram_write(struct bce_softc *sc)
1333 DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM write.\n");
1335 val = REG_RD(sc, BCE_MISC_CFG);
1336 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1338 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1341 /****************************************************************************/
1342 /* Enable NVRAM access. */
1344 /* Before accessing NVRAM for read or write operations the caller must */
1345 /* enable NVRAM access. */
1349 /****************************************************************************/
1351 bce_enable_nvram_access(struct bce_softc *sc)
1355 DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1357 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1358 /* Enable both bits, even on read. */
1359 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1360 val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1364 /****************************************************************************/
1365 /* Disable NVRAM access. */
1367 /* When the caller is finished accessing NVRAM access must be disabled. */
1371 /****************************************************************************/
1373 bce_disable_nvram_access(struct bce_softc *sc)
1377 DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1379 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1381 /* Disable both bits, even after read. */
1382 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1383 val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1387 #ifdef BCE_NVRAM_WRITE_SUPPORT
1388 /****************************************************************************/
1389 /* Erase NVRAM page before writing. */
1391 /* Non-buffered flash parts require that a page be erased before it is */
1395 /* 0 on success, positive value on failure. */
1396 /****************************************************************************/
1398 bce_nvram_erase_page(struct bce_softc *sc, uint32_t offset)
1403 /* Buffered flash doesn't require an erase. */
1404 if (sc->bce_flash_info->buffered)
1407 DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");
1409 /* Build an erase command. */
1410 cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1411 BCE_NVM_COMMAND_DOIT;
1414 * Clear the DONE bit separately, set the NVRAM address to erase,
1415 * and issue the erase command.
1417 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1418 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1419 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1421 /* Wait for completion. */
1422 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1427 val = REG_RD(sc, BCE_NVM_COMMAND);
1428 if (val & BCE_NVM_COMMAND_DONE)
1432 if (j >= NVRAM_TIMEOUT_COUNT) {
1433 DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1438 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1441 /****************************************************************************/
1442 /* Read a dword (32 bits) from NVRAM. */
1444 /* Read a 32 bit word from NVRAM. The caller is assumed to have already */
1445 /* obtained the NVRAM lock and enabled the controller for NVRAM access. */
1448 /* 0 on success and the 32 bit value read, positive value on failure. */
1449 /****************************************************************************/
1451 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1457 /* Build the command word. */
1458 cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1460 /* Calculate the offset for buffered flash. */
1461 if (sc->bce_flash_info->buffered) {
1462 offset = ((offset / sc->bce_flash_info->page_size) <<
1463 sc->bce_flash_info->page_bits) +
1464 (offset % sc->bce_flash_info->page_size);
1468 * Clear the DONE bit separately, set the address to read,
1469 * and issue the read.
1471 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1472 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1473 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1475 /* Wait for completion. */
1476 for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1481 val = REG_RD(sc, BCE_NVM_COMMAND);
1482 if (val & BCE_NVM_COMMAND_DONE) {
1483 val = REG_RD(sc, BCE_NVM_READ);
1486 memcpy(ret_val, &val, 4);
1491 /* Check for errors. */
1492 if (i >= NVRAM_TIMEOUT_COUNT) {
1493 if_printf(&sc->arpcom.ac_if,
1494 "Timeout error reading NVRAM at offset 0x%08X!\n",
1502 #ifdef BCE_NVRAM_WRITE_SUPPORT
1503 /****************************************************************************/
1504 /* Write a dword (32 bits) to NVRAM. */
1506 /* Write a 32 bit word to NVRAM. The caller is assumed to have already */
1507 /* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
1508 /* enabled NVRAM write access. */
1511 /* 0 on success, positive value on failure. */
1512 /****************************************************************************/
1514 bce_nvram_write_dword(struct bce_softc *sc, uint32_t offset, uint8_t *val,
1517 uint32_t cmd, val32;
1520 /* Build the command word. */
1521 cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;
1523 /* Calculate the offset for buffered flash. */
1524 if (sc->bce_flash_info->buffered) {
1525 offset = ((offset / sc->bce_flash_info->page_size) <<
1526 sc->bce_flash_info->page_bits) +
1527 (offset % sc->bce_flash_info->page_size);
1531 * Clear the DONE bit separately, convert NVRAM data to big-endian,
1532 * set the NVRAM address to write, and issue the write command
1534 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1535 memcpy(&val32, val, 4);
1536 val32 = htobe32(val32);
1537 REG_WR(sc, BCE_NVM_WRITE, val32);
1538 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1539 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1541 /* Wait for completion. */
1542 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1545 if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
1548 if (j >= NVRAM_TIMEOUT_COUNT) {
1549 if_printf(&sc->arpcom.ac_if,
1550 "Timeout error writing NVRAM at offset 0x%08X\n",
1556 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1559 /****************************************************************************/
1560 /* Initialize NVRAM access. */
1562 /* Identify the NVRAM device in use and prepare the NVRAM interface to */
1563 /* access that device. */
1566 /* 0 on success, positive value on failure. */
1567 /****************************************************************************/
1569 bce_init_nvram(struct bce_softc *sc)
1572 int j, entry_count, rc = 0;
1573 const struct flash_spec *flash;
1575 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
1577 /* Determine the selected interface. */
1578 val = REG_RD(sc, BCE_NVM_CFG1);
1580 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1583 * Flash reconfiguration is required to support additional
1584 * NVRAM devices not directly supported in hardware.
1585 * Check if the flash interface was reconfigured
1589 if (val & 0x40000000) {
1590 /* Flash interface reconfigured by bootcode. */
1592 DBPRINT(sc, BCE_INFO_LOAD,
1593 "%s(): Flash WAS reconfigured.\n", __func__);
1595 for (j = 0, flash = flash_table; j < entry_count;
1597 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1598 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1599 sc->bce_flash_info = flash;
1604 /* Flash interface not yet reconfigured. */
1607 DBPRINT(sc, BCE_INFO_LOAD,
1608 "%s(): Flash was NOT reconfigured.\n", __func__);
1610 if (val & (1 << 23))
1611 mask = FLASH_BACKUP_STRAP_MASK;
1613 mask = FLASH_STRAP_MASK;
1615 /* Look for the matching NVRAM device configuration data. */
1616 for (j = 0, flash = flash_table; j < entry_count;
1618 /* Check if the device matches any of the known devices. */
1619 if ((val & mask) == (flash->strapping & mask)) {
1620 /* Found a device match. */
1621 sc->bce_flash_info = flash;
1623 /* Request access to the flash interface. */
1624 rc = bce_acquire_nvram_lock(sc);
1628 /* Reconfigure the flash interface. */
1629 bce_enable_nvram_access(sc);
1630 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1631 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1632 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1633 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1634 bce_disable_nvram_access(sc);
1635 bce_release_nvram_lock(sc);
1641 /* Check if a matching device was found. */
1642 if (j == entry_count) {
1643 sc->bce_flash_info = NULL;
1644 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1648 /* Write the flash config data to the shared memory interface. */
1649 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2) &
1650 BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1652 sc->bce_flash_size = val;
1654 sc->bce_flash_size = sc->bce_flash_info->total_size;
1656 DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n",
1657 __func__, sc->bce_flash_info->total_size);
1659 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
1665 /****************************************************************************/
1666 /* Read an arbitrary range of data from NVRAM. */
1668 /* Prepares the NVRAM interface for access and reads the requested data */
1669 /* into the supplied buffer. */
1672 /* 0 on success and the data read, positive value on failure. */
1673 /****************************************************************************/
1675 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1678 uint32_t cmd_flags, offset32, len32, extra;
1684 /* Request access to the flash interface. */
1685 rc = bce_acquire_nvram_lock(sc);
1689 /* Enable access to flash interface */
1690 bce_enable_nvram_access(sc);
1698 /* XXX should we release nvram lock if read_dword() fails? */
1704 pre_len = 4 - (offset & 3);
1706 if (pre_len >= len32) {
1708 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1710 cmd_flags = BCE_NVM_COMMAND_FIRST;
1713 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1717 memcpy(ret_buf, buf + (offset & 3), pre_len);
1725 extra = 4 - (len32 & 3);
1726 len32 = (len32 + 4) & ~3;
1733 cmd_flags = BCE_NVM_COMMAND_LAST;
1735 cmd_flags = BCE_NVM_COMMAND_FIRST |
1736 BCE_NVM_COMMAND_LAST;
1738 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1740 memcpy(ret_buf, buf, 4 - extra);
1741 } else if (len32 > 0) {
1744 /* Read the first word. */
1748 cmd_flags = BCE_NVM_COMMAND_FIRST;
1750 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1752 /* Advance to the next dword. */
1757 while (len32 > 4 && rc == 0) {
1758 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1760 /* Advance to the next dword. */
1769 cmd_flags = BCE_NVM_COMMAND_LAST;
1770 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1772 memcpy(ret_buf, buf, 4 - extra);
1775 /* Disable access to flash interface and release the lock. */
1776 bce_disable_nvram_access(sc);
1777 bce_release_nvram_lock(sc);
1783 #ifdef BCE_NVRAM_WRITE_SUPPORT
1784 /****************************************************************************/
1785 /* Write an arbitrary range of data to NVRAM. */
1787 /* Prepares the NVRAM interface for write access and writes the requested */
1788 /* data from the supplied buffer. The caller is responsible for */
1789 /* calculating any appropriate CRCs. */
1792 /* 0 on success, positive value on failure. */
1793 /****************************************************************************/
1795 bce_nvram_write(struct bce_softc *sc, uint32_t offset, uint8_t *data_buf,
1798 uint32_t written, offset32, len32;
1799 uint8_t *buf, start[4], end[4];
1801 int align_start, align_end;
1807 align_start = (offset32 & 3);
1811 len32 += align_start;
1812 rc = bce_nvram_read(sc, offset32, start, 4);
1818 if (len32 > 4 || !align_start) {
1819 align_end = 4 - (len32 & 3);
1821 rc = bce_nvram_read(sc, offset32 + len32 - 4, end, 4);
1827 if (align_start || align_end) {
1828 buf = kmalloc(len32, M_DEVBUF, M_NOWAIT);
1832 memcpy(buf, start, 4);
1834 memcpy(buf + len32 - 4, end, 4);
1835 memcpy(buf + align_start, data_buf, buf_size);
1839 while (written < len32 && rc == 0) {
1840 uint32_t page_start, page_end, data_start, data_end;
1841 uint32_t addr, cmd_flags;
1843 uint8_t flash_buffer[264];
1845 /* Find the page_start addr */
1846 page_start = offset32 + written;
1847 page_start -= (page_start % sc->bce_flash_info->page_size);
1848 /* Find the page_end addr */
1849 page_end = page_start + sc->bce_flash_info->page_size;
1850 /* Find the data_start addr */
1851 data_start = (written == 0) ? offset32 : page_start;
1852 /* Find the data_end addr */
1853 data_end = (page_end > offset32 + len32) ? (offset32 + len32)
1856 /* Request access to the flash interface. */
1857 rc = bce_acquire_nvram_lock(sc);
1859 goto nvram_write_end;
1861 /* Enable access to flash interface */
1862 bce_enable_nvram_access(sc);
1864 cmd_flags = BCE_NVM_COMMAND_FIRST;
1865 if (sc->bce_flash_info->buffered == 0) {
1869 * Read the whole page into the buffer
1870 * (non-buffer flash only)
1872 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1873 if (j == (sc->bce_flash_info->page_size - 4))
1874 cmd_flags |= BCE_NVM_COMMAND_LAST;
1876 rc = bce_nvram_read_dword(sc, page_start + j,
1880 goto nvram_write_end;
1886 /* Enable writes to flash interface (unlock write-protect) */
1887 rc = bce_enable_nvram_write(sc);
1889 goto nvram_write_end;
1891 /* Erase the page */
1892 rc = bce_nvram_erase_page(sc, page_start);
1894 goto nvram_write_end;
1896 /* Re-enable the write again for the actual write */
1897 bce_enable_nvram_write(sc);
1899 /* Loop to write back the buffer data from page_start to
1902 if (sc->bce_flash_info->buffered == 0) {
1903 for (addr = page_start; addr < data_start;
1904 addr += 4, i += 4) {
1905 rc = bce_nvram_write_dword(sc, addr,
1909 goto nvram_write_end;
1915 /* Loop to write the new data from data_start to data_end */
1916 for (addr = data_start; addr < data_end; addr += 4, i++) {
1917 if (addr == page_end - 4 ||
1918 (sc->bce_flash_info->buffered &&
1919 addr == data_end - 4))
1920 cmd_flags |= BCE_NVM_COMMAND_LAST;
1922 rc = bce_nvram_write_dword(sc, addr, buf, cmd_flags);
1924 goto nvram_write_end;
1930 /* Loop to write back the buffer data from data_end
1932 if (sc->bce_flash_info->buffered == 0) {
1933 for (addr = data_end; addr < page_end;
1934 addr += 4, i += 4) {
1935 if (addr == page_end-4)
1936 cmd_flags = BCE_NVM_COMMAND_LAST;
1938 rc = bce_nvram_write_dword(sc, addr,
1939 &flash_buffer[i], cmd_flags);
1941 goto nvram_write_end;
1947 /* Disable writes to flash interface (lock write-protect) */
1948 bce_disable_nvram_write(sc);
1950 /* Disable access to flash interface */
1951 bce_disable_nvram_access(sc);
1952 bce_release_nvram_lock(sc);
1954 /* Increment written */
1955 written += data_end - data_start;
1959 if (align_start || align_end)
1960 kfree(buf, M_DEVBUF);
1963 #endif /* BCE_NVRAM_WRITE_SUPPORT */
1966 /****************************************************************************/
1967 /* Verifies that NVRAM is accessible and contains valid data. */
1969 /* Reads the configuration data from NVRAM and verifies that the CRC is */
1973 /* 0 on success, positive value on failure. */
1974 /****************************************************************************/
1976 bce_nvram_test(struct bce_softc *sc)
1978 uint32_t buf[BCE_NVRAM_SIZE / 4];
1979 uint32_t magic, csum;
1980 uint8_t *data = (uint8_t *)buf;
1984 * Check that the device NVRAM is valid by reading
1985 * the magic value at offset 0.
1987 rc = bce_nvram_read(sc, 0, data, 4);
1991 magic = be32toh(buf[0]);
1992 if (magic != BCE_NVRAM_MAGIC) {
1993 if_printf(&sc->arpcom.ac_if,
1994 "Invalid NVRAM magic value! Expected: 0x%08X, "
1995 "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
2000 * Verify that the device NVRAM includes valid
2001 * configuration data.
2003 rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
2007 csum = ether_crc32_le(data, 0x100);
2008 if (csum != BCE_CRC32_RESIDUAL) {
2009 if_printf(&sc->arpcom.ac_if,
2010 "Invalid Manufacturing Information NVRAM CRC! "
2011 "Expected: 0x%08X, Found: 0x%08X\n",
2012 BCE_CRC32_RESIDUAL, csum);
2016 csum = ether_crc32_le(data + 0x100, 0x100);
2017 if (csum != BCE_CRC32_RESIDUAL) {
2018 if_printf(&sc->arpcom.ac_if,
2019 "Invalid Feature Configuration Information "
2020 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2021 BCE_CRC32_RESIDUAL, csum);
2028 /****************************************************************************/
2029 /* Free any DMA memory owned by the driver. */
2031 /* Scans through each data structure that requires DMA memory and frees */
2032 /* the memory if allocated. */
2036 /****************************************************************************/
2038 bce_dma_free(struct bce_softc *sc)
2042 /* Destroy the status block. */
2043 if (sc->status_tag != NULL) {
2044 if (sc->status_block != NULL) {
2045 bus_dmamap_unload(sc->status_tag, sc->status_map);
2046 bus_dmamem_free(sc->status_tag, sc->status_block,
2049 bus_dma_tag_destroy(sc->status_tag);
2053 /* Destroy the statistics block. */
2054 if (sc->stats_tag != NULL) {
2055 if (sc->stats_block != NULL) {
2056 bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2057 bus_dmamem_free(sc->stats_tag, sc->stats_block,
2060 bus_dma_tag_destroy(sc->stats_tag);
2063 /* Destroy the TX buffer descriptor DMA resources. */
2064 if (sc->tx_bd_chain_tag != NULL) {
2065 for (i = 0; i < TX_PAGES; i++) {
2066 if (sc->tx_bd_chain[i] != NULL) {
2067 bus_dmamap_unload(sc->tx_bd_chain_tag,
2068 sc->tx_bd_chain_map[i]);
2069 bus_dmamem_free(sc->tx_bd_chain_tag,
2071 sc->tx_bd_chain_map[i]);
2074 bus_dma_tag_destroy(sc->tx_bd_chain_tag);
2077 /* Destroy the RX buffer descriptor DMA resources. */
2078 if (sc->rx_bd_chain_tag != NULL) {
2079 for (i = 0; i < RX_PAGES; i++) {
2080 if (sc->rx_bd_chain[i] != NULL) {
2081 bus_dmamap_unload(sc->rx_bd_chain_tag,
2082 sc->rx_bd_chain_map[i]);
2083 bus_dmamem_free(sc->rx_bd_chain_tag,
2085 sc->rx_bd_chain_map[i]);
2088 bus_dma_tag_destroy(sc->rx_bd_chain_tag);
2091 /* Destroy the TX mbuf DMA resources. */
2092 if (sc->tx_mbuf_tag != NULL) {
2093 for (i = 0; i < TOTAL_TX_BD; i++) {
2094 /* Must have been unloaded in bce_stop() */
2095 KKASSERT(sc->tx_mbuf_ptr[i] == NULL);
2096 bus_dmamap_destroy(sc->tx_mbuf_tag,
2097 sc->tx_mbuf_map[i]);
2099 bus_dma_tag_destroy(sc->tx_mbuf_tag);
2102 /* Destroy the RX mbuf DMA resources. */
2103 if (sc->rx_mbuf_tag != NULL) {
2104 for (i = 0; i < TOTAL_RX_BD; i++) {
2105 /* Must have been unloaded in bce_stop() */
2106 KKASSERT(sc->rx_mbuf_ptr[i] == NULL);
2107 bus_dmamap_destroy(sc->rx_mbuf_tag,
2108 sc->rx_mbuf_map[i]);
2110 bus_dmamap_destroy(sc->rx_mbuf_tag, sc->rx_mbuf_tmpmap);
2111 bus_dma_tag_destroy(sc->rx_mbuf_tag);
2114 /* Destroy the parent tag */
2115 if (sc->parent_tag != NULL)
2116 bus_dma_tag_destroy(sc->parent_tag);
2120 /****************************************************************************/
2121 /* Get DMA memory from the OS. */
2123 /* Validates that the OS has provided DMA buffers in response to a */
2124 /* bus_dmamap_load() call and saves the physical address of those buffers. */
2125 /* When this callback runs, any mapping failure is reported through the */
2126 /* callback's 'error' argument; on success the bus address of the single */
2127 /* DMA segment is saved through the 'arg' pointer for the caller. */
2131 /****************************************************************************/
2133 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2135 bus_addr_t *busaddr = arg;
2138 * Simulate a mapping failure.
2141 DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
2142 kprintf("bce: %s(%d): Simulating DMA mapping error.\n",
2143 __FILE__, __LINE__);
2146 /* Check for an error and signal the caller that an error occurred. */
2150 KASSERT(nseg == 1, ("only one segment is allowed\n"));
2151 *busaddr = segs->ds_addr;
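/*
 * Illustrative sketch (not part of the driver): a minimal example of how a
 * callback like bce_dma_map_addr() pairs with bus_dmamap_load().  The
 * example_* names are hypothetical; the real driver performs this pattern
 * inline in bce_dma_alloc() below.
 */
#if 0	/* example only */
static void
example_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddr = arg;

	if (error)
		return;			/* caller sees the failure via rc */
	KKASSERT(nseg == 1);
	*busaddr = segs->ds_addr;
}

static int
example_load_block(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr,
		   bus_size_t size, bus_addr_t *paddr)
{
	int rc;

	*paddr = 0;
	rc = bus_dmamap_load(tag, map, vaddr, size,
			     example_map_addr, paddr, BUS_DMA_WAITOK);
	if (rc == EINPROGRESS) {
		/* Coherent memory loaded with BUS_DMA_WAITOK should never be
		 * deferred; treat it as a bug, as bce_dma_alloc() does. */
		panic("example_load_block: deferred DMA load");
	}
	return (rc);
}
#endif	/* example only */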
2155 /****************************************************************************/
2156 /* Allocate any DMA memory needed by the driver. */
2158 /* Allocates DMA memory needed for the various global structures needed by */
2162 /* 0 for success, positive value for failure. */
2163 /****************************************************************************/
2165 bce_dma_alloc(struct bce_softc *sc)
2167 struct ifnet *ifp = &sc->arpcom.ac_if;
2172 * Allocate the parent bus DMA tag appropriate for PCI.
2174 rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2175 sc->max_bus_addr, BUS_SPACE_MAXADDR,
2177 BUS_SPACE_MAXSIZE_32BIT, 0,
2178 BUS_SPACE_MAXSIZE_32BIT,
2179 0, &sc->parent_tag);
2181 if_printf(ifp, "Could not allocate parent DMA tag!\n");
2186 * Allocate status block.
2188 sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2189 BCE_DMA_ALIGN, BCE_STATUS_BLK_SZ,
2190 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2191 &sc->status_tag, &sc->status_map,
2192 &sc->status_block_paddr);
2193 if (sc->status_block == NULL) {
2194 if_printf(ifp, "Could not allocate status block!\n");
2199 * Allocate statistics block.
2201 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2202 BCE_DMA_ALIGN, BCE_STATS_BLK_SZ,
2203 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2204 &sc->stats_tag, &sc->stats_map,
2205 &sc->stats_block_paddr);
2206 if (sc->stats_block == NULL) {
2207 if_printf(ifp, "Could not allocate statistics block!\n");
2212 * Create a DMA tag for the TX buffer descriptor chain,
2213 * allocate and clear the memory, and fetch the
2214 * physical address of the block.
2216 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2217 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2219 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2220 0, &sc->tx_bd_chain_tag);
2222 if_printf(ifp, "Could not allocate "
2223 "TX descriptor chain DMA tag!\n");
2227 for (i = 0; i < TX_PAGES; i++) {
2228 rc = bus_dmamem_alloc(sc->tx_bd_chain_tag,
2229 (void **)&sc->tx_bd_chain[i],
2230 BUS_DMA_WAITOK | BUS_DMA_ZERO |
2232 &sc->tx_bd_chain_map[i]);
2234 if_printf(ifp, "Could not allocate %dth TX descriptor "
2235 "chain DMA memory!\n", i);
2239 rc = bus_dmamap_load(sc->tx_bd_chain_tag,
2240 sc->tx_bd_chain_map[i],
2241 sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ,
2242 bce_dma_map_addr, &busaddr,
2245 if (rc == EINPROGRESS) {
2246 panic("%s coherent memory loading "
2247 "is still in progress!", ifp->if_xname);
2249 if_printf(ifp, "Could not map %dth TX descriptor "
2250 "chain DMA memory!\n", i);
2251 bus_dmamem_free(sc->tx_bd_chain_tag,
2253 sc->tx_bd_chain_map[i]);
2254 sc->tx_bd_chain[i] = NULL;
2258 sc->tx_bd_chain_paddr[i] = busaddr;
2259 /* DRC - Fix for 64 bit systems. */
2260 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2261 i, (uint32_t)sc->tx_bd_chain_paddr[i]);
2264 /* Create a DMA tag for TX mbufs. */
2265 rc = bus_dma_tag_create(sc->parent_tag, 1, 0,
2266 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2268 /* BCE_MAX_JUMBO_ETHER_MTU_VLAN */MCLBYTES,
2269 BCE_MAX_SEGMENTS, MCLBYTES,
2270 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
2274 if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n");
2278 /* Create DMA maps for the TX mbuf clusters. */
2279 for (i = 0; i < TOTAL_TX_BD; i++) {
2280 rc = bus_dmamap_create(sc->tx_mbuf_tag,
2281 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2282 &sc->tx_mbuf_map[i]);
2284 for (j = 0; j < i; ++j) {
2285 bus_dmamap_destroy(sc->tx_mbuf_tag,
2286 sc->tx_mbuf_map[j]);
2288 bus_dma_tag_destroy(sc->tx_mbuf_tag);
2289 sc->tx_mbuf_tag = NULL;
2291 if_printf(ifp, "Unable to create "
2292 "%dth TX mbuf DMA map!\n", i);
2298 * Create a DMA tag for the RX buffer descriptor chain,
2299 * allocate and clear the memory, and fetch the physical
2300 * address of the blocks.
2302 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2303 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2305 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2306 0, &sc->rx_bd_chain_tag);
2308 if_printf(ifp, "Could not allocate "
2309 "RX descriptor chain DMA tag!\n");
2313 for (i = 0; i < RX_PAGES; i++) {
2314 rc = bus_dmamem_alloc(sc->rx_bd_chain_tag,
2315 (void **)&sc->rx_bd_chain[i],
2316 BUS_DMA_WAITOK | BUS_DMA_ZERO |
2318 &sc->rx_bd_chain_map[i]);
2320 if_printf(ifp, "Could not allocate %dth RX descriptor "
2321 "chain DMA memory!\n", i);
2325 rc = bus_dmamap_load(sc->rx_bd_chain_tag,
2326 sc->rx_bd_chain_map[i],
2327 sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ,
2328 bce_dma_map_addr, &busaddr,
2331 if (rc == EINPROGRESS) {
2332 panic("%s coherent memory loading "
2333 "is still in progress!", ifp->if_xname);
2335 if_printf(ifp, "Could not map %dth RX descriptor "
2336 "chain DMA memory!\n", i);
2337 bus_dmamem_free(sc->rx_bd_chain_tag,
2339 sc->rx_bd_chain_map[i]);
2340 sc->rx_bd_chain[i] = NULL;
2344 sc->rx_bd_chain_paddr[i] = busaddr;
2345 /* DRC - Fix for 64 bit systems. */
2346 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2347 i, (uint32_t)sc->rx_bd_chain_paddr[i]);
2350 /* Create a DMA tag for RX mbufs. */
2351 rc = bus_dma_tag_create(sc->parent_tag, 1, 0,
2352 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2354 MCLBYTES, 1, MCLBYTES,
2355 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
2358 if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n");
2362 /* Create tmp DMA map for RX mbuf clusters. */
2363 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK,
2364 &sc->rx_mbuf_tmpmap);
2366 bus_dma_tag_destroy(sc->rx_mbuf_tag);
2367 sc->rx_mbuf_tag = NULL;
2369 if_printf(ifp, "Could not create RX mbuf tmp DMA map!\n");
2373 /* Create DMA maps for the RX mbuf clusters. */
2374 for (i = 0; i < TOTAL_RX_BD; i++) {
2375 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK,
2376 &sc->rx_mbuf_map[i]);
2378 for (j = 0; j < i; ++j) {
2379 bus_dmamap_destroy(sc->rx_mbuf_tag,
2380 sc->rx_mbuf_map[j]);
2382 bus_dma_tag_destroy(sc->rx_mbuf_tag);
2383 sc->rx_mbuf_tag = NULL;
2385 if_printf(ifp, "Unable to create "
2386 "%dth RX mbuf DMA map!\n", i);
2394 /****************************************************************************/
2395 /* Firmware synchronization. */
2397 /* Before performing certain events such as a chip reset, synchronize with */
2398 /* the firmware first. */
2401 /* 0 for success, positive value for failure. */
2402 /****************************************************************************/
2404 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2409 /* Don't waste any time if we've timed out before. */
2410 if (sc->bce_fw_timed_out)
2413 /* Increment the message sequence number. */
2414 sc->bce_fw_wr_seq++;
2415 msg_data |= sc->bce_fw_wr_seq;
2417 DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2419 /* Send the message to the bootcode driver mailbox. */
2420 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2422 /* Wait for the bootcode to acknowledge the message. */
2423 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2424 /* Check for a response in the bootcode firmware mailbox. */
2425 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2426 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2431 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2432 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2433 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2434 if_printf(&sc->arpcom.ac_if,
2435 "Firmware synchronization timeout! "
2436 "msg_data = 0x%08X\n", msg_data);
2438 msg_data &= ~BCE_DRV_MSG_CODE;
2439 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2441 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2443 sc->bce_fw_timed_out = 1;
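/*
 * Illustrative standalone demo (not part of the driver): the handshake above
 * only compares the sequence-number field of the driver and firmware
 * mailboxes.  The EX_ masks below are placeholders; the real field layout
 * comes from the BCE_DRV_MSG_ and BCE_FW_MSG_ACK definitions in if_bcereg.h.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdio.h>

#define EX_DRV_MSG_SEQ	0x0000ffffu	/* assumed: sequence field */
#define EX_FW_MSG_ACK	0x0000ffffu	/* assumed: acknowledged sequence field */

int
main(void)
{
	uint32_t seq = 0x0012;				/* sc->bce_fw_wr_seq */
	uint32_t msg_data = 0x01000000u | 0x00010000u | seq; /* code|data|seq */
	uint32_t fw_mb = 0x00000012u;		/* value read back from BCE_FW_MB */

	/* Only the sequence number matters for the acknowledgement test. */
	if ((fw_mb & EX_FW_MSG_ACK) == (msg_data & EX_DRV_MSG_SEQ))
		printf("bootcode acknowledged sequence 0x%04x\n", seq);
	return (0);
}
#endif	/* example only */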
2450 /****************************************************************************/
2451 /* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2455 /****************************************************************************/
2457 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2458 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2463 for (i = 0; i < rv2p_code_len; i += 8) {
2464 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2466 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2469 if (rv2p_proc == RV2P_PROC1) {
2470 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2471 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2473 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2474 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2478 /* Reset the processor, un-stall is done later. */
2479 if (rv2p_proc == RV2P_PROC1)
2480 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2482 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2486 /****************************************************************************/
2487 /* Load RISC processor firmware. */
2489 /* Loads firmware from the file if_bcefw.h into the scratchpad memory */
2490 /* associated with a particular processor. */
2494 /****************************************************************************/
2496 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2499 uint32_t offset, val;
2503 val = REG_RD_IND(sc, cpu_reg->mode);
2504 val |= cpu_reg->mode_value_halt;
2505 REG_WR_IND(sc, cpu_reg->mode, val);
2506 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2508 /* Load the Text area. */
2509 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2511 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2512 REG_WR_IND(sc, offset, fw->text[j]);
2515 /* Load the Data area. */
2516 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2518 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2519 REG_WR_IND(sc, offset, fw->data[j]);
2522 /* Load the SBSS area. */
2523 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2525 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2526 REG_WR_IND(sc, offset, fw->sbss[j]);
2529 /* Load the BSS area. */
2530 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2532 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2533 REG_WR_IND(sc, offset, fw->bss[j]);
2536 /* Load the Read-Only area. */
2537 offset = cpu_reg->spad_base +
2538 (fw->rodata_addr - cpu_reg->mips_view_base);
2540 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2541 REG_WR_IND(sc, offset, fw->rodata[j]);
2544 /* Clear the pre-fetch instruction. */
2545 REG_WR_IND(sc, cpu_reg->inst, 0);
2546 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2548 /* Start the CPU. */
2549 val = REG_RD_IND(sc, cpu_reg->mode);
2550 val &= ~cpu_reg->mode_value_halt;
2551 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2552 REG_WR_IND(sc, cpu_reg->mode, val);
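/*
 * Illustrative standalone demo (not part of the driver): the loader above
 * translates each firmware section's MIPS-view link address into a scratchpad
 * offset with  offset = spad_base + (section_addr - mips_view_base).  The
 * spad_base and text_addr values below are hypothetical; only the 0x8000000
 * view base is taken from bce_init_cpus().
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t mips_view_base = 0x08000000;	/* cpu_reg.mips_view_base */
	uint32_t spad_base      = 0x000e0000;	/* hypothetical scratchpad base */
	uint32_t text_addr      = 0x08000098;	/* hypothetical fw->text_addr */

	uint32_t offset = spad_base + (text_addr - mips_view_base);

	printf("text section loads at offset 0x%08x\n", offset); /* 0x000e0098 */
	return (0);
}
#endif	/* example only */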
2556 /****************************************************************************/
2557 /* Initialize the RV2P, RX, TX, TPAT, and COM CPUs. */
2559 /* Loads the firmware for each CPU and starts the CPU. */
2563 /****************************************************************************/
2565 bce_init_cpus(struct bce_softc *sc)
2567 struct cpu_reg cpu_reg;
2570 /* Initialize the RV2P processor. */
2571 bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
2572 bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);
2574 /* Initialize the RX Processor. */
2575 cpu_reg.mode = BCE_RXP_CPU_MODE;
2576 cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2577 cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2578 cpu_reg.state = BCE_RXP_CPU_STATE;
2579 cpu_reg.state_value_clear = 0xffffff;
2580 cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2581 cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2582 cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2583 cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2584 cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2585 cpu_reg.spad_base = BCE_RXP_SCRATCH;
2586 cpu_reg.mips_view_base = 0x8000000;
2588 fw.ver_major = bce_RXP_b06FwReleaseMajor;
2589 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2590 fw.ver_fix = bce_RXP_b06FwReleaseFix;
2591 fw.start_addr = bce_RXP_b06FwStartAddr;
2593 fw.text_addr = bce_RXP_b06FwTextAddr;
2594 fw.text_len = bce_RXP_b06FwTextLen;
2596 fw.text = bce_RXP_b06FwText;
2598 fw.data_addr = bce_RXP_b06FwDataAddr;
2599 fw.data_len = bce_RXP_b06FwDataLen;
2601 fw.data = bce_RXP_b06FwData;
2603 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2604 fw.sbss_len = bce_RXP_b06FwSbssLen;
2606 fw.sbss = bce_RXP_b06FwSbss;
2608 fw.bss_addr = bce_RXP_b06FwBssAddr;
2609 fw.bss_len = bce_RXP_b06FwBssLen;
2611 fw.bss = bce_RXP_b06FwBss;
2613 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2614 fw.rodata_len = bce_RXP_b06FwRodataLen;
2615 fw.rodata_index = 0;
2616 fw.rodata = bce_RXP_b06FwRodata;
2618 DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
2619 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2621 /* Initialize the TX Processor. */
2622 cpu_reg.mode = BCE_TXP_CPU_MODE;
2623 cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2624 cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2625 cpu_reg.state = BCE_TXP_CPU_STATE;
2626 cpu_reg.state_value_clear = 0xffffff;
2627 cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2628 cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2629 cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2630 cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2631 cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2632 cpu_reg.spad_base = BCE_TXP_SCRATCH;
2633 cpu_reg.mips_view_base = 0x8000000;
2635 fw.ver_major = bce_TXP_b06FwReleaseMajor;
2636 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2637 fw.ver_fix = bce_TXP_b06FwReleaseFix;
2638 fw.start_addr = bce_TXP_b06FwStartAddr;
2640 fw.text_addr = bce_TXP_b06FwTextAddr;
2641 fw.text_len = bce_TXP_b06FwTextLen;
2643 fw.text = bce_TXP_b06FwText;
2645 fw.data_addr = bce_TXP_b06FwDataAddr;
2646 fw.data_len = bce_TXP_b06FwDataLen;
2648 fw.data = bce_TXP_b06FwData;
2650 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2651 fw.sbss_len = bce_TXP_b06FwSbssLen;
2653 fw.sbss = bce_TXP_b06FwSbss;
2655 fw.bss_addr = bce_TXP_b06FwBssAddr;
2656 fw.bss_len = bce_TXP_b06FwBssLen;
2658 fw.bss = bce_TXP_b06FwBss;
2660 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2661 fw.rodata_len = bce_TXP_b06FwRodataLen;
2662 fw.rodata_index = 0;
2663 fw.rodata = bce_TXP_b06FwRodata;
2665 DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
2666 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2668 /* Initialize the TX Patch-up Processor. */
2669 cpu_reg.mode = BCE_TPAT_CPU_MODE;
2670 cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2671 cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2672 cpu_reg.state = BCE_TPAT_CPU_STATE;
2673 cpu_reg.state_value_clear = 0xffffff;
2674 cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2675 cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2676 cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2677 cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2678 cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2679 cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2680 cpu_reg.mips_view_base = 0x8000000;
2682 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2683 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2684 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2685 fw.start_addr = bce_TPAT_b06FwStartAddr;
2687 fw.text_addr = bce_TPAT_b06FwTextAddr;
2688 fw.text_len = bce_TPAT_b06FwTextLen;
2690 fw.text = bce_TPAT_b06FwText;
2692 fw.data_addr = bce_TPAT_b06FwDataAddr;
2693 fw.data_len = bce_TPAT_b06FwDataLen;
2695 fw.data = bce_TPAT_b06FwData;
2697 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2698 fw.sbss_len = bce_TPAT_b06FwSbssLen;
2700 fw.sbss = bce_TPAT_b06FwSbss;
2702 fw.bss_addr = bce_TPAT_b06FwBssAddr;
2703 fw.bss_len = bce_TPAT_b06FwBssLen;
2705 fw.bss = bce_TPAT_b06FwBss;
2707 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2708 fw.rodata_len = bce_TPAT_b06FwRodataLen;
2709 fw.rodata_index = 0;
2710 fw.rodata = bce_TPAT_b06FwRodata;
2712 DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
2713 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2715 /* Initialize the Completion Processor. */
2716 cpu_reg.mode = BCE_COM_CPU_MODE;
2717 cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
2718 cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
2719 cpu_reg.state = BCE_COM_CPU_STATE;
2720 cpu_reg.state_value_clear = 0xffffff;
2721 cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
2722 cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
2723 cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
2724 cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
2725 cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
2726 cpu_reg.spad_base = BCE_COM_SCRATCH;
2727 cpu_reg.mips_view_base = 0x8000000;
2729 fw.ver_major = bce_COM_b06FwReleaseMajor;
2730 fw.ver_minor = bce_COM_b06FwReleaseMinor;
2731 fw.ver_fix = bce_COM_b06FwReleaseFix;
2732 fw.start_addr = bce_COM_b06FwStartAddr;
2734 fw.text_addr = bce_COM_b06FwTextAddr;
2735 fw.text_len = bce_COM_b06FwTextLen;
2737 fw.text = bce_COM_b06FwText;
2739 fw.data_addr = bce_COM_b06FwDataAddr;
2740 fw.data_len = bce_COM_b06FwDataLen;
2742 fw.data = bce_COM_b06FwData;
2744 fw.sbss_addr = bce_COM_b06FwSbssAddr;
2745 fw.sbss_len = bce_COM_b06FwSbssLen;
2747 fw.sbss = bce_COM_b06FwSbss;
2749 fw.bss_addr = bce_COM_b06FwBssAddr;
2750 fw.bss_len = bce_COM_b06FwBssLen;
2752 fw.bss = bce_COM_b06FwBss;
2754 fw.rodata_addr = bce_COM_b06FwRodataAddr;
2755 fw.rodata_len = bce_COM_b06FwRodataLen;
2756 fw.rodata_index = 0;
2757 fw.rodata = bce_COM_b06FwRodata;
2759 DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
2760 bce_load_cpu_fw(sc, &cpu_reg, &fw);
2764 /****************************************************************************/
2765 /* Initialize context memory. */
2767 /* Clears the memory associated with each Context ID (CID). */
2771 /****************************************************************************/
2773 bce_init_ctx(struct bce_softc *sc)
2778 uint32_t vcid_addr, pcid_addr, offset;
2783 vcid_addr = GET_CID_ADDR(vcid);
2784 pcid_addr = vcid_addr;
2786 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2787 vcid_addr += (i << PHY_CTX_SHIFT);
2788 pcid_addr += (i << PHY_CTX_SHIFT);
2790 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
2791 REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
2793 /* Zero out the context. */
2794 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2795 CTX_WR(sc, vcid_addr, offset, 0);
2801 /****************************************************************************/
2802 /* Fetch the permanent MAC address of the controller. */
2806 /****************************************************************************/
2808 bce_get_mac_addr(struct bce_softc *sc)
2810 uint32_t mac_lo = 0, mac_hi = 0;
2813 * The NetXtreme II bootcode populates various NIC
2814 * power-on and runtime configuration items in a
2815 * shared memory area. The factory configured MAC
2816 * address is available from both NVRAM and the
2817 * shared memory area so we'll read the value from
2818 * shared memory for speed.
2821 mac_hi = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_UPPER);
2822 mac_lo = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_LOWER);
2824 if (mac_lo == 0 && mac_hi == 0) {
2825 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
2827 sc->eaddr[0] = (u_char)(mac_hi >> 8);
2828 sc->eaddr[1] = (u_char)(mac_hi >> 0);
2829 sc->eaddr[2] = (u_char)(mac_lo >> 24);
2830 sc->eaddr[3] = (u_char)(mac_lo >> 16);
2831 sc->eaddr[4] = (u_char)(mac_lo >> 8);
2832 sc->eaddr[5] = (u_char)(mac_lo >> 0);
2835 DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
2839 /****************************************************************************/
2840 /* Program the MAC address. */
2844 /****************************************************************************/
2846 bce_set_mac_addr(struct bce_softc *sc)
2848 const uint8_t *mac_addr = sc->eaddr;
2851 DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n",
2854 val = (mac_addr[0] << 8) | mac_addr[1];
2855 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
2857 val = (mac_addr[2] << 24) |
2858 (mac_addr[3] << 16) |
2859 (mac_addr[4] << 8) |
2861 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
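/*
 * Illustrative standalone demo (not part of the driver): round-trips a
 * hypothetical MAC address through the 16-bit "upper" / 32-bit "lower" word
 * split used by bce_get_mac_addr() and bce_set_mac_addr() above.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint8_t ea[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
	uint32_t mac_hi, mac_lo;
	uint8_t out[6];
	int i;

	/* Pack as bce_set_mac_addr() does for the EMAC match registers. */
	mac_hi = ((uint32_t)ea[0] << 8) | ea[1];
	mac_lo = ((uint32_t)ea[2] << 24) | ((uint32_t)ea[3] << 16) |
	    ((uint32_t)ea[4] << 8) | ea[5];

	/* Unpack as bce_get_mac_addr() does from the shared memory words. */
	out[0] = mac_hi >> 8;
	out[1] = mac_hi >> 0;
	out[2] = mac_lo >> 24;
	out[3] = mac_lo >> 16;
	out[4] = mac_lo >> 8;
	out[5] = mac_lo >> 0;

	for (i = 0; i < 6; i++)
		printf("%02x%c", out[i], i == 5 ? '\n' : ':');
	return (0);
}
#endif	/* example only */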
2865 /****************************************************************************/
2866 /* Stop the controller. */
2870 /****************************************************************************/
2872 bce_stop(struct bce_softc *sc)
2874 struct ifnet *ifp = &sc->arpcom.ac_if;
2875 struct mii_data *mii = device_get_softc(sc->bce_miibus);
2876 struct ifmedia_entry *ifm;
2879 ASSERT_SERIALIZED(ifp->if_serializer);
2881 callout_stop(&sc->bce_stat_ch);
2883 /* Disable the transmit/receive blocks. */
2884 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
2885 REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2888 bce_disable_intr(sc);
2890 /* Tell firmware that the driver is going away. */
2891 bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);
2893 /* Free the RX lists. */
2894 bce_free_rx_chain(sc);
2896 /* Free TX buffers. */
2897 bce_free_tx_chain(sc);
2900 * Isolate/power down the PHY, but leave the media selection
2901 * unchanged so that things will be put back to normal when
2902 * we bring the interface back up.
2904 * 'mii' may be NULL if bce_stop() is called by bce_detach().
2907 itmp = ifp->if_flags;
2908 ifp->if_flags |= IFF_UP;
2909 ifm = mii->mii_media.ifm_cur;
2910 mtmp = ifm->ifm_media;
2911 ifm->ifm_media = IFM_ETHER | IFM_NONE;
2913 ifm->ifm_media = mtmp;
2914 ifp->if_flags = itmp;
2918 sc->bce_coalchg_mask = 0;
2920 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2928 bce_reset(struct bce_softc *sc, uint32_t reset_code)
2933 /* Wait for pending PCI transactions to complete. */
2934 REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
2935 BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
2936 BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
2937 BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
2938 BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
2939 val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
2942 /* Assume bootcode is running. */
2943 sc->bce_fw_timed_out = 0;
2945 /* Give the firmware a chance to prepare for the reset. */
2946 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
2948 if_printf(&sc->arpcom.ac_if,
2949 "Firmware is not ready for reset\n");
2953 /* Set a firmware reminder that this is a soft reset. */
2954 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
2955 BCE_DRV_RESET_SIGNATURE_MAGIC);
2957 /* Dummy read to force the chip to complete all current transactions. */
2958 val = REG_RD(sc, BCE_MISC_ID);
2961 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2962 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
2963 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
2964 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
2966 /* Allow up to 30us for reset to complete. */
2967 for (i = 0; i < 10; i++) {
2968 val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
2969 if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2970 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
2976 /* Check that reset completed successfully. */
2977 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
2978 BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
2979 if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
2983 /* Make sure byte swapping is properly configured. */
2984 val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
2985 if (val != 0x01020304) {
2986 if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
2990 /* Just completed a reset, assume that firmware is running again. */
2991 sc->bce_fw_timed_out = 0;
2993 /* Wait for the firmware to finish its initialization. */
2994 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
2996 if_printf(&sc->arpcom.ac_if,
2997 "Firmware did not complete initialization!\n");
3004 bce_chipinit(struct bce_softc *sc)
3009 /* Make sure the interrupt is not active. */
3010 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3013 * Initialize DMA byte/word swapping, configure the number of DMA
3014 * channels and PCI clock compensation delay.
3016 val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3017 BCE_DMA_CONFIG_DATA_WORD_SWAP |
3018 #if BYTE_ORDER == BIG_ENDIAN
3019 BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3021 BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3022 DMA_READ_CHANS << 12 |
3023 DMA_WRITE_CHANS << 16;
3025 val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3027 if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3028 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3031 * This setting resolves a problem observed on certain Intel PCI
3032 * chipsets that cannot handle multiple outstanding DMA operations.
3033 * See errata E9_5706A1_65.
3035 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3036 BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3037 !(sc->bce_flags & BCE_PCIX_FLAG))
3038 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3040 REG_WR(sc, BCE_DMA_CONFIG, val);
3042 /* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
3043 if (sc->bce_flags & BCE_PCIX_FLAG) {
3046 cmd = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
3047 pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, cmd & ~0x2, 2);
3050 /* Enable the RX_V2P and Context state machines before access. */
3051 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3052 BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3053 BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3054 BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3056 /* Initialize context mapping and zero out the quick contexts. */
3059 /* Initialize the on-board CPUs. */
3062 /* Prepare NVRAM for access. */
3063 rc = bce_init_nvram(sc);
3067 /* Set the kernel bypass block size */
3068 val = REG_RD(sc, BCE_MQ_CONFIG);
3069 val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3070 val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3071 REG_WR(sc, BCE_MQ_CONFIG, val);
3073 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3074 REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3075 REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3077 /* Set the page size and clear the RV2P processor stall bits. */
3078 val = (BCM_PAGE_BITS - 8) << 24;
3079 REG_WR(sc, BCE_RV2P_CONFIG, val);
3081 /* Configure page size. */
3082 val = REG_RD(sc, BCE_TBDR_CONFIG);
3083 val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3084 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3085 REG_WR(sc, BCE_TBDR_CONFIG, val);
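/*
 * Illustrative standalone demo (not part of the driver): how the register
 * values programmed above are composed from bit fields.  The channel counts
 * and swap flags are hypothetical stand-ins; the shift positions (12, 16, 20
 * and 24) are the ones used in bce_chipinit().  BCM_PAGE_BITS is assumed to
 * be 12 (4 KB pages), giving a page-size field of 12 - 8 = 4.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t dma_read_chans = 6, dma_write_chans = 6;	/* hypothetical */
	uint32_t swap_flags = 0x3;				/* stand-in bits */

	uint32_t dma_cfg = swap_flags |
	    (dma_read_chans << 12) | (dma_write_chans << 16) | (0x2 << 20);

	uint32_t page_bits = 12;
	uint32_t page_field = (page_bits - 8) << 24;

	printf("dma_cfg = 0x%08x, page_field = 0x%08x\n", dma_cfg, page_field);
	return (0);
}
#endif	/* example only */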
3091 /****************************************************************************/
3092 /* Initialize the controller in preparation to send/receive traffic. */
3095 /* 0 for success, positive value for failure. */
3096 /****************************************************************************/
3098 bce_blockinit(struct bce_softc *sc)
3103 /* Load the hardware default MAC address. */
3104 bce_set_mac_addr(sc);
3106 /* Set the Ethernet backoff seed value */
3107 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3108 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3109 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3111 sc->last_status_idx = 0;
3112 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3114 /* Set up link change interrupt generation. */
3115 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3117 /* Program the physical address of the status block. */
3118 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3119 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3121 /* Program the physical address of the statistics block. */
3122 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3123 BCE_ADDR_LO(sc->stats_block_paddr));
3124 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3125 BCE_ADDR_HI(sc->stats_block_paddr));
3127 /* Program various host coalescing parameters. */
3128 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3129 (sc->bce_tx_quick_cons_trip_int << 16) |
3130 sc->bce_tx_quick_cons_trip);
3131 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3132 (sc->bce_rx_quick_cons_trip_int << 16) |
3133 sc->bce_rx_quick_cons_trip);
3134 REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3135 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3136 REG_WR(sc, BCE_HC_TX_TICKS,
3137 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3138 REG_WR(sc, BCE_HC_RX_TICKS,
3139 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3140 REG_WR(sc, BCE_HC_COM_TICKS,
3141 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3142 REG_WR(sc, BCE_HC_CMD_TICKS,
3143 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3144 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3145 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3146 REG_WR(sc, BCE_HC_CONFIG,
3147 BCE_HC_CONFIG_TX_TMR_MODE |
3148 BCE_HC_CONFIG_COLLECT_STATS);
3150 /* Clear the internal statistics counters. */
3151 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3153 /* Verify that bootcode is running. */
3154 reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3156 DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3157 if_printf(&sc->arpcom.ac_if,
3158 "%s(%d): Simulating bootcode failure.\n",
3159 __FILE__, __LINE__);
3162 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3163 BCE_DEV_INFO_SIGNATURE_MAGIC) {
3164 if_printf(&sc->arpcom.ac_if,
3165 "Bootcode not running! Found: 0x%08X, "
3166 "Expected: 08%08X\n",
3167 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3168 BCE_DEV_INFO_SIGNATURE_MAGIC);
3172 /* Check if any management firmware is running. */
3173 reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3174 if (reg & (BCE_PORT_FEATURE_ASF_ENABLED |
3175 BCE_PORT_FEATURE_IMD_ENABLED)) {
3176 DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3177 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3181 REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3182 DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3184 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3185 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3187 /* Enable link state change interrupt generation. */
3188 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3190 /* Enable all remaining blocks in the MAC. */
3191 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3192 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
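/*
 * Illustrative standalone demo (not part of the driver): each host coalescing
 * register programmed above packs the "during interrupt" value into the upper
 * 16 bits and the normal value into the lower 16 bits.  The tunables below
 * are hypothetical; the driver uses its own sc->bce_* fields.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t tx_quick_cons_trip = 20;	/* hypothetical tunable */
	uint16_t tx_quick_cons_trip_int = 20;	/* hypothetical tunable */

	uint32_t reg = ((uint32_t)tx_quick_cons_trip_int << 16) |
	    tx_quick_cons_trip;

	printf("BCE_HC_TX_QUICK_CONS_TRIP value = 0x%08x\n", reg);
	return (0);
}
#endif	/* example only */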
3199 /****************************************************************************/
3200 /* Encapsulate an mbuf cluster into the rx_bd chain. */
3202 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3203 /* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3207 /* 0 for success, positive value for failure. */
3208 /****************************************************************************/
3210 bce_newbuf_std(struct bce_softc *sc, uint16_t *prod, uint16_t *chain_prod,
3211 uint32_t *prod_bseq, int init)
3214 bus_dma_segment_t seg;
3218 uint16_t debug_chain_prod = *chain_prod;
3221 /* Make sure the inputs are valid. */
3222 DBRUNIF((*chain_prod > MAX_RX_BD),
3223 if_printf(&sc->arpcom.ac_if, "%s(%d): "
3224 "RX producer out of range: 0x%04X > 0x%04X\n",
3226 *chain_prod, (uint16_t)MAX_RX_BD));
3228 DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
3229 "prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3231 DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
3232 if_printf(&sc->arpcom.ac_if, "%s(%d): "
3233 "Simulating mbuf allocation failure.\n",
3234 __FILE__, __LINE__);
3235 sc->mbuf_alloc_failed++;
3238 /* This is a new mbuf allocation. */
3239 m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3242 DBRUNIF(1, sc->rx_mbuf_alloc++);
3244 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3246 /* Map the mbuf cluster into device memory. */
3247 error = bus_dmamap_load_mbuf_segment(sc->rx_mbuf_tag,
3248 sc->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg,
3253 if_printf(&sc->arpcom.ac_if,
3254 "Error mapping mbuf into RX chain!\n");
3256 DBRUNIF(1, sc->rx_mbuf_alloc--);
3260 if (sc->rx_mbuf_ptr[*chain_prod] != NULL) {
3261 bus_dmamap_unload(sc->rx_mbuf_tag,
3262 sc->rx_mbuf_map[*chain_prod]);
3265 map = sc->rx_mbuf_map[*chain_prod];
3266 sc->rx_mbuf_map[*chain_prod] = sc->rx_mbuf_tmpmap;
3267 sc->rx_mbuf_tmpmap = map;
3269 /* Watch for overflow. */
3270 DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
3271 if_printf(&sc->arpcom.ac_if, "%s(%d): "
3272 "Too many free rx_bd (0x%04X > 0x%04X)!\n",
3273 __FILE__, __LINE__, sc->free_rx_bd,
3274 (uint16_t)USABLE_RX_BD));
3276 /* Update some debug statistics counters. */
3277 DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
3278 sc->rx_low_watermark = sc->free_rx_bd);
3279 DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);
3281 /* Save the mbuf and update our counter. */
3282 sc->rx_mbuf_ptr[*chain_prod] = m_new;
3283 sc->rx_mbuf_paddr[*chain_prod] = seg.ds_addr;
3286 bce_setup_rxdesc_std(sc, *chain_prod, prod_bseq);
3288 DBRUN(BCE_VERBOSE_RECV,
3289 bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1));
3291 DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
3292 "prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);
3299 bce_setup_rxdesc_std(struct bce_softc *sc, uint16_t chain_prod, uint32_t *prod_bseq)
3305 paddr = sc->rx_mbuf_paddr[chain_prod];
3306 len = sc->rx_mbuf_ptr[chain_prod]->m_len;
3308 /* Setup the rx_bd for the first segment. */
3309 rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3311 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3312 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3313 rxbd->rx_bd_len = htole32(len);
3314 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3317 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
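/*
 * Illustrative standalone demo (not part of the driver): the rx_bd address
 * fields hold the two 32-bit halves of the buffer's bus address, stored
 * little-endian.  This assumes BCE_ADDR_LO() and BCE_ADDR_HI() reduce to the
 * low and high 32 bits of a 64-bit bus address; the address below is
 * hypothetical.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t paddr = 0x0000000123456000ULL;	/* hypothetical bus address */
	uint32_t lo = (uint32_t)(paddr & 0xffffffffu);
	uint32_t hi = (uint32_t)(paddr >> 32);

	/* htole32() in the driver; a no-op on a little-endian host. */
	printf("rx_bd_haddr_hi = 0x%08x, rx_bd_haddr_lo = 0x%08x\n", hi, lo);
	return (0);
}
#endif	/* example only */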
3321 /****************************************************************************/
3322 /* Allocate memory and initialize the TX data structures. */
3325 /* 0 for success, positive value for failure. */
3326 /****************************************************************************/
3328 bce_init_tx_chain(struct bce_softc *sc)
3334 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3336 /* Set the initial TX producer/consumer indices. */
3339 sc->tx_prod_bseq = 0;
3341 sc->max_tx_bd = USABLE_TX_BD;
3342 DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
3343 DBRUNIF(1, sc->tx_full_count = 0);
3346 * The NetXtreme II supports a linked-list structure called
3347 * a Buffer Descriptor Chain (or BD chain). A BD chain
3348 * consists of a series of 1 or more chain pages, each of which
3349 * consists of a fixed number of BD entries.
3350 * The last BD entry on each page is a pointer to the next page
3351 * in the chain, and the last pointer in the BD chain
3352 * points back to the beginning of the chain.
3355 /* Set the TX next pointer chain entries. */
3356 for (i = 0; i < TX_PAGES; i++) {
3359 txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3361 /* Check if we've reached the last page. */
3362 if (i == (TX_PAGES - 1))
3367 txbd->tx_bd_haddr_hi =
3368 htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
3369 txbd->tx_bd_haddr_lo =
3370 htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
3373 /* Initialize the context ID for an L2 TX chain. */
3374 val = BCE_L2CTX_TYPE_TYPE_L2;
3375 val |= BCE_L2CTX_TYPE_SIZE_L2;
3376 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);
3378 val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3379 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);
3381 /* Point the hardware to the first page in the chain. */
3382 val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
3383 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
3384 val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
3385 CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);
3387 DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));
3389 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
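/*
 * Illustrative standalone demo (not part of the driver): the chain-page
 * linkage described in the comment above.  The last descriptor slot of each
 * page points at the next page, and the final page wraps back to page 0,
 * forming a ring.  The page count is a hypothetical stand-in for TX_PAGES or
 * RX_PAGES.
 */
#if 0	/* example only */
#include <stdio.h>

int
main(void)
{
	int pages = 4, i, j;

	for (i = 0; i < pages; i++) {
		/* Last page's next pointer wraps back to page 0. */
		j = (i == pages - 1) ? 0 : i + 1;
		printf("page %d: next-pointer bd -> page %d\n", i, j);
	}
	return (0);
}
#endif	/* example only */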
3395 /****************************************************************************/
3396 /* Free memory and clear the TX data structures. */
3400 /****************************************************************************/
3402 bce_free_tx_chain(struct bce_softc *sc)
3406 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3408 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3409 for (i = 0; i < TOTAL_TX_BD; i++) {
3410 if (sc->tx_mbuf_ptr[i] != NULL) {
3411 bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]);
3412 m_freem(sc->tx_mbuf_ptr[i]);
3413 sc->tx_mbuf_ptr[i] = NULL;
3414 DBRUNIF(1, sc->tx_mbuf_alloc--);
3418 /* Clear each TX chain page. */
3419 for (i = 0; i < TX_PAGES; i++)
3420 bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3423 /* Check if we lost any mbufs in the process. */
3424 DBRUNIF((sc->tx_mbuf_alloc),
3425 if_printf(&sc->arpcom.ac_if,
3426 "%s(%d): Memory leak! "
3427 "Lost %d mbufs from tx chain!\n",
3428 __FILE__, __LINE__, sc->tx_mbuf_alloc));
3430 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3434 /****************************************************************************/
3435 /* Allocate memory and initialize the RX data structures. */
3438 /* 0 for success, positive value for failure. */
3439 /****************************************************************************/
3441 bce_init_rx_chain(struct bce_softc *sc)
3445 uint16_t prod, chain_prod;
3446 uint32_t prod_bseq, val;
3448 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3450 /* Initialize the RX producer and consumer indices. */
3453 sc->rx_prod_bseq = 0;
3454 sc->free_rx_bd = USABLE_RX_BD;
3455 sc->max_rx_bd = USABLE_RX_BD;
3456 DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
3457 DBRUNIF(1, sc->rx_empty_count = 0);
3459 /* Initialize the RX next pointer chain entries. */
3460 for (i = 0; i < RX_PAGES; i++) {
3463 rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
3465 /* Check if we've reached the last page. */
3466 if (i == (RX_PAGES - 1))
3471 /* Setup the chain page pointers. */
3472 rxbd->rx_bd_haddr_hi =
3473 htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
3474 rxbd->rx_bd_haddr_lo =
3475 htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
3478 /* Initialize the context ID for an L2 RX chain. */
3479 val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3480 val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
3482 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);
3484 /* Point the hardware to the first page in the chain. */
3485 /* XXX shouldn't this be done after RX descriptor initialization? */
3486 val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
3487 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
3488 val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
3489 CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);
3491 /* Allocate mbuf clusters for the rx_bd chain. */
3492 prod = prod_bseq = 0;
3493 while (prod < TOTAL_RX_BD) {
3494 chain_prod = RX_CHAIN_IDX(prod);
3495 if (bce_newbuf_std(sc, &prod, &chain_prod, &prod_bseq, 1)) {
3496 if_printf(&sc->arpcom.ac_if,
3497 "Error filling RX chain: rx_bd[0x%04X]!\n",
3502 prod = NEXT_RX_BD(prod);
3505 /* Save the RX chain producer index. */
3507 sc->rx_prod_bseq = prod_bseq;
3509 /* Tell the chip about the waiting rx_bd's. */
3510 REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
3511 REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);
3513 DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));
3515 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3521 /****************************************************************************/
3522 /* Free memory and clear the RX data structures. */
3526 /****************************************************************************/
3528 bce_free_rx_chain(struct bce_softc *sc)
3532 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3534 /* Free any mbufs still in the RX mbuf chain. */
3535 for (i = 0; i < TOTAL_RX_BD; i++) {
3536 if (sc->rx_mbuf_ptr[i] != NULL) {
3537 bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]);
3538 m_freem(sc->rx_mbuf_ptr[i]);
3539 sc->rx_mbuf_ptr[i] = NULL;
3540 DBRUNIF(1, sc->rx_mbuf_alloc--);
3544 /* Clear each RX chain page. */
3545 for (i = 0; i < RX_PAGES; i++)
3546 bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3548 /* Check if we lost any mbufs in the process. */
3549 DBRUNIF((sc->rx_mbuf_alloc),
3550 if_printf(&sc->arpcom.ac_if,
3551 "%s(%d): Memory leak! "
3552 "Lost %d mbufs from rx chain!\n",
3553 __FILE__, __LINE__, sc->rx_mbuf_alloc));
3555 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3559 /****************************************************************************/
3560 /* Set media options. */
3563 /* 0 for success, positive value for failure. */
3564 /****************************************************************************/
3566 bce_ifmedia_upd(struct ifnet *ifp)
3568 struct bce_softc *sc = ifp->if_softc;
3569 struct mii_data *mii = device_get_softc(sc->bce_miibus);
3572 * 'mii' will be NULL when this function is called on the following
3573 * code path: bce_attach() -> bce_mgmt_init().
3576 /* Make sure the MII bus has been enumerated. */
3578 if (mii->mii_instance) {
3579 struct mii_softc *miisc;
3581 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3582 mii_phy_reset(miisc);
3590 /****************************************************************************/
3591 /* Reports current media status. */
3595 /****************************************************************************/
3597 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3599 struct bce_softc *sc = ifp->if_softc;
3600 struct mii_data *mii = device_get_softc(sc->bce_miibus);
3603 ifmr->ifm_active = mii->mii_media_active;
3604 ifmr->ifm_status = mii->mii_media_status;
3608 /****************************************************************************/
3609 /* Handles PHY generated interrupt events. */
3613 /****************************************************************************/
3615 bce_phy_intr(struct bce_softc *sc)
3617 uint32_t new_link_state, old_link_state;
3618 struct ifnet *ifp = &sc->arpcom.ac_if;
3620 ASSERT_SERIALIZED(ifp->if_serializer);
3622 new_link_state = sc->status_block->status_attn_bits &
3623 STATUS_ATTN_BITS_LINK_STATE;
3624 old_link_state = sc->status_block->status_attn_bits_ack &
3625 STATUS_ATTN_BITS_LINK_STATE;
3627 /* Handle any changes if the link state has changed. */
3628 if (new_link_state != old_link_state) { /* XXX redundant? */
3629 DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));
3632 callout_stop(&sc->bce_stat_ch);
3633 bce_tick_serialized(sc);
3635 /* Update the status_attn_bits_ack field in the status block. */
3636 if (new_link_state) {
3637 REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
3638 STATUS_ATTN_BITS_LINK_STATE);
3640 if_printf(ifp, "Link is now UP.\n");
3642 REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
3643 STATUS_ATTN_BITS_LINK_STATE);
3645 if_printf(ifp, "Link is now DOWN.\n");
3649 /* Acknowledge the link change interrupt. */
3650 REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
3654 /****************************************************************************/
3655 /* Reads the receive consumer value from the status block (skipping over */
3656 /* chain page pointer if necessary). */
3660 /****************************************************************************/
3661 static __inline uint16_t
3662 bce_get_hw_rx_cons(struct bce_softc *sc)
3664 uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0;
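/*
 * Illustrative standalone demo (not part of the driver): the adjustment the
 * comment above describes.  Hardware consumer indices that land on a page's
 * next-pointer slot must be skipped so they always reference a data
 * descriptor.  The per-page count below is a hypothetical stand-in for
 * USABLE_RX_BD_PER_PAGE.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdio.h>

#define EX_USABLE_BD_PER_PAGE	255	/* assumed: last slot is the page link */

int
main(void)
{
	uint16_t cons = 255;	/* raw index landing on the next-pointer slot */

	if ((cons & EX_USABLE_BD_PER_PAGE) == EX_USABLE_BD_PER_PAGE)
		cons++;		/* skip over the chain-page pointer entry */

	printf("adjusted consumer index = %u\n", cons);
	return (0);
}
#endif	/* example only */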