network code: Convert if_multiaddrs from LIST to TAILQ.
[dragonfly.git] / sys / dev / netif / bce / if_bce.c
CommitLineData
43c2aeb0
SZ
1/*-
2 * Copyright (c) 2006-2007 Broadcom Corporation
3 * David Christensen <davidch@broadcom.com>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written consent.
17 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
1af951ab 31 * $DragonFly: src/sys/dev/netif/bce/if_bce.c,v 1.21 2008/11/19 13:57:49 sephe Exp $
43c2aeb0
SZ
32 */
33
34/*
35 * The following controllers are supported by this driver:
36 * BCM5706C A2, A3
37 * BCM5708C B1, B2
38 *
39 * The following controllers are not supported by this driver:
40 * BCM5706C A0, A1
41 * BCM5706S A0, A1, A2, A3
42 * BCM5708C A0, B0
43 * BCM5708S A0, B0, B1, B2
44 */
45
46#include "opt_bce.h"
47#include "opt_polling.h"
48
49#include <sys/param.h>
50#include <sys/bus.h>
51#include <sys/endian.h>
52#include <sys/kernel.h>
9db4b353 53#include <sys/interrupt.h>
43c2aeb0
SZ
54#include <sys/mbuf.h>
55#include <sys/malloc.h>
56#include <sys/queue.h>
57#ifdef BCE_DEBUG
58#include <sys/random.h>
59#endif
60#include <sys/rman.h>
61#include <sys/serialize.h>
62#include <sys/socket.h>
63#include <sys/sockio.h>
64#include <sys/sysctl.h>
65
66#include <net/bpf.h>
67#include <net/ethernet.h>
68#include <net/if.h>
69#include <net/if_arp.h>
70#include <net/if_dl.h>
71#include <net/if_media.h>
72#include <net/if_types.h>
73#include <net/ifq_var.h>
74#include <net/vlan/if_vlan_var.h>
b637f170 75#include <net/vlan/if_vlan_ether.h>
43c2aeb0
SZ
76
77#include <dev/netif/mii_layer/mii.h>
78#include <dev/netif/mii_layer/miivar.h>
79
80#include <bus/pci/pcireg.h>
81#include <bus/pci/pcivar.h>
82
83#include "miibus_if.h"
84
9382dc55
SZ
85#include <dev/netif/bce/if_bcereg.h>
86#include <dev/netif/bce/if_bcefw.h>
43c2aeb0
SZ
87
/****************************************************************************/
/* BCE Debug Options                                                        */
/****************************************************************************/
#ifdef BCE_DEBUG

/* Default debug verbosity level (see DBPRINT/DBRUN users below). */
static uint32_t bce_debug = BCE_WARN;

/*
 * Fault-injection knobs.  Each value is compared against a random
 * number to decide how often the corresponding failure is simulated:
 *
 *          0 = Never
 *          1 = 1 in 2,147,483,648
 *        256 = 1 in   8,388,608
 *       2048 = 1 in   1,048,576
 *      65536 = 1 in      32,768
 *    1048576 = 1 in       2,048
 *  268435456 = 1 in           8
 *  536870912 = 1 in           4
 * 1073741824 = 1 in           2
 *
 * bce_debug_l2fhdr_status_check:
 *	How often the l2_fhdr frame error check will fail.
 *
 * bce_debug_unexpected_attention:
 *	How often the unexpected attention check will fail.
 *
 * bce_debug_mbuf_allocation_failure:
 *	How often to simulate an mbuf allocation failure.
 *
 * bce_debug_dma_map_addr_failure:
 *	How often to simulate a DMA mapping failure.
 *
 * bce_debug_bootcode_running_failure:
 *	How often to simulate a bootcode failure.
 */
static int bce_debug_l2fhdr_status_check = 0;
static int bce_debug_unexpected_attention = 0;
static int bce_debug_mbuf_allocation_failure = 0;
static int bce_debug_dma_map_addr_failure = 0;
static int bce_debug_bootcode_running_failure = 0;

#endif	/* BCE_DEBUG */
128
129
/****************************************************************************/
/* PCI Device ID Table                                                      */
/*                                                                          */
/* Used by bce_probe() to identify the devices supported by this driver.    */
/****************************************************************************/
/* Maximum length of the device description string built in bce_probe(). */
#define BCE_DEVDESC_MAX		64

/*
 * Matched in order by bce_probe(); OEM (subvendor-specific) entries must
 * precede the PCI_ANY_ID catch-all entry for the same device ID.  The
 * all-zero entry terminates the table.
 */
static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706, PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S, PCI_ANY_ID, PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708S 1000Base-T" },
	{ 0, 0, 0, 0, NULL }
};
161
162
/****************************************************************************/
/* Supported Flash NVRAM device data.                                       */
/*                                                                          */
/* Each entry describes one NVRAM part the controller may be strapped to.   */
/* The first field encodes the strapping value used to select the entry;    */
/* the following four words are controller NVRAM configuration register     */
/* values, then: buffered-flag, page bits, page size, byte address mask,    */
/* total size, and a human readable name.  "Expansion entry" rows are       */
/* placeholders for strapping codes with no known part assigned.            */
/****************************************************************************/
static const struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
252
253
/****************************************************************************/
/* DragonFly device entry points.                                           */
/****************************************************************************/
static int	bce_probe(device_t);
static int	bce_attach(device_t);
static int	bce_detach(device_t);
static void	bce_shutdown(device_t);

/****************************************************************************/
/* BCE Debug Data Structure Dump Routines                                   */
/****************************************************************************/
#ifdef BCE_DEBUG
static void	bce_dump_mbuf(struct bce_softc *, struct mbuf *);
static void	bce_dump_tx_mbuf_chain(struct bce_softc *, int, int);
static void	bce_dump_rx_mbuf_chain(struct bce_softc *, int, int);
static void	bce_dump_txbd(struct bce_softc *, int, struct tx_bd *);
static void	bce_dump_rxbd(struct bce_softc *, int, struct rx_bd *);
static void	bce_dump_l2fhdr(struct bce_softc *, int,
				struct l2_fhdr *) __unused;
static void	bce_dump_tx_chain(struct bce_softc *, int, int);
static void	bce_dump_rx_chain(struct bce_softc *, int, int);
static void	bce_dump_status_block(struct bce_softc *);
static void	bce_dump_driver_state(struct bce_softc *);
static void	bce_dump_stats_block(struct bce_softc *) __unused;
static void	bce_dump_hw_state(struct bce_softc *);
static void	bce_dump_txp_state(struct bce_softc *);
static void	bce_dump_rxp_state(struct bce_softc *) __unused;
static void	bce_dump_tpat_state(struct bce_softc *) __unused;
static void	bce_freeze_controller(struct bce_softc *) __unused;
static void	bce_unfreeze_controller(struct bce_softc *) __unused;
static void	bce_breakpoint(struct bce_softc *);
#endif	/* BCE_DEBUG */

/****************************************************************************/
/* BCE Register/Memory Access Routines                                      */
/****************************************************************************/
static uint32_t	bce_reg_rd_ind(struct bce_softc *, uint32_t);
static void	bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
static void	bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
static int	bce_miibus_read_reg(device_t, int, int);
static int	bce_miibus_write_reg(device_t, int, int, int);
static void	bce_miibus_statchg(device_t);

/****************************************************************************/
/* BCE NVRAM Access Routines                                                */
/****************************************************************************/
static int	bce_acquire_nvram_lock(struct bce_softc *);
static int	bce_release_nvram_lock(struct bce_softc *);
static void	bce_enable_nvram_access(struct bce_softc *);
static void	bce_disable_nvram_access(struct bce_softc *);
static int	bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
				     uint32_t);
static int	bce_init_nvram(struct bce_softc *);
static int	bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
static int	bce_nvram_test(struct bce_softc *);
#ifdef BCE_NVRAM_WRITE_SUPPORT
static int	bce_enable_nvram_write(struct bce_softc *);
static void	bce_disable_nvram_write(struct bce_softc *);
static int	bce_nvram_erase_page(struct bce_softc *, uint32_t);
static int	bce_nvram_write_dword(struct bce_softc *, uint32_t, uint8_t *,
				      uint32_t);
static int	bce_nvram_write(struct bce_softc *, uint32_t, uint8_t *,
				int) __unused;
#endif

/****************************************************************************/
/* BCE DMA Allocate/Free Routines                                           */
/****************************************************************************/
static int	bce_dma_alloc(struct bce_softc *);
static void	bce_dma_free(struct bce_softc *);
static void	bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);

/****************************************************************************/
/* BCE Firmware Synchronization and Load                                    */
/****************************************************************************/
static int	bce_fw_sync(struct bce_softc *, uint32_t);
static void	bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
				 uint32_t, uint32_t);
static void	bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
				struct fw_info *);
static void	bce_init_cpus(struct bce_softc *);

/* Chip reset/initialization and ring management. */
static void	bce_stop(struct bce_softc *);
static int	bce_reset(struct bce_softc *, uint32_t);
static int	bce_chipinit(struct bce_softc *);
static int	bce_blockinit(struct bce_softc *);
static int	bce_newbuf_std(struct bce_softc *, uint16_t *, uint16_t *,
			       uint32_t *, int);
static void	bce_setup_rxdesc_std(struct bce_softc *, uint16_t, uint32_t *);

static int	bce_init_tx_chain(struct bce_softc *);
static int	bce_init_rx_chain(struct bce_softc *);
static void	bce_free_rx_chain(struct bce_softc *);
static void	bce_free_tx_chain(struct bce_softc *);

/* ifnet entry points and interrupt handling. */
static int	bce_encap(struct bce_softc *, struct mbuf **);
static void	bce_start(struct ifnet *);
static int	bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	bce_watchdog(struct ifnet *);
static int	bce_ifmedia_upd(struct ifnet *);
static void	bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	bce_init(void *);
static void	bce_mgmt_init(struct bce_softc *);

static void	bce_init_ctx(struct bce_softc *);
static void	bce_get_mac_addr(struct bce_softc *);
static void	bce_set_mac_addr(struct bce_softc *);
static void	bce_phy_intr(struct bce_softc *);
static void	bce_rx_intr(struct bce_softc *, int);
static void	bce_tx_intr(struct bce_softc *);
static void	bce_disable_intr(struct bce_softc *);
static void	bce_enable_intr(struct bce_softc *);

#ifdef DEVICE_POLLING
static void	bce_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	bce_intr(void *);
static void	bce_set_rx_mode(struct bce_softc *);
static void	bce_stats_update(struct bce_softc *);
static void	bce_tick(void *);
static void	bce_tick_serialized(struct bce_softc *);
static void	bce_add_sysctls(struct bce_softc *);

/* Interrupt-coalescing sysctl handlers. */
static void	bce_coal_change(struct bce_softc *);
static int	bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
static int	bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
				       uint32_t *, uint32_t);
390
/*
 * Default interrupt-coalescing parameters, overridable via loader
 * tunables (hw.bce.*).  The values in the trailing comments are the
 * defaults used by Broadcom's own driver.
 *
 * NOTE:
 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
 * takes 1023 as the TX ticks limit.  However, using 1023 will
 * cause 5708(B2) to generate extra interrupts (~2000/s) even when
 * there is _no_ network activity on the NIC.
 */
static uint32_t	bce_tx_bds_int = 255;		/* bcm: 20 */
static uint32_t	bce_tx_bds = 255;		/* bcm: 20 */
static uint32_t	bce_tx_ticks_int = 1022;	/* bcm: 80 */
static uint32_t	bce_tx_ticks = 1022;		/* bcm: 80 */
static uint32_t	bce_rx_bds_int = 128;		/* bcm: 6 */
static uint32_t	bce_rx_bds = 128;		/* bcm: 6 */
static uint32_t	bce_rx_ticks_int = 125;		/* bcm: 18 */
static uint32_t	bce_rx_ticks = 125;		/* bcm: 18 */

TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
43c2aeb0
SZ
415
416/****************************************************************************/
417/* DragonFly device dispatch table. */
418/****************************************************************************/
419static device_method_t bce_methods[] = {
420 /* Device interface */
421 DEVMETHOD(device_probe, bce_probe),
422 DEVMETHOD(device_attach, bce_attach),
423 DEVMETHOD(device_detach, bce_detach),
424 DEVMETHOD(device_shutdown, bce_shutdown),
425
426 /* bus interface */
427 DEVMETHOD(bus_print_child, bus_generic_print_child),
428 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
429
430 /* MII interface */
431 DEVMETHOD(miibus_readreg, bce_miibus_read_reg),
432 DEVMETHOD(miibus_writereg, bce_miibus_write_reg),
433 DEVMETHOD(miibus_statchg, bce_miibus_statchg),
434
435 { 0, 0 }
436};
437
438static driver_t bce_driver = {
439 "bce",
440 bce_methods,
441 sizeof(struct bce_softc)
442};
443
444static devclass_t bce_devclass;
445
43c2aeb0 446
1be78fa8
SZ
447DECLARE_DUMMY_MODULE(if_xl);
448MODULE_DEPEND(bce, miibus, 1, 1, 1);
449DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, 0, 0);
43c2aeb0
SZ
450DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, 0, 0);
451
452
453/****************************************************************************/
454/* Device probe function. */
455/* */
456/* Compares the device to the driver's list of supported devices and */
457/* reports back to the OS whether this is the right driver for the device. */
458/* */
459/* Returns: */
460/* BUS_PROBE_DEFAULT on success, positive value on failure. */
461/****************************************************************************/
462static int
463bce_probe(device_t dev)
464{
465 struct bce_type *t;
466 uint16_t vid, did, svid, sdid;
467
468 /* Get the data for the device to be probed. */
469 vid = pci_get_vendor(dev);
470 did = pci_get_device(dev);
471 svid = pci_get_subvendor(dev);
472 sdid = pci_get_subdevice(dev);
473
474 /* Look through the list of known devices for a match. */
475 for (t = bce_devs; t->bce_name != NULL; ++t) {
476 if (vid == t->bce_vid && did == t->bce_did &&
477 (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
478 (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
479 uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
480 char *descbuf;
481
482 descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
483
484 /* Print out the device identity. */
485 ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
486 t->bce_name,
487 ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
488
489 device_set_desc_copy(dev, descbuf);
490 kfree(descbuf, M_TEMP);
491 return 0;
492 }
493 }
494 return ENXIO;
495}
496
497
/****************************************************************************/
/* Device attach function.                                                  */
/*                                                                          */
/* Allocates device resources, performs secondary chip identification,     */
/* resets and initializes the hardware, and initializes driver instance    */
/* variables.  On any failure after resource allocation has begun, jumps   */
/* to "fail:", which relies on bce_detach() to release whatever was        */
/* acquired so far.                                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_attach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int rid, rc = 0;
#ifdef notyet
	int count;
#endif

	sc->bce_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	pci_enable_busmaster(dev);

	/* Allocate PCI memory resources (BAR 0 register window). */
	rid = PCIR_BAR(0);
	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
						 RF_ACTIVE | PCI_RF_DENSE);
	if (sc->bce_res_mem == NULL) {
		device_printf(dev, "PCI memory allocation failed\n");
		return ENXIO;
	}
	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);

	/* Allocate PCI IRQ resources (MSI support is not yet enabled). */
#ifdef notyet
	count = pci_msi_count(dev);
	if (count == 1 && pci_alloc_msi(dev, &count) == 0) {
		rid = 1;
		sc->bce_flags |= BCE_USING_MSI_FLAG;
	} else
#endif
	rid = 0;
	sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
						 RF_SHAREABLE | RF_ACTIVE);
	if (sc->bce_res_irq == NULL) {
		device_printf(dev, "PCI map interrupt failed\n");
		rc = ENXIO;
		goto fail;
	}

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
			 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);

	/* Save ASIC revision info. */
	sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch(BCE_CHIP_ID(sc)) {
	case BCE_CHIP_ID_5706_A0:
	case BCE_CHIP_ID_5706_A1:
	case BCE_CHIP_ID_5708_A0:
	case BCE_CHIP_ID_5708_B0:
		device_printf(dev, "Unsupported chip id 0x%08x!\n",
			      BCE_CHIP_ID(sc));
		rc = ENODEV;
		goto fail;
	}

	/*
	 * The embedded PCIe to PCI-X bridge (EPB)
	 * in the 5708 cannot address memory above
	 * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
		sc->max_bus_addr = BCE_BUS_SPACE_MAXADDR;
	else
		sc->max_bus_addr = BUS_SPACE_MAXADDR;

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) == BCE_SHM_HDR_SIGNATURE_SIG)
		sc->bce_shmem_base = REG_RD_IND(sc, BCE_SHM_HDR_ADDR_0);
	else
		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;

	DBPRINT(sc, BCE_INFO, "bce_shmem_base = 0x%08X\n", sc->bce_shmem_base);

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bce_flags |= BCE_PCIX_FLAG;

		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
			 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;
	}

	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bce_flags |= BCE_PCI_32BIT_FLAG;

	device_printf(dev, "ASIC ID 0x%08X; Revision (%c%d); PCI%s %s %dMHz\n",
		      sc->bce_chipid,
		      ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
		      (BCE_CHIP_ID(sc) & 0x0ff0) >> 4,
		      (sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : "",
		      (sc->bce_flags & BCE_PCI_32BIT_FLAG) ?
		      "32-bit" : "64-bit", sc->bus_speed_mhz);

	/* Reset the controller. */
	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (rc != 0)
		goto fail;

	/* Initialize the controller. */
	rc = bce_chipinit(sc);
	if (rc != 0) {
		device_printf(dev, "Controller initialization failed!\n");
		goto fail;
	}

	/* Perform NVRAM test. */
	rc = bce_nvram_test(sc);
	if (rc != 0) {
		device_printf(dev, "NVRAM test failed!\n");
		goto fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bce_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

/* NOTE(review): "BCE_DRBUG" looks like a typo for BCE_DEBUG; as written
 * this block is dead code unless someone defines BCE_DRBUG explicitly.
 * Left unchanged here since "fixing" it would alter debug-build behavior
 * — confirm intent before renaming. */
#ifdef BCE_DRBUG
	/* Force more frequent interrupts. */
	sc->bce_tx_quick_cons_trip_int = 1;
	sc->bce_tx_quick_cons_trip = 1;
	sc->bce_tx_ticks_int = 0;
	sc->bce_tx_ticks = 0;

	sc->bce_rx_quick_cons_trip_int = 1;
	sc->bce_rx_quick_cons_trip = 1;
	sc->bce_rx_ticks_int = 0;
	sc->bce_rx_ticks = 0;
#else
	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
	sc->bce_tx_quick_cons_trip = bce_tx_bds;
	sc->bce_tx_ticks_int = bce_tx_ticks_int;
	sc->bce_tx_ticks = bce_tx_ticks;

	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
	sc->bce_rx_quick_cons_trip = bce_rx_bds;
	sc->bce_rx_ticks_int = bce_rx_ticks_int;
	sc->bce_rx_ticks = bce_rx_ticks;
#endif

	/* Update statistics once every second. */
	/* (The mask clears the low byte — presumably a hardware register
	 * formatting requirement; confirm against the register spec.) */
	sc->bce_stats_ticks = 1000000 & 0xffff00;

	/*
	 * The copper based NetXtreme II controllers
	 * use an integrated PHY at address 1 while
	 * the SerDes controllers use a PHY at
	 * address 2.
	 */
	sc->bce_phy_addr = 1;

	if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
		sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
		sc->bce_flags |= BCE_NO_WOL_FLAG;
		if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708) {
			sc->bce_phy_addr = 2;
			val = REG_RD_IND(sc, sc->bce_shmem_base +
					 BCE_SHARED_HW_CFG_CONFIG);
			if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
				sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
		}
	}

	/* Allocate DMA memory resources. */
	rc = bce_dma_alloc(sc);
	if (rc != 0) {
		device_printf(dev, "DMA resource allocation failed!\n");
		goto fail;
	}

	/* Initialize the ifnet interface. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_init = bce_init;
	ifp->if_watchdog = bce_watchdog;
#ifdef DEVICE_POLLING
	ifp->if_poll = bce_poll;
#endif
	ifp->if_mtu = ETHERMTU;
	ifp->if_hwassist = BCE_IF_HWASSIST;
	ifp->if_capabilities = BCE_IF_CAPABILITIES;
	ifp->if_capenable = ifp->if_capabilities;
	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD);
	ifq_set_ready(&ifp->if_snd);

	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
		ifp->if_baudrate = IF_Gbps(2.5);
	else
		ifp->if_baudrate = IF_Gbps(1);

	/* Assume a standard 1500 byte MTU size for mbuf allocations. */
	sc->mbuf_alloc_size = MCLBYTES;

	/* Look for our PHY. */
	rc = mii_phy_probe(dev, &sc->bce_miibus,
			   bce_ifmedia_upd, bce_ifmedia_sts);
	if (rc != 0) {
		device_printf(dev, "PHY probe failed!\n");
		goto fail;
	}

	/* Attach to the Ethernet interface list. */
	ether_ifattach(ifp, sc->eaddr, NULL);

	callout_init(&sc->bce_stat_ch);

	/* Hookup IRQ last, so the interrupt cannot fire on a
	 * partially-initialized softc. */
	rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, bce_intr, sc,
			    &sc->bce_intrhand, ifp->if_serializer);
	if (rc != 0) {
		device_printf(dev, "Failed to setup IRQ!\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Bind if_start etc. to the CPU servicing this interrupt. */
	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->bce_res_irq));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	/* Print some important debugging info. */
	DBRUN(BCE_INFO, bce_dump_driver_state(sc));

	/* Add the supported sysctls to the kernel. */
	bce_add_sysctls(sc);

	/* Get the firmware running so IPMI still works */
	bce_mgmt_init(sc);

	return 0;
fail:
	/* bce_detach() releases everything acquired above. */
	bce_detach(dev);
	return(rc);
}
800
801
/****************************************************************************/
/* Device detach function.                                                  */
/*                                                                          */
/* Stops the controller, resets the controller, and releases resources.     */
/* Also used by bce_attach() as its error-unwind path, so every release     */
/* below is guarded against the resource never having been acquired.        */
/*                                                                          */
/* Returns:                                                                 */
/*   0 on success, positive value on failure.                               */
/****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		/* Stop and reset the controller under the serializer, and
		 * tear down the interrupt before detaching the ifnet. */
		lwkt_serialize_enter(ifp->if_serializer);
		bce_stop(sc);
		bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
		bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_miibus)
		device_delete_child(dev, sc->bce_miibus);
	bus_generic_detach(dev);

	/* IRQ rid is 1 when MSI was allocated, 0 for a legacy interrupt. */
	if (sc->bce_res_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
			sc->bce_flags & BCE_USING_MSI_FLAG ? 1 : 0,
			sc->bce_res_irq);
	}

#ifdef notyet
	if (sc->bce_flags & BCE_USING_MSI_FLAG)
		pci_release_msi(dev);
#endif

	if (sc->bce_res_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
				     sc->bce_res_mem);
	}

	bce_dma_free(sc);

	if (sc->bce_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->bce_sysctl_ctx);

	return 0;
}
856
857
858/****************************************************************************/
859/* Device shutdown function. */
860/* */
861/* Stops and resets the controller. */
862/* */
863/* Returns: */
864/* Nothing */
865/****************************************************************************/
static void
bce_shutdown(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * Quiesce and reset the chip under the interface serializer so
	 * nothing races the stop; leaves the hardware idle across reboot.
	 */
	lwkt_serialize_enter(ifp->if_serializer);
	bce_stop(sc);
	bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	lwkt_serialize_exit(ifp->if_serializer);
}
877
878
879/****************************************************************************/
880/* Indirect register read. */
881/* */
882/* Reads NetXtreme II registers using an index/data register pair in PCI */
883/* configuration space. Using this mechanism avoids issues with posted */
884/* reads but is much slower than memory-mapped I/O. */
885/* */
886/* Returns: */
887/* The value of the register. */
888/****************************************************************************/
889static uint32_t
890bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
891{
892 device_t dev = sc->bce_dev;
893
894 pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
895#ifdef BCE_DEBUG
896 {
897 uint32_t val;
898 val = pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
899 DBPRINT(sc, BCE_EXCESSIVE,
900 "%s(); offset = 0x%08X, val = 0x%08X\n",
901 __func__, offset, val);
902 return val;
903 }
904#else
905 return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
906#endif
907}
908
909
910/****************************************************************************/
911/* Indirect register write. */
912/* */
913/* Writes NetXtreme II registers using an index/data register pair in PCI */
914/* configuration space. Using this mechanism avoids issues with posted */
915/* writes but is muchh slower than memory-mapped I/O. */
916/* */
917/* Returns: */
918/* Nothing. */
919/****************************************************************************/
static void
bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
{
	device_t dev = sc->bce_dev;

	DBPRINT(sc, BCE_EXCESSIVE, "%s(); offset = 0x%08X, val = 0x%08X\n",
		__func__, offset, val);

	/* Select the target register through the PCI window, then write it. */
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
	pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
}
931
932
933/****************************************************************************/
934/* Context memory write. */
935/* */
936/* The NetXtreme II controller uses context memory to track connection */
937/* information for L2 and higher network protocols. */
938/* */
939/* Returns: */
940/* Nothing. */
941/****************************************************************************/
942static void
943bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t offset,
944 uint32_t val)
945{
946 DBPRINT(sc, BCE_EXCESSIVE, "%s(); cid_addr = 0x%08X, offset = 0x%08X, "
947 "val = 0x%08X\n", __func__, cid_addr, offset, val);
948
949 offset += cid_addr;
950 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
951 REG_WR(sc, BCE_CTX_DATA, val);
952}
953
954
955/****************************************************************************/
956/* PHY register read. */
957/* */
958/* Implements register reads on the MII bus. */
959/* */
960/* Returns: */
961/* The value of the register. */
962/****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_VERBOSE,
			"Invalid PHY address %d for PHY read!\n", phy);
		return 0;
	}

	/*
	 * Temporarily turn off hardware MDIO auto-polling so the
	 * software access below does not collide with it.
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Compose and issue the MDIO read command. */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
	      BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	/* Poll until the BUSY bit clears, then fetch the data bits. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	/* On timeout report the failure and return 0 as the register value. */
	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		if_printf(&sc->arpcom.ac_if,
			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			  phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	DBPRINT(sc, BCE_EXCESSIVE,
		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__func__, phy, (uint16_t)reg & 0xffff, (uint16_t) val & 0xffff);

	/* Re-enable auto-polling if it was on when we entered. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return (val & 0xffff);
}
1029
1030
1031/****************************************************************************/
1032/* PHY register write. */
1033/* */
1034/* Implements register writes on the MII bus. */
1035/* */
1036/* Returns: */
1037/* The value of the register. */
1038/****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	if (phy != sc->bce_phy_addr) {
		DBPRINT(sc, BCE_WARN,
			"Invalid PHY address %d for PHY write!\n", phy);
		return(0);
	}

	DBPRINT(sc, BCE_EXCESSIVE,
		"%s(): phy = %d, reg = 0x%04X, val = 0x%04X\n",
		__func__, phy, (uint16_t)(reg & 0xffff),
		(uint16_t)(val & 0xffff));

	/*
	 * Temporarily turn off hardware MDIO auto-polling so the
	 * software access below does not collide with it.
	 */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Compose and issue the MDIO write command. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	/* Poll until the BUSY bit clears (write completed). */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");

	/* Re-enable auto-polling if it was on when we entered. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return 0;
}
1097
1098
1099/****************************************************************************/
1100/* MII bus status change. */
1101/* */
1102/* Called by the MII bus driver when the PHY establishes link to set the */
1103/* MAC interface registers. */
1104/* */
1105/* Returns: */
1106/* Nothing. */
1107/****************************************************************************/
static void
bce_miibus_statchg(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->bce_miibus);

	DBPRINT(sc, BCE_INFO, "mii_media_active = 0x%08X\n",
		mii->mii_media_active);

#ifdef BCE_DEBUG
	/* Decode the interface media flags. */
	if_printf(&sc->arpcom.ac_if, "Media: ( ");
	switch(IFM_TYPE(mii->mii_media_active)) {
	case IFM_ETHER:
		kprintf("Ethernet )");
		break;
	default:
		kprintf("Unknown )");
		break;
	}

	kprintf(" Media Options: ( ");
	switch(IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_AUTO:
		kprintf("Autoselect )");
		break;
	case IFM_MANUAL:
		kprintf("Manual )");
		break;
	case IFM_NONE:
		kprintf("None )");
		break;
	case IFM_10_T:
		kprintf("10Base-T )");
		break;
	case IFM_100_TX:
		kprintf("100Base-TX )");
		break;
	case IFM_1000_SX:
		kprintf("1000Base-SX )");
		break;
	case IFM_1000_T:
		kprintf("1000Base-T )");
		break;
	default:
		kprintf("Other )");
		break;
	}

	kprintf(" Global Options: (");
	if (mii->mii_media_active & IFM_FDX)
		kprintf(" FullDuplex");
	if (mii->mii_media_active & IFM_HDX)
		kprintf(" HalfDuplex");
	if (mii->mii_media_active & IFM_LOOP)
		kprintf(" Loopback");
	if (mii->mii_media_active & IFM_FLAG0)
		kprintf(" Flag0");
	if (mii->mii_media_active & IFM_FLAG1)
		kprintf(" Flag1");
	if (mii->mii_media_active & IFM_FLAG2)
		kprintf(" Flag2");
	kprintf(" )\n");
#endif

	/* Clear the current port mode bits before selecting the new mode. */
	BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);

	/*
	 * Set MII or GMII interface based on the speed negotiated
	 * by the PHY.
	 */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
		DBPRINT(sc, BCE_INFO, "Setting GMII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting MII interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
	}

	/*
	 * Set half or full duplex based on the duplex mode negotiated
	 * by the PHY.
	 */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		DBPRINT(sc, BCE_INFO, "Setting Full-Duplex interface.\n");
		BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	} else {
		DBPRINT(sc, BCE_INFO, "Setting Half-Duplex interface.\n");
		BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
	}
}
1200
1201
1202/****************************************************************************/
1203/* Acquire NVRAM lock. */
1204/* */
1205/* Before the NVRAM can be accessed the caller must acquire an NVRAM lock. */
1206/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */
1207/* for use by the driver. */
1208/* */
1209/* Returns: */
1210/* 0 on success, positive value on failure. */
1211/****************************************************************************/
1212static int
1213bce_acquire_nvram_lock(struct bce_softc *sc)
1214{
1215 uint32_t val;
1216 int j;
1217
1218 DBPRINT(sc, BCE_VERBOSE, "Acquiring NVRAM lock.\n");
1219
1220 /* Request access to the flash interface. */
1221 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1222 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1223 val = REG_RD(sc, BCE_NVM_SW_ARB);
1224 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1225 break;
1226
1227 DELAY(5);
1228 }
1229
1230 if (j >= NVRAM_TIMEOUT_COUNT) {
1231 DBPRINT(sc, BCE_WARN, "Timeout acquiring NVRAM lock!\n");
1232 return EBUSY;
1233 }
1234 return 0;
1235}
1236
1237
1238/****************************************************************************/
1239/* Release NVRAM lock. */
1240/* */
1241/* When the caller is finished accessing NVRAM the lock must be released. */
1242/* Locks 0 and 2 are reserved, lock 1 is used by firmware and lock 2 is */
1243/* for use by the driver. */
1244/* */
1245/* Returns: */
1246/* 0 on success, positive value on failure. */
1247/****************************************************************************/
1248static int
1249bce_release_nvram_lock(struct bce_softc *sc)
1250{
1251 int j;
1252 uint32_t val;
1253
1254 DBPRINT(sc, BCE_VERBOSE, "Releasing NVRAM lock.\n");
1255
1256 /*
1257 * Relinquish nvram interface.
1258 */
1259 REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1260
1261 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1262 val = REG_RD(sc, BCE_NVM_SW_ARB);
1263 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1264 break;
1265
1266 DELAY(5);
1267 }
1268
1269 if (j >= NVRAM_TIMEOUT_COUNT) {
1270 DBPRINT(sc, BCE_WARN, "Timeout reeasing NVRAM lock!\n");
1271 return EBUSY;
1272 }
1273 return 0;
1274}
1275
1276
1277#ifdef BCE_NVRAM_WRITE_SUPPORT
1278/****************************************************************************/
1279/* Enable NVRAM write access. */
1280/* */
1281/* Before writing to NVRAM the caller must enable NVRAM writes. */
1282/* */
1283/* Returns: */
1284/* 0 on success, positive value on failure. */
1285/****************************************************************************/
1286static int
1287bce_enable_nvram_write(struct bce_softc *sc)
1288{
1289 uint32_t val;
1290
1291 DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM write.\n");
1292
1293 val = REG_RD(sc, BCE_MISC_CFG);
1294 REG_WR(sc, BCE_MISC_CFG, val | BCE_MISC_CFG_NVM_WR_EN_PCI);
1295
1296 if (!sc->bce_flash_info->buffered) {
1297 int j;
1298
1299 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1300 REG_WR(sc, BCE_NVM_COMMAND,
1301 BCE_NVM_COMMAND_WREN | BCE_NVM_COMMAND_DOIT);
1302
1303 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1304 DELAY(5);
1305
1306 val = REG_RD(sc, BCE_NVM_COMMAND);
1307 if (val & BCE_NVM_COMMAND_DONE)
1308 break;
1309 }
1310
1311 if (j >= NVRAM_TIMEOUT_COUNT) {
1312 DBPRINT(sc, BCE_WARN, "Timeout writing NVRAM!\n");
1313 return EBUSY;
1314 }
1315 }
1316 return 0;
1317}
1318
1319
1320/****************************************************************************/
1321/* Disable NVRAM write access. */
1322/* */
1323/* When the caller is finished writing to NVRAM write access must be */
1324/* disabled. */
1325/* */
1326/* Returns: */
1327/* Nothing. */
1328/****************************************************************************/
1329static void
1330bce_disable_nvram_write(struct bce_softc *sc)
1331{
1332 uint32_t val;
1333
1334 DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM write.\n");
1335
1336 val = REG_RD(sc, BCE_MISC_CFG);
1337 REG_WR(sc, BCE_MISC_CFG, val & ~BCE_MISC_CFG_NVM_WR_EN);
1338}
1339#endif /* BCE_NVRAM_WRITE_SUPPORT */
1340
1341
1342/****************************************************************************/
1343/* Enable NVRAM access. */
1344/* */
1345/* Before accessing NVRAM for read or write operations the caller must */
1346/* enabled NVRAM access. */
1347/* */
1348/* Returns: */
1349/* Nothing. */
1350/****************************************************************************/
1351static void
1352bce_enable_nvram_access(struct bce_softc *sc)
1353{
1354 uint32_t val;
1355
1356 DBPRINT(sc, BCE_VERBOSE, "Enabling NVRAM access.\n");
1357
1358 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1359 /* Enable both bits, even on read. */
1360 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1361 val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1362}
1363
1364
1365/****************************************************************************/
1366/* Disable NVRAM access. */
1367/* */
1368/* When the caller is finished accessing NVRAM access must be disabled. */
1369/* */
1370/* Returns: */
1371/* Nothing. */
1372/****************************************************************************/
1373static void
1374bce_disable_nvram_access(struct bce_softc *sc)
1375{
1376 uint32_t val;
1377
1378 DBPRINT(sc, BCE_VERBOSE, "Disabling NVRAM access.\n");
1379
1380 val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1381
1382 /* Disable both bits, even after read. */
1383 REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1384 val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1385}
1386
1387
1388#ifdef BCE_NVRAM_WRITE_SUPPORT
1389/****************************************************************************/
1390/* Erase NVRAM page before writing. */
1391/* */
1392/* Non-buffered flash parts require that a page be erased before it is */
1393/* written. */
1394/* */
1395/* Returns: */
1396/* 0 on success, positive value on failure. */
1397/****************************************************************************/
1398static int
1399bce_nvram_erase_page(struct bce_softc *sc, uint32_t offset)
1400{
1401 uint32_t cmd;
1402 int j;
1403
1404 /* Buffered flash doesn't require an erase. */
1405 if (sc->bce_flash_info->buffered)
1406 return 0;
1407
1408 DBPRINT(sc, BCE_VERBOSE, "Erasing NVRAM page.\n");
1409
1410 /* Build an erase command. */
1411 cmd = BCE_NVM_COMMAND_ERASE | BCE_NVM_COMMAND_WR |
1412 BCE_NVM_COMMAND_DOIT;
1413
1414 /*
1415 * Clear the DONE bit separately, set the NVRAM adress to erase,
1416 * and issue the erase command.
1417 */
1418 REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1419 REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1420 REG_WR(sc, BCE_NVM_COMMAND, cmd);
1421
1422 /* Wait for completion. */
1423 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1424 uint32_t val;
1425
1426 DELAY(5);
1427
1428 val = REG_RD(sc, BCE_NVM_COMMAND);
1429 if (val & BCE_NVM_COMMAND_DONE)
1430 break;
1431 }
1432
1433 if (j >= NVRAM_TIMEOUT_COUNT) {
1434 DBPRINT(sc, BCE_WARN, "Timeout erasing NVRAM.\n");
1435 return EBUSY;
1436 }
1437 return 0;
1438}
1439#endif /* BCE_NVRAM_WRITE_SUPPORT */
1440
1441
1442/****************************************************************************/
1443/* Read a dword (32 bits) from NVRAM. */
1444/* */
1445/* Read a 32 bit word from NVRAM. The caller is assumed to have already */
1446/* obtained the NVRAM lock and enabled the controller for NVRAM access. */
1447/* */
1448/* Returns: */
1449/* 0 on success and the 32 bit value read, positive value on failure. */
1450/****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
		     uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	/* Buffered parts address by (page number << page_bits) + in-page byte. */
	if (sc->bce_flash_info->buffered) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			  sc->bce_flash_info->page_bits) +
			 (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* NVRAM data is big-endian; convert to host order. */
			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
			  "Timeout error reading NVRAM at offset 0x%08X!\n",
			  offset);
		rc = EBUSY;
	}
	return rc;
}
1501
1502
1503#ifdef BCE_NVRAM_WRITE_SUPPORT
1504/****************************************************************************/
1505/* Write a dword (32 bits) to NVRAM. */
1506/* */
1507/* Write a 32 bit word to NVRAM. The caller is assumed to have already */
1508/* obtained the NVRAM lock, enabled the controller for NVRAM access, and */
1509/* enabled NVRAM write access. */
1510/* */
1511/* Returns: */
1512/* 0 on success, positive value on failure. */
1513/****************************************************************************/
static int
bce_nvram_write_dword(struct bce_softc *sc, uint32_t offset, uint8_t *val,
		      uint32_t cmd_flags)
{
	uint32_t cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | BCE_NVM_COMMAND_WR | cmd_flags;

	/* Calculate the offset for buffered flash. */
	/* Buffered parts address by (page number << page_bits) + in-page byte. */
	if (sc->bce_flash_info->buffered) {
		offset = ((offset / sc->bce_flash_info->page_size) <<
			  sc->bce_flash_info->page_bits) +
			 (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, convert NVRAM data to big-endian,
	 * set the NVRAM address to write, and issue the write command
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	memcpy(&val32, val, 4);
	val32 = htobe32(val32);
	REG_WR(sc, BCE_NVM_WRITE, val32);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		DELAY(5);

		if (REG_RD(sc, BCE_NVM_COMMAND) & BCE_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
			  "Timeout error writing NVRAM at offset 0x%08X\n",
			  offset);
		return EBUSY;
	}
	return 0;
}
1557#endif /* BCE_NVRAM_WRITE_SUPPORT */
1558
1559
1560/****************************************************************************/
1561/* Initialize NVRAM access. */
1562/* */
1563/* Identify the NVRAM device in use and prepare the NVRAM interface to */
1564/* access that device. */
1565/* */
1566/* Returns: */
1567/* 0 on success, positive value on failure. */
1568/****************************************************************************/
1569static int
1570bce_init_nvram(struct bce_softc *sc)
1571{
1572 uint32_t val;
1573 int j, entry_count, rc = 0;
1574 const struct flash_spec *flash;
1575
1576 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
1577
1578 /* Determine the selected interface. */
1579 val = REG_RD(sc, BCE_NVM_CFG1);
1580
1581 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1582
1583 /*
1584 * Flash reconfiguration is required to support additional
1585 * NVRAM devices not directly supported in hardware.
1586 * Check if the flash interface was reconfigured
1587 * by the bootcode.
1588 */
1589
1590 if (val & 0x40000000) {
1591 /* Flash interface reconfigured by bootcode. */
1592
1593 DBPRINT(sc, BCE_INFO_LOAD,
1594 "%s(): Flash WAS reconfigured.\n", __func__);
1595
1596 for (j = 0, flash = flash_table; j < entry_count;
1597 j++, flash++) {
1598 if ((val & FLASH_BACKUP_STRAP_MASK) ==
1599 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1600 sc->bce_flash_info = flash;
1601 break;
1602 }
1603 }
1604 } else {
1605 /* Flash interface not yet reconfigured. */
1606 uint32_t mask;
1607
1608 DBPRINT(sc, BCE_INFO_LOAD,
1609 "%s(): Flash was NOT reconfigured.\n", __func__);
1610
1611 if (val & (1 << 23))
1612 mask = FLASH_BACKUP_STRAP_MASK;
1613 else
1614 mask = FLASH_STRAP_MASK;
1615
1616 /* Look for the matching NVRAM device configuration data. */
1617 for (j = 0, flash = flash_table; j < entry_count;
1618 j++, flash++) {
1619 /* Check if the device matches any of the known devices. */
1620 if ((val & mask) == (flash->strapping & mask)) {
1621 /* Found a device match. */
1622 sc->bce_flash_info = flash;
1623
1624 /* Request access to the flash interface. */
1625 rc = bce_acquire_nvram_lock(sc);
1626 if (rc != 0)
1627 return rc;
1628
1629 /* Reconfigure the flash interface. */
1630 bce_enable_nvram_access(sc);
1631 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1632 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1633 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1634 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1635 bce_disable_nvram_access(sc);
1636 bce_release_nvram_lock(sc);
1637 break;
1638 }
1639 }
1640 }
1641
1642 /* Check if a matching device was found. */
1643 if (j == entry_count) {
1644 sc->bce_flash_info = NULL;
1645 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1646 rc = ENODEV;
1647 }
1648
1649 /* Write the flash config data to the shared memory interface. */
1650 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_SHARED_HW_CFG_CONFIG2) &
1651 BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1652 if (val)
1653 sc->bce_flash_size = val;
1654 else
1655 sc->bce_flash_size = sc->bce_flash_info->total_size;
1656
1657 DBPRINT(sc, BCE_INFO_LOAD, "%s() flash->total_size = 0x%08X\n",
1658 __func__, sc->bce_flash_info->total_size);
1659
1660 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
1661
1662 return rc;
1663}
1664
1665
1666/****************************************************************************/
1667/* Read an arbitrary range of data from NVRAM. */
1668/* */
1669/* Prepares the NVRAM interface for access and reads the requested data */
1670/* into the supplied buffer. */
1671/* */
1672/* Returns: */
1673/* 0 on success and the data read, positive value on failure. */
1674/****************************************************************************/
1675static int
1676bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1677 int buf_size)
1678{
1679 uint32_t cmd_flags, offset32, len32, extra;
1680 int rc = 0;
1681
1682 if (buf_size == 0)
1683 return 0;
1684
1685 /* Request access to the flash interface. */
1686 rc = bce_acquire_nvram_lock(sc);
1687 if (rc != 0)
1688 return rc;
1689
1690 /* Enable access to flash interface */
1691 bce_enable_nvram_access(sc);
1692
1693 len32 = buf_size;
1694 offset32 = offset;
1695 extra = 0;
1696
1697 cmd_flags = 0;
1698
1699 /* XXX should we release nvram lock if read_dword() fails? */
1700 if (offset32 & 3) {
1701 uint8_t buf[4];
1702 uint32_t pre_len;
1703
1704 offset32 &= ~3;
1705 pre_len = 4 - (offset & 3);
1706
1707 if (pre_len >= len32) {
1708 pre_len = len32;
1709 cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1710 } else {
1711 cmd_flags = BCE_NVM_COMMAND_FIRST;
1712 }
1713
1714 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1715 if (rc)
1716 return rc;
1717
1718 memcpy(ret_buf, buf + (offset & 3), pre_len);
1719
1720 offset32 += 4;
1721 ret_buf += pre_len;
1722 len32 -= pre_len;
1723 }
1724
1725 if (len32 & 3) {
1726 extra = 4 - (len32 & 3);
1727 len32 = (len32 + 4) & ~3;
1728 }
1729
1730 if (len32 == 4) {
1731 uint8_t buf[4];
1732
1733 if (cmd_flags)
1734 cmd_flags = BCE_NVM_COMMAND_LAST;
1735 else
1736 cmd_flags = BCE_NVM_COMMAND_FIRST |
1737 BCE_NVM_COMMAND_LAST;
1738
1739 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1740
1741 memcpy(ret_buf, buf, 4 - extra);
1742 } else if (len32 > 0) {
1743 uint8_t buf[4];
1744
1745 /* Read the first word. */
1746 if (cmd_flags)
1747 cmd_flags = 0;
1748 else
1749 cmd_flags = BCE_NVM_COMMAND_FIRST;
1750
1751 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1752
1753 /* Advance to the next dword. */
1754 offset32 += 4;
1755 ret_buf += 4;
1756 len32 -= 4;
1757
1758 while (len32 > 4 && rc == 0) {
1759 rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1760
1761 /* Advance to the next dword. */
1762 offset32 += 4;
1763 ret_buf += 4;
1764 len32 -= 4;
1765 }
1766
1767 if (rc)
1768 return rc;
1769
1770 cmd_flags = BCE_NVM_COMMAND_LAST;
1771 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1772
1773 memcpy(ret_buf, buf, 4 - extra);
1774 }
1775
1776 /* Disable access to flash interface and release the lock. */
1777 bce_disable_nvram_access(sc);
1778 bce_release_nvram_lock(sc);
1779
1780 return rc;
1781}
1782
1783
1784#ifdef BCE_NVRAM_WRITE_SUPPORT
1785/****************************************************************************/
1786/* Write an arbitrary range of data from NVRAM. */
1787/* */
1788/* Prepares the NVRAM interface for write access and writes the requested */
1789/* data from the supplied buffer. The caller is responsible for */
1790/* calculating any appropriate CRCs. */
1791/* */
1792/* Returns: */
1793/* 0 on success, positive value on failure. */
1794/****************************************************************************/
1795static int
1796bce_nvram_write(struct bce_softc *sc, uint32_t offset, uint8_t *data_buf,
1797 int buf_size)
1798{
1799 uint32_t written, offset32, len32;
1800 uint8_t *buf, start[4], end[4];
1801 int rc = 0;
1802 int align_start, align_end;
1803
1804 buf = data_buf;
1805 offset32 = offset;
1806 len32 = buf_size;
1807 align_end = 0;
1808 align_start = (offset32 & 3);
1809
1810 if (align_start) {
1811 offset32 &= ~3;
1812 len32 += align_start;
1813 rc = bce_nvram_read(sc, offset32, start, 4);
1814 if (rc)
1815 return rc;
1816 }
1817
1818 if (len32 & 3) {
1819 if (len32 > 4 || !align_start) {
1820 align_end = 4 - (len32 & 3);
1821 len32 += align_end;
1822 rc = bce_nvram_read(sc, offset32 + len32 - 4, end, 4);
1823 if (rc)
1824 return rc;
1825 }
1826 }
1827
1828 if (align_start || align_end) {
1829 buf = kmalloc(len32, M_DEVBUF, M_NOWAIT);
1830 if (buf == NULL)
1831 return ENOMEM;
1832 if (align_start)
1833 memcpy(buf, start, 4);
1834 if (align_end)
1835 memcpy(buf + len32 - 4, end, 4);
1836 memcpy(buf + align_start, data_buf, buf_size);
1837 }
1838
1839 written = 0;
1840 while (written < len32 && rc == 0) {
1841 uint32_t page_start, page_end, data_start, data_end;
1842 uint32_t addr, cmd_flags;
1843 int i;
1844 uint8_t flash_buffer[264];
1845
1846 /* Find the page_start addr */
1847 page_start = offset32 + written;
1848 page_start -= (page_start % sc->bce_flash_info->page_size);
1849 /* Find the page_end addr */
1850 page_end = page_start + sc->bce_flash_info->page_size;
1851 /* Find the data_start addr */
1852 data_start = (written == 0) ? offset32 : page_start;
1853 /* Find the data_end addr */
1854 data_end = (page_end > offset32 + len32) ? (offset32 + len32)
1855 : page_end;
1856
1857 /* Request access to the flash interface. */
1858 rc = bce_acquire_nvram_lock(sc);
1859 if (rc != 0)
1860 goto nvram_write_end;
1861
1862 /* Enable access to flash interface */
1863 bce_enable_nvram_access(sc);
1864
1865 cmd_flags = BCE_NVM_COMMAND_FIRST;
1866 if (sc->bce_flash_info->buffered == 0) {
1867 int j;
1868
1869 /*
1870 * Read the whole page into the buffer
1871 * (non-buffer flash only)
1872 */
1873 for (j = 0; j < sc->bce_flash_info->page_size; j += 4) {
1874 if (j == (sc->bce_flash_info->page_size - 4))
1875 cmd_flags |= BCE_NVM_COMMAND_LAST;
1876
1877 rc = bce_nvram_read_dword(sc, page_start + j,
1878 &flash_buffer[j],
1879 cmd_flags);
1880 if (rc)
1881 goto nvram_write_end;
1882
1883 cmd_flags = 0;
1884 }
1885 }
1886
1887 /* Enable writes to flash interface (unlock write-protect) */
1888 rc = bce_enable_nvram_write(sc);
1889 if (rc != 0)
1890 goto nvram_write_end;
1891
1892 /* Erase the page */
1893 rc = bce_nvram_erase_page(sc, page_start);
1894 if (rc != 0)
1895 goto nvram_write_end;
1896
1897 /* Re-enable the write again for the actual write */
1898 bce_enable_nvram_write(sc);
1899
1900 /* Loop to write back the buffer data from page_start to
1901 * data_start */
1902 i = 0;
1903 if (sc->bce_flash_info->buffered == 0) {
1904 for (addr = page_start; addr < data_start;
1905 addr += 4, i += 4) {
1906 rc = bce_nvram_write_dword(sc, addr,
1907 &flash_buffer[i],
1908 cmd_flags);
1909 if (rc != 0)
1910 goto nvram_write_end;
1911
1912 cmd_flags = 0;
1913 }
1914 }
1915
1916 /* Loop to write the new data from data_start to data_end */
1917 for (addr = data_start; addr < data_end; addr += 4, i++) {
1918 if (addr == page_end - 4 ||
1919 (sc->bce_flash_info->buffered &&
1920 addr == data_end - 4))
1921 cmd_flags |= BCE_NVM_COMMAND_LAST;
1922
1923 rc = bce_nvram_write_dword(sc, addr, buf, cmd_flags);
1924 if (rc != 0)
1925 goto nvram_write_end;
1926
1927 cmd_flags = 0;
1928 buf += 4;
1929 }
1930
1931 /* Loop to write back the buffer data from data_end
1932 * to page_end */
1933 if (sc->bce_flash_info->buffered == 0) {
1934 for (addr = data_end; addr < page_end;
1935 addr += 4, i += 4) {
1936 if (addr == page_end-4)
1937 cmd_flags = BCE_NVM_COMMAND_LAST;
1938
1939 rc = bce_nvram_write_dword(sc, addr,
1940 &flash_buffer[i], cmd_flags);
1941 if (rc != 0)
1942 goto nvram_write_end;
1943
1944 cmd_flags = 0;
1945 }
1946 }
1947
1948 /* Disable writes to flash interface (lock write-protect) */
1949 bce_disable_nvram_write(sc);
1950
1951 /* Disable access to flash interface */
1952 bce_disable_nvram_access(sc);
1953 bce_release_nvram_lock(sc);
1954
1955 /* Increment written */
1956 written += data_end - data_start;
1957 }
1958
1959nvram_write_end:
1960 if (align_start || align_end)
1961 kfree(buf, M_DEVBUF);
1962 return rc;
1963}
1964#endif /* BCE_NVRAM_WRITE_SUPPORT */
1965
1966
1967/****************************************************************************/
1968/* Verifies that NVRAM is accessible and contains valid data. */
1969/* */
1970/* Reads the configuration data from NVRAM and verifies that the CRC is */
1971/* correct. */
1972/* */
1973/* Returns: */
1974/* 0 on success, positive value on failure. */
1975/****************************************************************************/
1976static int
1977bce_nvram_test(struct bce_softc *sc)
1978{
1979 uint32_t buf[BCE_NVRAM_SIZE / 4];
1980 uint32_t magic, csum;
1981 uint8_t *data = (uint8_t *)buf;
1982 int rc = 0;
1983
1984 /*
1985 * Check that the device NVRAM is valid by reading
1986 * the magic value at offset 0.
1987 */
1988 rc = bce_nvram_read(sc, 0, data, 4);
1989 if (rc != 0)
1990 return rc;
1991
1992 magic = be32toh(buf[0]);
1993 if (magic != BCE_NVRAM_MAGIC) {
1994 if_printf(&sc->arpcom.ac_if,
1995 "Invalid NVRAM magic value! Expected: 0x%08X, "
1996 "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1997 return ENODEV;
1998 }
1999
2000 /*
2001 * Verify that the device NVRAM includes valid
2002 * configuration data.
2003 */
2004 rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
2005 if (rc != 0)
2006 return rc;
2007
2008 csum = ether_crc32_le(data, 0x100);
2009 if (csum != BCE_CRC32_RESIDUAL) {
2010 if_printf(&sc->arpcom.ac_if,
2011 "Invalid Manufacturing Information NVRAM CRC! "
2012 "Expected: 0x%08X, Found: 0x%08X\n",
2013 BCE_CRC32_RESIDUAL, csum);
2014 return ENODEV;
2015 }
2016
2017 csum = ether_crc32_le(data + 0x100, 0x100);
2018 if (csum != BCE_CRC32_RESIDUAL) {
2019 if_printf(&sc->arpcom.ac_if,
2020 "Invalid Feature Configuration Information "
2021 "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
2022 BCE_CRC32_RESIDUAL, csum);
2023 rc = ENODEV;
2024 }
2025 return rc;
2026}
2027
2028
/****************************************************************************/
/* Free any DMA memory owned by the driver.                                 */
/*                                                                          */
/* Scans through each data structure that requires DMA memory and frees    */
/* the memory if allocated.                                                 */
/*                                                                          */
/* Each tag is only torn down after its memory/maps have been released,    */
/* mirroring the allocation order in bce_dma_alloc().                       */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dma_free(struct bce_softc *sc)
{
	int i;

	/* Destroy the status block. */
	if (sc->status_tag != NULL) {
		if (sc->status_block != NULL) {
			bus_dmamap_unload(sc->status_tag, sc->status_map);
			bus_dmamem_free(sc->status_tag, sc->status_block,
					sc->status_map);
		}
		bus_dma_tag_destroy(sc->status_tag);
	}

	/* Destroy the statistics block. */
	if (sc->stats_tag != NULL) {
		if (sc->stats_block != NULL) {
			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
			bus_dmamem_free(sc->stats_tag, sc->stats_block,
					sc->stats_map);
		}
		bus_dma_tag_destroy(sc->stats_tag);
	}

	/* Destroy the TX buffer descriptor DMA stuffs. */
	if (sc->tx_bd_chain_tag != NULL) {
		for (i = 0; i < TX_PAGES; i++) {
			if (sc->tx_bd_chain[i] != NULL) {
				bus_dmamap_unload(sc->tx_bd_chain_tag,
						  sc->tx_bd_chain_map[i]);
				bus_dmamem_free(sc->tx_bd_chain_tag,
						sc->tx_bd_chain[i],
						sc->tx_bd_chain_map[i]);
			}
		}
		bus_dma_tag_destroy(sc->tx_bd_chain_tag);
	}

	/* Destroy the RX buffer descriptor DMA stuffs. */
	if (sc->rx_bd_chain_tag != NULL) {
		for (i = 0; i < RX_PAGES; i++) {
			if (sc->rx_bd_chain[i] != NULL) {
				bus_dmamap_unload(sc->rx_bd_chain_tag,
						  sc->rx_bd_chain_map[i]);
				bus_dmamem_free(sc->rx_bd_chain_tag,
						sc->rx_bd_chain[i],
						sc->rx_bd_chain_map[i]);
			}
		}
		bus_dma_tag_destroy(sc->rx_bd_chain_tag);
	}

	/* Destroy the TX mbuf DMA stuffs. */
	if (sc->tx_mbuf_tag != NULL) {
		for (i = 0; i < TOTAL_TX_BD; i++) {
			/* Must have been unloaded in bce_stop() */
			KKASSERT(sc->tx_mbuf_ptr[i] == NULL);
			bus_dmamap_destroy(sc->tx_mbuf_tag,
					   sc->tx_mbuf_map[i]);
		}
		bus_dma_tag_destroy(sc->tx_mbuf_tag);
	}

	/* Destroy the RX mbuf DMA stuffs. */
	if (sc->rx_mbuf_tag != NULL) {
		for (i = 0; i < TOTAL_RX_BD; i++) {
			/* Must have been unloaded in bce_stop() */
			KKASSERT(sc->rx_mbuf_ptr[i] == NULL);
			bus_dmamap_destroy(sc->rx_mbuf_tag,
					   sc->rx_mbuf_map[i]);
		}
		/* Also release the spare map used for RX refills. */
		bus_dmamap_destroy(sc->rx_mbuf_tag, sc->rx_mbuf_tmpmap);
		bus_dma_tag_destroy(sc->rx_mbuf_tag);
	}

	/* Destroy the parent tag */
	if (sc->parent_tag != NULL)
		bus_dma_tag_destroy(sc->parent_tag);
}
2119
2120
/****************************************************************************/
/* Get DMA memory from the OS.                                              */
/*                                                                          */
/* Validates that the OS has provided DMA buffers in response to a          */
/* bus_dmamap_load() call and saves the physical address of those buffers. */
/* When the callback is used the OS will return 0 for the mapping function */
/* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any */
/* failures back to the caller.                                             */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	/* 'arg' is the caller's bus_addr_t to fill in on success. */
	bus_addr_t *busaddr = arg;

	/*
	 * Simulate a mapping failure.
	 * XXX not correct.
	 */
	DBRUNIF(DB_RANDOMTRUE(bce_debug_dma_map_addr_failure),
		kprintf("bce: %s(%d): Simulating DMA mapping error.\n",
			__FILE__, __LINE__);
		error = ENOMEM);

	/* Check for an error and signal the caller that an error occurred. */
	if (error)
		return;

	/* All users of this callback load single-segment regions. */
	KASSERT(nseg == 1, ("only one segment is allowed\n"));
	*busaddr = segs->ds_addr;
}
2154
2155
43c2aeb0
SZ
2156/****************************************************************************/
2157/* Allocate any DMA memory needed by the driver. */
2158/* */
2159/* Allocates DMA memory needed for the various global structures needed by */
2160/* hardware. */
2161/* */
2162/* Returns: */
2163/* 0 for success, positive value for failure. */
2164/****************************************************************************/
2165static int
2166bce_dma_alloc(struct bce_softc *sc)
2167{
2168 struct ifnet *ifp = &sc->arpcom.ac_if;
2169 int i, j, rc = 0;
2170 bus_addr_t busaddr;
2171
2172 /*
2173 * Allocate the parent bus DMA tag appropriate for PCI.
2174 */
2175 rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2176 sc->max_bus_addr, BUS_SPACE_MAXADDR,
2177 NULL, NULL,
45010e4d 2178 BUS_SPACE_MAXSIZE_32BIT, 0,
43c2aeb0
SZ
2179 BUS_SPACE_MAXSIZE_32BIT,
2180 0, &sc->parent_tag);
2181 if (rc != 0) {
2182 if_printf(ifp, "Could not allocate parent DMA tag!\n");
2183 return rc;
2184 }
2185
2186 /*
4a458e9d 2187 * Allocate status block.
43c2aeb0 2188 */
4a458e9d
SZ
2189 sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2190 BCE_DMA_ALIGN, BCE_STATUS_BLK_SZ,
2191 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2192 &sc->status_tag, &sc->status_map,
2193 &sc->status_block_paddr);
2194 if (sc->status_block == NULL) {
2195 if_printf(ifp, "Could not allocate status block!\n");
2196 return ENOMEM;
43c2aeb0
SZ
2197 }
2198
43c2aeb0 2199 /*
4a458e9d 2200 * Allocate statistics block.
43c2aeb0 2201 */
4a458e9d
SZ
2202 sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2203 BCE_DMA_ALIGN, BCE_STATS_BLK_SZ,
2204 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2205 &sc->stats_tag, &sc->stats_map,
2206 &sc->stats_block_paddr);
2207 if (sc->stats_block == NULL) {
2208 if_printf(ifp, "Could not allocate statistics block!\n");
2209 return ENOMEM;
43c2aeb0
SZ
2210 }
2211
43c2aeb0
SZ
2212 /*
2213 * Create a DMA tag for the TX buffer descriptor chain,
2214 * allocate and clear the memory, and fetch the
2215 * physical address of the block.
2216 */
4a458e9d
SZ
2217 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2218 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
43c2aeb0
SZ
2219 NULL, NULL,
2220 BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2221 0, &sc->tx_bd_chain_tag);
2222 if (rc != 0) {
2223 if_printf(ifp, "Could not allocate "
2224 "TX descriptor chain DMA tag!\n");
2225 return rc;
2226 }
2227
2228 for (i = 0; i < TX_PAGES; i++) {
2229 rc = bus_dmamem_alloc(sc->tx_bd_chain_tag,
2230 (void **)&sc->tx_bd_chain[i],
4a458e9d
SZ
2231 BUS_DMA_WAITOK | BUS_DMA_ZERO |
2232 BUS_DMA_COHERENT,
2233 &sc->tx_bd_chain_map[i]);
43c2aeb0
SZ
2234 if (rc != 0) {
2235 if_printf(ifp, "Could not allocate %dth TX descriptor "
2236 "chain DMA memory!\n", i);
2237 return rc;
2238 }
2239
2240 rc = bus_dmamap_load(sc->tx_bd_chain_tag,
2241 sc->tx_bd_chain_map[i],
2242 sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ,
2243 bce_dma_map_addr, &busaddr,
2244 BUS_DMA_WAITOK);
2245 if (rc != 0) {
4a458e9d
SZ
2246 if (rc == EINPROGRESS) {
2247 panic("%s coherent memory loading "
2248 "is still in progress!", ifp->if_xname);
2249 }
43c2aeb0
SZ
2250 if_printf(ifp, "Could not map %dth TX descriptor "
2251 "chain DMA memory!\n", i);
2252 bus_dmamem_free(sc->tx_bd_chain_tag,
2253 sc->tx_bd_chain[i],
2254 sc->tx_bd_chain_map[i]);
2255 sc->tx_bd_chain[i] = NULL;
2256 return rc;
2257 }
2258
2259 sc->tx_bd_chain_paddr[i] = busaddr;
2260 /* DRC - Fix for 64 bit systems. */
2261 DBPRINT(sc, BCE_INFO, "tx_bd_chain_paddr[%d] = 0x%08X\n",
2262 i, (uint32_t)sc->tx_bd_chain_paddr[i]);
2263 }
2264
2265 /* Create a DMA tag for TX mbufs. */
45010e4d
SZ
2266 rc = bus_dma_tag_create(sc->parent_tag, 1, 0,
2267 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
43c2aeb0 2268 NULL, NULL,
45010e4d 2269 /* BCE_MAX_JUMBO_ETHER_MTU_VLAN */MCLBYTES,
43c2aeb0 2270 BCE_MAX_SEGMENTS, MCLBYTES,
45010e4d
SZ
2271 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
2272 BUS_DMA_ONEBPAGE,
2273 &sc->tx_mbuf_tag);
43c2aeb0
SZ
2274 if (rc != 0) {
2275 if_printf(ifp, "Could not allocate TX mbuf DMA tag!\n");
2276 return rc;
2277 }
2278
2279 /* Create DMA maps for the TX mbufs clusters. */
2280 for (i = 0; i < TOTAL_TX_BD; i++) {
45010e4d
SZ
2281 rc = bus_dmamap_create(sc->tx_mbuf_tag,
2282 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
43c2aeb0
SZ
2283 &sc->tx_mbuf_map[i]);
2284 if (rc != 0) {
2285 for (j = 0; j < i; ++j) {
2286 bus_dmamap_destroy(sc->tx_mbuf_tag,
2287 sc->tx_mbuf_map[i]);
2288 }
2289 bus_dma_tag_destroy(sc->tx_mbuf_tag);
2290 sc->tx_mbuf_tag = NULL;
2291
2292 if_printf(ifp, "Unable to create "
2293 "%dth TX mbuf DMA map!\n", i);
2294 return rc;
2295 }
2296 }
2297
2298 /*
2299 * Create a DMA tag for the RX buffer descriptor chain,
2300 * allocate and clear the memory, and fetch the physical
2301 * address of the blocks.
2302 */
4a458e9d
SZ
2303 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2304 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
43c2aeb0
SZ
2305 NULL, NULL,
2306 BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2307 0, &sc->rx_bd_chain_tag);
2308 if (rc != 0) {
2309 if_printf(ifp, "Could not allocate "
2310 "RX descriptor chain DMA tag!\n");
2311 return rc;
2312 }
2313
2314 for (i = 0; i < RX_PAGES; i++) {
2315 rc = bus_dmamem_alloc(sc->rx_bd_chain_tag,
2316 (void **)&sc->rx_bd_chain[i],
4a458e9d
SZ
2317 BUS_DMA_WAITOK | BUS_DMA_ZERO |
2318 BUS_DMA_COHERENT,
43c2aeb0
SZ
2319 &sc->rx_bd_chain_map[i]);
2320 if (rc != 0) {
2321 if_printf(ifp, "Could not allocate %dth RX descriptor "
2322 "chain DMA memory!\n", i);
2323 return rc;
2324 }
2325
2326 rc = bus_dmamap_load(sc->rx_bd_chain_tag,
2327 sc->rx_bd_chain_map[i],
2328 sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ,
2329 bce_dma_map_addr, &busaddr,
2330 BUS_DMA_WAITOK);
2331 if (rc != 0) {
4a458e9d
SZ
2332 if (rc == EINPROGRESS) {
2333 panic("%s coherent memory loading "
2334 "is still in progress!", ifp->if_xname);
2335 }
43c2aeb0
SZ
2336 if_printf(ifp, "Could not map %dth RX descriptor "
2337 "chain DMA memory!\n", i);
2338 bus_dmamem_free(sc->rx_bd_chain_tag,
2339 sc->rx_bd_chain[i],
2340 sc->rx_bd_chain_map[i]);
2341 sc->rx_bd_chain[i] = NULL;
2342 return rc;
2343 }
2344
2345 sc->rx_bd_chain_paddr[i] = busaddr;
2346 /* DRC - Fix for 64 bit systems. */
2347 DBPRINT(sc, BCE_INFO, "rx_bd_chain_paddr[%d] = 0x%08X\n",
2348 i, (uint32_t)sc->rx_bd_chain_paddr[i]);
2349 }
2350
2351 /* Create a DMA tag for RX mbufs. */
45010e4d
SZ
2352 rc = bus_dma_tag_create(sc->parent_tag, 1, 0,
2353 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
43c2aeb0 2354 NULL, NULL,
45010e4d
SZ
2355 MCLBYTES, 1, MCLBYTES,
2356 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK,
2357 &sc->rx_mbuf_tag);
43c2aeb0
SZ
2358 if (rc != 0) {
2359 if_printf(ifp, "Could not allocate RX mbuf DMA tag!\n");
2360 return rc;
2361 }
2362
c36fd9ee
SZ
2363 /* Create tmp DMA map for RX mbuf clusters. */
2364 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK,
2365 &sc->rx_mbuf_tmpmap);
2366 if (rc != 0) {
2367 bus_dma_tag_destroy(sc->rx_mbuf_tag);
2368 sc->rx_mbuf_tag = NULL;
2369
2370 if_printf(ifp, "Could not create RX mbuf tmp DMA map!\n");
2371 return rc;
2372 }
2373
43c2aeb0
SZ
2374 /* Create DMA maps for the RX mbuf clusters. */
2375 for (i = 0; i < TOTAL_RX_BD; i++) {
2376 rc = bus_dmamap_create(sc->rx_mbuf_tag, BUS_DMA_WAITOK,
2377 &sc->rx_mbuf_map[i]);
2378 if (rc != 0) {
2379 for (j = 0; j < i; ++j) {
2380 bus_dmamap_destroy(sc->rx_mbuf_tag,
2381 sc->rx_mbuf_map[j]);
2382 }
2383 bus_dma_tag_destroy(sc->rx_mbuf_tag);
2384 sc->rx_mbuf_tag = NULL;
2385
2386 if_printf(ifp, "Unable to create "
2387 "%dth RX mbuf DMA map!\n", i);
2388 return rc;
2389 }
2390 }
2391 return 0;
2392}
2393
2394
2395/****************************************************************************/
2396/* Firmware synchronization. */
2397/* */
2398/* Before performing certain events such as a chip reset, synchronize with */
2399/* the firmware first. */
2400/* */
2401/* Returns: */
2402/* 0 for success, positive value for failure. */
2403/****************************************************************************/
2404static int
2405bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2406{
2407 int i, rc = 0;
2408 uint32_t val;
2409
2410 /* Don't waste any time if we've timed out before. */
2411 if (sc->bce_fw_timed_out)
2412 return EBUSY;
2413
2414 /* Increment the message sequence number. */
2415 sc->bce_fw_wr_seq++;
2416 msg_data |= sc->bce_fw_wr_seq;
2417
2418 DBPRINT(sc, BCE_VERBOSE, "bce_fw_sync(): msg_data = 0x%08X\n", msg_data);
2419
2420 /* Send the message to the bootcode driver mailbox. */
2421 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2422
2423 /* Wait for the bootcode to acknowledge the message. */
2424 for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2425 /* Check for a response in the bootcode firmware mailbox. */
2426 val = REG_RD_IND(sc, sc->bce_shmem_base + BCE_FW_MB);
2427 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2428 break;
2429 DELAY(1000);
2430 }
2431
2432 /* If we've timed out, tell the bootcode that we've stopped waiting. */
2433 if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2434 (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2435 if_printf(&sc->arpcom.ac_if,
2436 "Firmware synchronization timeout! "
2437 "msg_data = 0x%08X\n", msg_data);
2438
2439 msg_data &= ~BCE_DRV_MSG_CODE;
2440 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2441
2442 REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_MB, msg_data);
2443
2444 sc->bce_fw_timed_out = 1;
2445 rc = EBUSY;
2446 }
2447 return rc;
2448}
2449
2450
2451/****************************************************************************/
2452/* Load Receive Virtual 2 Physical (RV2P) processor firmware. */
2453/* */
2454/* Returns: */
2455/* Nothing. */
2456/****************************************************************************/
2457static void
2458bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2459 uint32_t rv2p_code_len, uint32_t rv2p_proc)
2460{
2461 int i;
2462 uint32_t val;
2463
2464 for (i = 0; i < rv2p_code_len; i += 8) {
2465 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2466 rv2p_code++;
2467 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2468 rv2p_code++;
2469
2470 if (rv2p_proc == RV2P_PROC1) {
2471 val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2472 REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2473 } else {
2474 val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2475 REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2476 }
2477 }
2478
2479 /* Reset the processor, un-stall is done later. */
2480 if (rv2p_proc == RV2P_PROC1)
2481 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2482 else
2483 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2484}
2485
2486
2487/****************************************************************************/
2488/* Load RISC processor firmware. */
2489/* */
2490/* Loads firmware from the file if_bcefw.h into the scratchpad memory */
2491/* associated with a particular processor. */
2492/* */
2493/* Returns: */
2494/* Nothing. */
2495/****************************************************************************/
2496static void
2497bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2498 struct fw_info *fw)
2499{
2500 uint32_t offset, val;
2501 int j;
2502
2503 /* Halt the CPU. */
2504 val = REG_RD_IND(sc, cpu_reg->mode);
2505 val |= cpu_reg->mode_value_halt;
2506 REG_WR_IND(sc, cpu_reg->mode, val);
2507 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2508
2509 /* Load the Text area. */
2510 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2511 if (fw->text) {
2512 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2513 REG_WR_IND(sc, offset, fw->text[j]);
2514 }
2515
2516 /* Load the Data area. */
2517 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2518 if (fw->data) {
2519 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2520 REG_WR_IND(sc, offset, fw->data[j]);
2521 }
2522
2523 /* Load the SBSS area. */
2524 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2525 if (fw->sbss) {
2526 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2527 REG_WR_IND(sc, offset, fw->sbss[j]);
2528 }
2529
2530 /* Load the BSS area. */
2531 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2532 if (fw->bss) {
2533 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2534 REG_WR_IND(sc, offset, fw->bss[j]);
2535 }
2536
2537 /* Load the Read-Only area. */
2538 offset = cpu_reg->spad_base +
2539 (fw->rodata_addr - cpu_reg->mips_view_base);
2540 if (fw->rodata) {
2541 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2542 REG_WR_IND(sc, offset, fw->rodata[j]);
2543 }
2544
2545 /* Clear the pre-fetch instruction. */
2546 REG_WR_IND(sc, cpu_reg->inst, 0);
2547 REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2548
2549 /* Start the CPU. */
2550 val = REG_RD_IND(sc, cpu_reg->mode);
2551 val &= ~cpu_reg->mode_value_halt;
2552 REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2553 REG_WR_IND(sc, cpu_reg->mode, val);
2554}
2555
2556
/****************************************************************************/
/* Initialize the RV2P, RX, TX, TPAT, and COM CPUs.                         */
/*                                                                          */
/* Loads the firmware for each CPU and starts the CPU.                      */
/*                                                                          */
/* The cpu_reg/fw structures are refilled in place for each processor and  */
/* handed to bce_load_cpu_fw(); the firmware images and their addresses    */
/* come from if_bcefw.h.                                                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init_cpus(struct bce_softc *sc)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;

	/* Initialize the RV2P processor. */
	bce_load_rv2p_fw(sc, bce_rv2p_proc1, sizeof(bce_rv2p_proc1), RV2P_PROC1);
	bce_load_rv2p_fw(sc, bce_rv2p_proc2, sizeof(bce_rv2p_proc2), RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BCE_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_RXP_b06FwReleaseMajor;
	fw.ver_minor = bce_RXP_b06FwReleaseMinor;
	fw.ver_fix = bce_RXP_b06FwReleaseFix;
	fw.start_addr = bce_RXP_b06FwStartAddr;

	fw.text_addr = bce_RXP_b06FwTextAddr;
	fw.text_len = bce_RXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_RXP_b06FwText;

	fw.data_addr = bce_RXP_b06FwDataAddr;
	fw.data_len = bce_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_RXP_b06FwData;

	fw.sbss_addr = bce_RXP_b06FwSbssAddr;
	fw.sbss_len = bce_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_RXP_b06FwSbss;

	fw.bss_addr = bce_RXP_b06FwBssAddr;
	fw.bss_len = bce_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_RXP_b06FwBss;

	fw.rodata_addr = bce_RXP_b06FwRodataAddr;
	fw.rodata_len = bce_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_RXP_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading RX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BCE_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_TXP_b06FwReleaseMajor;
	fw.ver_minor = bce_TXP_b06FwReleaseMinor;
	fw.ver_fix = bce_TXP_b06FwReleaseFix;
	fw.start_addr = bce_TXP_b06FwStartAddr;

	fw.text_addr = bce_TXP_b06FwTextAddr;
	fw.text_len = bce_TXP_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_TXP_b06FwText;

	fw.data_addr = bce_TXP_b06FwDataAddr;
	fw.data_len = bce_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_TXP_b06FwData;

	fw.sbss_addr = bce_TXP_b06FwSbssAddr;
	fw.sbss_len = bce_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_TXP_b06FwSbss;

	fw.bss_addr = bce_TXP_b06FwBssAddr;
	fw.bss_len = bce_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_TXP_b06FwBss;

	fw.rodata_addr = bce_TXP_b06FwRodataAddr;
	fw.rodata_len = bce_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_TXP_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading TX firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BCE_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bce_TPAT_b06FwReleaseFix;
	fw.start_addr = bce_TPAT_b06FwStartAddr;

	fw.text_addr = bce_TPAT_b06FwTextAddr;
	fw.text_len = bce_TPAT_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_TPAT_b06FwText;

	fw.data_addr = bce_TPAT_b06FwDataAddr;
	fw.data_len = bce_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_TPAT_b06FwData;

	fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
	fw.sbss_len = bce_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_TPAT_b06FwSbss;

	fw.bss_addr = bce_TPAT_b06FwBssAddr;
	fw.bss_len = bce_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_TPAT_b06FwBss;

	fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
	fw.rodata_len = bce_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_TPAT_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading TPAT firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BCE_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BCE_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
	cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BCE_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bce_COM_b06FwReleaseMajor;
	fw.ver_minor = bce_COM_b06FwReleaseMinor;
	fw.ver_fix = bce_COM_b06FwReleaseFix;
	fw.start_addr = bce_COM_b06FwStartAddr;

	fw.text_addr = bce_COM_b06FwTextAddr;
	fw.text_len = bce_COM_b06FwTextLen;
	fw.text_index = 0;
	fw.text = bce_COM_b06FwText;

	fw.data_addr = bce_COM_b06FwDataAddr;
	fw.data_len = bce_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bce_COM_b06FwData;

	fw.sbss_addr = bce_COM_b06FwSbssAddr;
	fw.sbss_len = bce_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bce_COM_b06FwSbss;

	fw.bss_addr = bce_COM_b06FwBssAddr;
	fw.bss_len = bce_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bce_COM_b06FwBss;

	fw.rodata_addr = bce_COM_b06FwRodataAddr;
	fw.rodata_len = bce_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bce_COM_b06FwRodata;

	DBPRINT(sc, BCE_INFO_RESET, "Loading COM firmware.\n");
	bce_load_cpu_fw(sc, &cpu_reg, &fw);
}
2763
2764
2765/****************************************************************************/
2766/* Initialize context memory. */
2767/* */
2768/* Clears the memory associated with each Context ID (CID). */
2769/* */
2770/* Returns: */
2771/* Nothing. */
2772/****************************************************************************/
2773static void
3a41a80b 2774bce_init_ctx(struct bce_softc *sc)
43c2aeb0 2775{
3a41a80b 2776 uint32_t vcid = 96;
43c2aeb0 2777
43c2aeb0
SZ
2778 while (vcid) {
2779 uint32_t vcid_addr, pcid_addr, offset;
3a41a80b 2780 int i;
43c2aeb0
SZ
2781
2782 vcid--;
2783
2784 vcid_addr = GET_CID_ADDR(vcid);
2785 pcid_addr = vcid_addr;
2786
3a41a80b
SZ
2787 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2788 vcid_addr += (i << PHY_CTX_SHIFT);
2789 pcid_addr += (i << PHY_CTX_SHIFT);
43c2aeb0 2790
3a41a80b
SZ
2791 REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
2792 REG_WR(sc, BCE_CTX_PAGE_TBL, pcid_addr);
43c2aeb0 2793
3a41a80b
SZ
2794 /* Zero out the context. */
2795 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2796 CTX_WR(sc, vcid_addr, offset, 0);
2797 }
43c2aeb0
SZ
2798 }
2799}
2800
2801
2802/****************************************************************************/
2803/* Fetch the permanent MAC address of the controller. */
2804/* */
2805/* Returns: */
2806/* Nothing. */
2807/****************************************************************************/
2808static void
2809bce_get_mac_addr(struct bce_softc *sc)
2810{
2811 uint32_t mac_lo = 0, mac_hi = 0;
2812
2813 /*
2814 * The NetXtreme II bootcode populates various NIC
2815 * power-on and runtime configuration items in a
2816 * shared memory area. The factory configured MAC
2817 * address is available from both NVRAM and the
2818 * shared memory area so we'll read the value from
2819 * shared memory for speed.
2820 */
2821
2822 mac_hi = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_UPPER);
2823 mac_lo = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_HW_CFG_MAC_LOWER);
2824
2825 if (mac_lo == 0 && mac_hi == 0) {
2826 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
2827 } else {
2828 sc->eaddr[0] = (u_char)(mac_hi >> 8);
2829 sc->eaddr[1] = (u_char)(mac_hi >> 0);
2830 sc->eaddr[2] = (u_char)(mac_lo >> 24);
2831 sc->eaddr[3] = (u_char)(mac_lo >> 16);
2832 sc->eaddr[4] = (u_char)(mac_lo >> 8);
2833 sc->eaddr[5] = (u_char)(mac_lo >> 0);
2834 }
2835
2836 DBPRINT(sc, BCE_INFO, "Permanent Ethernet address = %6D\n", sc->eaddr, ":");
2837}
2838
2839
2840/****************************************************************************/
2841/* Program the MAC address. */
2842/* */
2843/* Returns: */
2844/* Nothing. */
2845/****************************************************************************/
2846static void
2847bce_set_mac_addr(struct bce_softc *sc)
2848{
2849 const uint8_t *mac_addr = sc->eaddr;
2850 uint32_t val;
2851
2852 DBPRINT(sc, BCE_INFO, "Setting Ethernet address = %6D\n",
2853 sc->eaddr, ":");
2854
2855 val = (mac_addr[0] << 8) | mac_addr[1];
2856 REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
2857
2858 val = (mac_addr[2] << 24) |
2859 (mac_addr[3] << 16) |
2860 (mac_addr[4] << 8) |
2861 mac_addr[5];
2862 REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
2863}
2864
2865
/****************************************************************************/
/* Stop the controller.                                                     */
/*                                                                          */
/* Disables the hardware, tears down the RX/TX chains, and isolates the    */
/* PHY.  Must be called with the interface serializer held.                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_stop(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->bce_miibus);
	struct ifmedia_entry *ifm;
	int mtmp, itmp;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Stop the periodic statistics/tick callout. */
	callout_stop(&sc->bce_stat_ch);

	/* Disable the transmit/receive blocks. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, 0x5ffffff);
	REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);	/* dummy read-back */
	DELAY(20);

	bce_disable_intr(sc);

	/* Tell firmware that the driver is going away. */
	bce_reset(sc, BCE_DRV_MSG_CODE_SUSPEND_NO_WOL);

	/* Free the RX lists. */
	bce_free_rx_chain(sc);

	/* Free TX buffers. */
	bce_free_tx_chain(sc);

	/*
	 * Isolate/power down the PHY, but leave the media selection
	 * unchanged so that things will be put back to normal when
	 * we bring the interface back up.
	 *
	 * 'mii' may be NULL if bce_stop() is called by bce_detach().
	 */
	if (mii != NULL) {
		/* Save flags/media, force IFM_NONE, then restore. */
		itmp = ifp->if_flags;
		ifp->if_flags |= IFF_UP;
		ifm = mii->mii_media.ifm_cur;
		mtmp = ifm->ifm_media;
		ifm->ifm_media = IFM_ETHER | IFM_NONE;
		mii_mediachg(mii);
		ifm->ifm_media = mtmp;
		ifp->if_flags = itmp;
	}

	/* Forget link state and pending coalescing changes. */
	sc->bce_link = 0;
	sc->bce_coalchg_mask = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	bce_mgmt_init(sc);
}
2926
2927
/*
 * Perform a full chip reset after handshaking with the bootcode.
 *
 * 'reset_code' is the BCE_DRV_MSG_CODE_* reason passed to the firmware.
 * Returns 0 on success, or a positive errno (EBUSY/ENODEV) on failure.
 */
static int
bce_reset(struct bce_softc *sc, uint32_t reset_code)
{
	uint32_t val;
	int i, rc = 0;

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
			  "Firmware is not ready for reset\n");
		return rc;
	}

	/* Set a firmware reminder that this is a soft reset. */
	REG_WR_IND(sc, sc->bce_shmem_base + BCE_DRV_RESET_SIGNATURE,
		   BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
	REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

	/* Allow up to 30us for reset to complete. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
		if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		DELAY(10);
	}

	/* Check that reset completed successfully. */
	if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
		return EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
		return ENODEV;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
			  "Firmware did not complete initialization!\n");
	}
	return rc;
}
3002
3003
/****************************************************************************/
/* Initialize basic chip state after a reset.                               */
/*                                                                          */
/* Configures DMA byte/word swapping, applies chip errata workarounds,     */
/* brings up the RX_V2P/context state machines, loads the on-chip CPU       */
/* firmware, prepares NVRAM access and programs the MQ/RV2P/TBDR page      */
/* size parameters.                                                         */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_chipinit(struct bce_softc *sc)
{
	uint32_t val;
	int rc = 0;

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/*
	 * Initialize DMA byte/word swapping, configure the number of DMA
	 * channels and PCI clock compensation delay.
	 */
	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
	    !(sc->bce_flags & BCE_PCIX_FLAG))
		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BCE_DMA_CONFIG, val);

	/* Clear the PCI-X relaxed ordering bit. See errata E3_5708CA0_570. */
	if (sc->bce_flags & BCE_PCIX_FLAG) {
		uint16_t cmd;

		cmd = pci_read_config(sc->bce_dev, BCE_PCI_PCIX_CMD, 2);
		pci_write_config(sc->bce_dev, BCE_PCI_PCIX_CMD, cmd & ~0x2, 2);
	}

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	bce_init_ctx(sc);

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Prepare NVRAM for access. */
	rc = bce_init_nvram(sc);
	if (rc != 0)
		return rc;

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(sc, BCE_MQ_CONFIG, val);

	/* Window for the kernel bypass mailboxes, past all kernel contexts. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BCE_TBDR_CONFIG);
	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BCE_TBDR_CONFIG, val);

	return 0;
}
3090
3091
3092/****************************************************************************/
3093/* Initialize the controller in preparation to send/receive traffic. */
3094/* */
3095/* Returns: */
3096/* 0 for success, positive value for failure. */
3097/****************************************************************************/
3098static int
3099bce_blockinit(struct bce_softc *sc)
3100{
3101 uint32_t reg, val;
3102 int rc = 0;
3103
3104 /* Load the hardware default MAC address. */
3105 bce_set_mac_addr(sc);
3106
3107 /* Set the Ethernet backoff seed value */
3108 val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3109 sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3110 REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3111
3112 sc->last_status_idx = 0;
3113 sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3114
3115 /* Set up link change interrupt generation. */
3116 REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3117
3118 /* Program the physical address of the status block. */
3119 REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3120 REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3121
3122 /* Program the physical address of the statistics block. */
3123 REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3124 BCE_ADDR_LO(sc->stats_block_paddr));
3125 REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3126 BCE_ADDR_HI(sc->stats_block_paddr));
3127
3128 /* Program various host coalescing parameters. */
3129 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3130 (sc->bce_tx_quick_cons_trip_int << 16) |
3131 sc->bce_tx_quick_cons_trip);
3132 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3133 (sc->bce_rx_quick_cons_trip_int << 16) |
3134 sc->bce_rx_quick_cons_trip);
3135 REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3136 (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3137 REG_WR(sc, BCE_HC_TX_TICKS,
3138 (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3139 REG_WR(sc, BCE_HC_RX_TICKS,
3140 (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3141 REG_WR(sc, BCE_HC_COM_TICKS,
3142 (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3143 REG_WR(sc, BCE_HC_CMD_TICKS,
3144 (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3145 REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3146 REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3147 REG_WR(sc, BCE_HC_CONFIG,
43c2aeb0
SZ
3148 BCE_HC_CONFIG_TX_TMR_MODE |
3149 BCE_HC_CONFIG_COLLECT_STATS);
3150
3151 /* Clear the internal statistics counters. */
3152 REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3153
3154 /* Verify that bootcode is running. */
3155 reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_SIGNATURE);
3156
3157 DBRUNIF(DB_RANDOMTRUE(bce_debug_bootcode_running_failure),
3158 if_printf(&sc->arpcom.ac_if,
3159 "%s(%d): Simulating bootcode failure.\n",
3160 __FILE__, __LINE__);
3161 reg = 0);
3162
3163 if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3164 BCE_DEV_INFO_SIGNATURE_MAGIC) {
3165 if_printf(&sc->arpcom.ac_if,
3166 "Bootcode not running! Found: 0x%08X, "
3167 "Expected: 08%08X\n",
3168 reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3169 BCE_DEV_INFO_SIGNATURE_MAGIC);
3170 return ENODEV;
3171 }
3172
3173 /* Check if any management firmware is running. */
3174 reg = REG_RD_IND(sc, sc->bce_shmem_base + BCE_PORT_FEATURE);
3175 if (reg & (BCE_PORT_FEATURE_ASF_ENABLED |
3176 BCE_PORT_FEATURE_IMD_ENABLED)) {
3177 DBPRINT(sc, BCE_INFO, "Management F/W Enabled.\n");
3178 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
3179 }
3180
3181 sc->bce_fw_ver =
3182 REG_RD_IND(sc, sc->bce_shmem_base + BCE_DEV_INFO_BC_REV);
3183 DBPRINT(sc, BCE_INFO, "bootcode rev = 0x%08X\n", sc->bce_fw_ver);
3184
3185 /* Allow bootcode to apply any additional fixes before enabling MAC. */
3186 rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3187
3188 /* Enable link state change interrupt generation. */
3189 REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3190
3191 /* Enable all remaining blocks in the MAC. */
3192 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, 0x5ffffff);
3193 REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3194 DELAY(20);
3195
3196 return 0;
3197}
3198
3199
3200/****************************************************************************/
3201/* Encapsulate an mbuf cluster into the rx_bd chain. */
3202/* */
3203/* The NetXtreme II can support Jumbo frames by using multiple rx_bd's. */
3204/* This routine will map an mbuf cluster into 1 or more rx_bd's as */
3205/* necessary. */
3206/* */
3207/* Returns: */
3208/* 0 for success, positive value for failure. */
3209/****************************************************************************/
static int
bce_newbuf_std(struct bce_softc *sc, uint16_t *prod, uint16_t *chain_prod,
	       uint32_t *prod_bseq, int init)
{
	bus_dmamap_t map;
	bus_dma_segment_t seg;
	struct mbuf *m_new;
	int error, nseg;
#ifdef BCE_DEBUG
	uint16_t debug_chain_prod = *chain_prod;
#endif

	/* Make sure the inputs are valid. */
	DBRUNIF((*chain_prod > MAX_RX_BD),
		if_printf(&sc->arpcom.ac_if, "%s(%d): "
			  "RX producer out of range: 0x%04X > 0x%04X\n",
			  __FILE__, __LINE__,
			  *chain_prod, (uint16_t)MAX_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(enter): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);

	DBRUNIF(DB_RANDOMTRUE(bce_debug_mbuf_allocation_failure),
		if_printf(&sc->arpcom.ac_if, "%s(%d): "
			  "Simulating mbuf allocation failure.\n",
			  __FILE__, __LINE__);
		sc->mbuf_alloc_failed++;
		return ENOBUFS);

	/*
	 * This is a new mbuf allocation.  During initial ring fill (init)
	 * we may block; in the RX interrupt path we must not.
	 */
	m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return ENOBUFS;
	DBRUNIF(1, sc->rx_mbuf_alloc++);

	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

	/*
	 * Map the mbuf cluster into device memory, using the spare map so
	 * that the slot's current mapping stays valid until we know the
	 * load succeeded.
	 */
	error = bus_dmamap_load_mbuf_segment(sc->rx_mbuf_tag,
			sc->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg,
			BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m_new);
		if (init) {
			if_printf(&sc->arpcom.ac_if,
				  "Error mapping mbuf into RX chain!\n");
		}
		DBRUNIF(1, sc->rx_mbuf_alloc--);
		return error;
	}

	/* Release the slot's previous DMA mapping, if any. */
	if (sc->rx_mbuf_ptr[*chain_prod] != NULL) {
		bus_dmamap_unload(sc->rx_mbuf_tag,
				  sc->rx_mbuf_map[*chain_prod]);
	}

	/* Swap the freshly loaded spare map into the slot. */
	map = sc->rx_mbuf_map[*chain_prod];
	sc->rx_mbuf_map[*chain_prod] = sc->rx_mbuf_tmpmap;
	sc->rx_mbuf_tmpmap = map;

	/* Watch for overflow. */
	DBRUNIF((sc->free_rx_bd > USABLE_RX_BD),
		if_printf(&sc->arpcom.ac_if, "%s(%d): "
			  "Too many free rx_bd (0x%04X > 0x%04X)!\n",
			  __FILE__, __LINE__, sc->free_rx_bd,
			  (uint16_t)USABLE_RX_BD));

	/* Update some debug statistic counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);

	/* Save the mbuf and update our counter. */
	sc->rx_mbuf_ptr[*chain_prod] = m_new;
	sc->rx_mbuf_paddr[*chain_prod] = seg.ds_addr;
	sc->free_rx_bd--;

	/* Publish the new buffer in the rx_bd ring. */
	bce_setup_rxdesc_std(sc, *chain_prod, prod_bseq);

	DBRUN(BCE_VERBOSE_RECV,
	      bce_dump_rx_mbuf_chain(sc, debug_chain_prod, 1));

	DBPRINT(sc, BCE_VERBOSE_RECV, "%s(exit): prod = 0x%04X, chain_prod = 0x%04X, "
		"prod_bseq = 0x%08X\n", __func__, *prod, *chain_prod, *prod_bseq);

	return 0;
}
3297
3298
314a2fcc
SZ
3299static void
3300bce_setup_rxdesc_std(struct bce_softc *sc, uint16_t chain_prod, uint32_t *prod_bseq)
3301{
3302 struct rx_bd *rxbd;
3303 bus_addr_t paddr;
3304 int len;
3305
3306 paddr = sc->rx_mbuf_paddr[chain_prod];
3307 len = sc->rx_mbuf_ptr[chain_prod]->m_len;
3308
3309 /* Setup the rx_bd for the first segment. */
3310 rxbd = &sc->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3311
3312 rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3313 rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3314 rxbd->rx_bd_len = htole32(len);
3315 rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3316 *prod_bseq += len;
3317
3318 rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3319}
3320
3321
43c2aeb0
SZ
3322/****************************************************************************/
3323/* Allocate memory and initialize the TX data structures. */
3324/* */
3325/* Returns: */
3326/* 0 for success, positive value for failure. */
3327/****************************************************************************/
static int
bce_init_tx_chain(struct bce_softc *sc)
{
	struct tx_bd *txbd;
	uint32_t val;
	int i, rc = 0;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Set the initial TX producer/consumer indices. */
	sc->tx_prod = 0;
	sc->tx_cons = 0;
	sc->tx_prod_bseq = 0;
	sc->used_tx_bd = 0;
	sc->max_tx_bd = USABLE_TX_BD;
	DBRUNIF(1, sc->tx_hi_watermark = USABLE_TX_BD);
	DBRUNIF(1, sc->tx_full_count = 0);

	/*
	 * The NetXtreme II supports a linked-list structre called
	 * a Buffer Descriptor Chain (or BD chain). A BD chain
	 * consists of a series of 1 or more chain pages, each of which
	 * consists of a fixed number of BD entries.
	 * The last BD entry on each page is a pointer to the next page
	 * in the chain, and the last pointer in the BD chain
	 * points back to the beginning of the chain.
	 */

	/* Set the TX next pointer chain entries. */
	for (i = 0; i < TX_PAGES; i++) {
		int j;

		/* The final BD slot of each page is the next-page pointer. */
		txbd = &sc->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (TX_PAGES - 1))
			j = 0;		/* Wrap back to the first page. */
		else
			j = i + 1;

		txbd->tx_bd_haddr_hi =
			htole32(BCE_ADDR_HI(sc->tx_bd_chain_paddr[j]));
		txbd->tx_bd_haddr_lo =
			htole32(BCE_ADDR_LO(sc->tx_bd_chain_paddr[j]));
	}

	/* Initialize the context ID for an L2 TX chain. */
	val = BCE_L2CTX_TYPE_TYPE_L2;
	val |= BCE_L2CTX_TYPE_SIZE_L2;
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TYPE, val);

	val = BCE_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_CMD_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	val = BCE_ADDR_HI(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_HI, val);
	val = BCE_ADDR_LO(sc->tx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TBDR_BHADDR_LO, val);

	DBRUN(BCE_VERBOSE_SEND, bce_dump_tx_chain(sc, 0, TOTAL_TX_BD));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return(rc);
}
3394
3395
3396/****************************************************************************/
3397/* Free memory and clear the TX data structures. */
3398/* */
3399/* Returns: */
3400/* Nothing. */
3401/****************************************************************************/
3402static void
3403bce_free_tx_chain(struct bce_softc *sc)
3404{
3405 int i;
3406
3407 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3408
3409 /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3410 for (i = 0; i < TOTAL_TX_BD; i++) {
3411 if (sc->tx_mbuf_ptr[i] != NULL) {
43c2aeb0
SZ
3412 bus_dmamap_unload(sc->tx_mbuf_tag, sc->tx_mbuf_map[i]);
3413 m_freem(sc->tx_mbuf_ptr[i]);
3414 sc->tx_mbuf_ptr[i] = NULL;
3415 DBRUNIF(1, sc->tx_mbuf_alloc--);
3416 }
3417 }
3418
3419 /* Clear each TX chain page. */
3420 for (i = 0; i < TX_PAGES; i++)
3421 bzero(sc->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
24603545 3422 sc->used_tx_bd = 0;
43c2aeb0
SZ
3423
3424 /* Check if we lost any mbufs in the process. */
3425 DBRUNIF((sc->tx_mbuf_alloc),
3426 if_printf(&sc->arpcom.ac_if,
3427 "%s(%d): Memory leak! "
3428 "Lost %d mbufs from tx chain!\n",
3429 __FILE__, __LINE__, sc->tx_mbuf_alloc));
3430
3431 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3432}
3433
3434
3435/****************************************************************************/
3436/* Allocate memory and initialize the RX data structures. */
3437/* */
3438/* Returns: */
3439/* 0 for success, positive value for failure. */
3440/****************************************************************************/
static int
bce_init_rx_chain(struct bce_softc *sc)
{
	struct rx_bd *rxbd;
	int i, rc = 0;
	uint16_t prod, chain_prod;
	uint32_t prod_bseq, val;

	DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);

	/* Initialize the RX producer and consumer indices. */
	sc->rx_prod = 0;
	sc->rx_cons = 0;
	sc->rx_prod_bseq = 0;
	sc->free_rx_bd = USABLE_RX_BD;
	sc->max_rx_bd = USABLE_RX_BD;
	DBRUNIF(1, sc->rx_low_watermark = USABLE_RX_BD);
	DBRUNIF(1, sc->rx_empty_count = 0);

	/* Initialize the RX next pointer chain entries. */
	for (i = 0; i < RX_PAGES; i++) {
		int j;

		/* The final BD slot of each page is the next-page pointer. */
		rxbd = &sc->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];

		/* Check if we've reached the last page. */
		if (i == (RX_PAGES - 1))
			j = 0;		/* Wrap back to the first page. */
		else
			j = i + 1;

		/* Setup the chain page pointers. */
		rxbd->rx_bd_haddr_hi =
			htole32(BCE_ADDR_HI(sc->rx_bd_chain_paddr[j]));
		rxbd->rx_bd_haddr_lo =
			htole32(BCE_ADDR_LO(sc->rx_bd_chain_paddr[j]));
	}

	/* Initialize the context ID for an L2 RX chain. */
	val = BCE_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BCE_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_CTX_TYPE, val);

	/* Point the hardware to the first page in the chain. */
	/* XXX shouldn't this be done after RX descriptor initialization? */
	val = BCE_ADDR_HI(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_HI, val);
	val = BCE_ADDR_LO(sc->rx_bd_chain_paddr[0]);
	CTX_WR(sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_NX_BDHADDR_LO, val);

	/* Allocate mbuf clusters for the rx_bd chain. */
	prod = prod_bseq = 0;
	while (prod < TOTAL_RX_BD) {
		chain_prod = RX_CHAIN_IDX(prod);
		/* init=1: may block during the initial ring fill. */
		if (bce_newbuf_std(sc, &prod, &chain_prod, &prod_bseq, 1)) {
			if_printf(&sc->arpcom.ac_if,
				  "Error filling RX chain: rx_bd[0x%04X]!\n",
				  chain_prod);
			rc = ENOBUFS;
			break;
		}
		prod = NEXT_RX_BD(prod);
	}

	/* Save the RX chain producer index. */
	sc->rx_prod = prod;
	sc->rx_prod_bseq = prod_bseq;

	/* Tell the chip about the waiting rx_bd's. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBRUN(BCE_VERBOSE_RECV, bce_dump_rx_chain(sc, 0, TOTAL_RX_BD));

	DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);

	return(rc);
}
3520
3521
3522/****************************************************************************/
3523/* Free memory and clear the RX data structures. */
3524/* */
3525/* Returns: */
3526/* Nothing. */
3527/****************************************************************************/
3528static void
3529bce_free_rx_chain(struct bce_softc *sc)
3530{
3531 int i;
3532
3533 DBPRINT(sc, BCE_VERBOSE_RESET, "Entering %s()\n", __func__);
3534
3535 /* Free any mbufs still in the RX mbuf chain. */
3536 for (i = 0; i < TOTAL_RX_BD; i++) {
3537 if (sc->rx_mbuf_ptr[i] != NULL) {
43c2aeb0
SZ
3538 bus_dmamap_unload(sc->rx_mbuf_tag, sc->rx_mbuf_map[i]);
3539 m_freem(sc->rx_mbuf_ptr[i]);
3540 sc->rx_mbuf_ptr[i] = NULL;
3541 DBRUNIF(1, sc->rx_mbuf_alloc--);
3542 }
3543 }
3544
3545 /* Clear each RX chain page. */
3546 for (i = 0; i < RX_PAGES; i++)
3547 bzero(sc->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
3548
3549 /* Check if we lost any mbufs in the process. */
3550 DBRUNIF((sc->rx_mbuf_alloc),
3551 if_printf(&sc->arpcom.ac_if,
3552 "%s(%d): Memory leak! "
3553 "Lost %d mbufs from rx chain!\n",
3554 __FILE__, __LINE__, sc->rx_mbuf_alloc));
3555
3556 DBPRINT(sc, BCE_VERBOSE_RESET, "Exiting %s()\n", __func__);
3557}
3558
3559
3560/****************************************************************************/
3561/* Set media options. */
3562/* */
3563/* Returns: */
3564/* 0 for success, positive value for failure. */
3565/****************************************************************************/
3566static int
3567bce_ifmedia_upd(struct ifnet *ifp)
3568{
3569 struct bce_softc *sc = ifp->if_softc;
3570 struct mii_data *mii = device_get_softc(sc->bce_miibus);
3571
3572 /*
3573 * 'mii' will be NULL, when this function is called on following
3574 * code path: bce_attach() -> bce_mgmt_init()
3575 */
3576 if (mii != NULL) {
3577 /* Make sure the MII bus has been enumerated. */
3578 sc->bce_link = 0;
3579 if (mii->mii_instance) {
3580 struct mii_softc *miisc;
3581
3582 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
3583 mii_phy_reset(miisc);
3584 }
3585 mii_mediachg(mii);
3586 }
3587 return 0;
3588}
3589
3590
3591/****************************************************************************/
3592/* Reports current media status. */
3593/* */
3594/* Returns: */
3595/* Nothing. */
3596/****************************************************************************/
3597static void
3598bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
3599{
3600 struct bce_softc *sc = ifp->if_softc;
3601 struct mii_data *mii = device_get_softc(sc->bce_miibus);
3602
3603 mii_pollstat(mii);
3604 ifmr->ifm_active = mii->mii_media_active;
3605 ifmr->ifm_status = mii->mii_media_status;
3606}
3607
3608
3609/****************************************************************************/
3610/* Handles PHY generated interrupt events. */
3611/* */
3612/* Returns: */
3613/* Nothing. */
3614/****************************************************************************/
static void
bce_phy_intr(struct bce_softc *sc)
{
	uint32_t new_link_state, old_link_state;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * Current vs. acknowledged link state bits from the status block;
	 * a mismatch means the link changed since we last acked it.
	 */
	new_link_state = sc->status_block->status_attn_bits &
			 STATUS_ATTN_BITS_LINK_STATE;
	old_link_state = sc->status_block->status_attn_bits_ack &
			 STATUS_ATTN_BITS_LINK_STATE;

	/* Handle any changes if the link state has changed. */
	if (new_link_state != old_link_state) {	/* XXX redundant? */
		DBRUN(BCE_VERBOSE_INTR, bce_dump_status_block(sc));

		/* Force the periodic tick to re-evaluate the link now. */
		sc->bce_link = 0;
		callout_stop(&sc->bce_stat_ch);
		bce_tick_serialized(sc);

		/* Update the status_attn_bits_ack field in the status block. */
		if (new_link_state) {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now UP.\n");
		} else {
			REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
			       STATUS_ATTN_BITS_LINK_STATE);
			if (bootverbose)
				if_printf(ifp, "Link is now DOWN.\n");
		}
	}

	/* Acknowledge the link change interrupt. */
	REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
}
3653
3654
24603545
SZ
3655/****************************************************************************/
3656/* Reads the receive consumer value from the status block (skipping over */
3657/* chain page pointer if necessary). */
3658/* */
3659/* Returns: */
3660/* hw_cons */
3661/****************************************************************************/
3662static __inline uint16_t
3663bce_get_hw_rx_cons(struct bce_softc *sc)
3664{
3665 uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0;
3666
3667 if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
3668 hw_cons++;
3669 return hw_cons;
3670}
3671
3672
43c2aeb0
SZ
3673/****************************************************************************/
3674/* Handles received frame interrupt events. */
3675/* */
3676/* Returns: */
3677/* Nothing. */
3678/****************************************************************************/
static void
bce_rx_intr(struct bce_softc *sc, int count)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint16_t hw_cons, sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	uint32_t sw_prod_bseq;
	struct mbuf_chain chain[MAXCPU];

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Batch received frames for dispatch after the ring walk. */
	ether_input_chain_init(chain);

	DBRUNIF(1, sc->rx_interrupts++);

	/* Get the hardware's view of the RX consumer index. */
	hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = sc->rx_cons;
	sw_prod = sc->rx_prod;
	sw_prod_bseq = sc->rx_prod_bseq;

	DBPRINT(sc, BCE_INFO_RECV, "%s(enter): sw_prod = 0x%04X, "
		"sw_cons = 0x%04X, sw_prod_bseq = 0x%08X\n",
		__func__, sw_prod, sw_cons, sw_prod_bseq);

	/* Prevent speculative reads from getting ahead of the status block. */
	bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
			  BUS_SPACE_BARRIER_READ);

	/* Update some debug statistics counters */
	DBRUNIF((sc->free_rx_bd < sc->rx_low_watermark),
		sc->rx_low_watermark = sc->free_rx_bd);
	DBRUNIF((sc->free_rx_bd == 0), sc->rx_empty_count++);

	/* Scan through the receive chain as long as there is work to do. */
	while (sw_cons != hw_cons) {
		struct mbuf *m = NULL;
		struct l2_fhdr *l2fhdr = NULL;
		struct rx_bd *rxbd;
		unsigned int len;
		uint32_t status = 0;

#ifdef DEVICE_POLLING
		/*
		 * count >= 0 means we were entered from the polling path
		 * with a burst budget; stop once it is exhausted.
		 */
		if (count >= 0 && count-- == 0) {
			sc->hw_rx_cons = sw_cons;
			break;
		}
#endif

		/*
		 * Convert the producer/consumer indices
		 * to an actual rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(sw_prod);

		/* Get the used rx_bd. */
		rxbd = &sc->rx_bd_chain[RX_PAGE(sw_chain_cons)]
				       [RX_IDX(sw_chain_cons)];
		sc->free_rx_bd++;

		DBRUN(BCE_VERBOSE_RECV,
		      if_printf(ifp, "%s(): ", __func__);
		      bce_dump_rxbd(sc, sw_chain_cons, rxbd));

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (sc->rx_mbuf_ptr[sw_chain_cons] != NULL) {
			/* Validate that this is the last rx_bd. */
			DBRUNIF((!(rxbd->rx_bd_flags & RX_BD_FLAGS_END)),
				if_printf(ifp, "%s(%d): "
					  "Unexpected mbuf found in rx_bd[0x%04X]!\n",
					  __FILE__, __LINE__, sw_chain_cons);
				bce_breakpoint(sc));

			/*
			 * Each frame occupies a single rx_bd here, so the
			 * consumer and producer chain indices must agree;
			 * anything else indicates ring corruption.
			 */
			if (sw_chain_cons != sw_chain_prod) {
				if_printf(ifp, "RX cons(%d) != prod(%d), "
					  "drop!\n", sw_chain_cons,
					  sw_chain_prod);
				ifp->if_ierrors++;

				/* Recycle the descriptor and keep going. */
				bce_setup_rxdesc_std(sc, sw_chain_cons,
						     &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(sc->rx_mbuf_tag,
					sc->rx_mbuf_map[sw_chain_cons],
					BUS_DMASYNC_POSTREAD);

			/* Save the mbuf from the driver's chain. */
			m = sc->rx_mbuf_ptr[sw_chain_cons];

			/*
			 * Frames received on the NetXteme II are prepended
			 * with an l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info). The frames are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			DBRUNIF(DB_RANDOMTRUE(bce_debug_l2fhdr_status_check),
				if_printf(ifp,
					  "Simulating l2_fhdr status error.\n");
				status = status | L2_FHDR_ERRORS_PHY_DECODE);

			/* Watch for unusual sized frames. */
			DBRUNIF((len < BCE_MIN_MTU ||
				 len > BCE_MAX_JUMBO_ETHER_MTU_VLAN),
				if_printf(ifp,
					  "%s(%d): Unusual frame size found. "
					  "Min(%d), Actual(%d), Max(%d)\n",
					  __FILE__, __LINE__,
					  (int)BCE_MIN_MTU, len,
					  (int)BCE_MAX_JUMBO_ETHER_MTU_VLAN);
				bce_dump_mbuf(sc, m);
				bce_breakpoint(sc));

			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status & (L2_FHDR_ERRORS_BAD_CRC |
				      L2_FHDR_ERRORS_PHY_DECODE |
				      L2_FHDR_ERRORS_ALIGNMENT |
				      L2_FHDR_ERRORS_TOO_SHORT |
				      L2_FHDR_ERRORS_GIANT_FRAME)) {
				ifp->if_ierrors++;
				DBRUNIF(1, sc->l2fhdr_status_errors++);

				/* Reuse the mbuf for a new frame. */
				bce_setup_rxdesc_std(sc, sw_chain_prod,
						     &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd. If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_newbuf_std(sc, &sw_prod, &sw_chain_prod,
					   &sw_prod_bseq, 0)) {
				DBRUN(BCE_WARN,
				      if_printf(ifp,
						"%s(%d): Failed to allocate new mbuf, "
						"incoming frame dropped!\n",
						__FILE__, __LINE__));

				ifp->if_ierrors++;

				/* Try and reuse the exisitng mbuf. */
				bce_setup_rxdesc_std(sc, sw_chain_prod,
						     &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Skip over the l2_fhdr when passing
			 * the data up the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			m->m_pkthdr.len = m->m_len = len;
			m->m_pkthdr.rcvif = ifp;

			DBRUN(BCE_VERBOSE_RECV,
			      struct ether_header *eh;
			      eh = mtod(m, struct ether_header *);
			      if_printf(ifp, "%s(): to: %6D, from: %6D, "
					"type: 0x%04X\n", __func__,
					eh->ether_dhost, ":",
					eh->ether_shost, ":",
					htons(eh->ether_type)));

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {
				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^
					     0xffff) == 0) {
						m->m_pkthdr.csum_flags |=
							CSUM_IP_VALID;
					} else {
						DBPRINT(sc, BCE_WARN_RECV,
							"%s(): Invalid IP checksum = 0x%04X!\n",
							__func__, l2fhdr->l2_fhdr_ip_xsum);
					}
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
					      L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status &
					     (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
							l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |=
							CSUM_DATA_VALID |
							CSUM_PSEUDO_HDR;
					} else {
						DBPRINT(sc, BCE_WARN_RECV,
							"%s(): Invalid TCP/UDP checksum = 0x%04X!\n",
							__func__, l2fhdr->l2_fhdr_tcp_udp_xsum);
					}
				}
			}

			ifp->if_ipackets++;
bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* If we have a packet, pass it up the stack */
		if (m) {
			DBPRINT(sc, BCE_VERBOSE_RECV,
				"%s(): Passing received frame up.\n", __func__);

			/* Attach the hardware-extracted VLAN tag, if any. */
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				m->m_flags |= M_VLANTAG;
				m->m_pkthdr.ether_vlantag =
					l2fhdr->l2_fhdr_vlan_tag;
			}
			ether_input_chain(ifp, m, NULL, chain);

			DBRUNIF(1, sc->rx_mbuf_alloc--);
		}

		/*
		 * If polling(4) is not enabled, refresh hw_cons to see
		 * whether there's new work.
		 *
		 * If polling(4) is enabled, i.e count >= 0, refreshing
		 * should not be performed, so that we would not spend
		 * too much time in RX processing.
		 */
		if (count < 0 && sw_cons == hw_cons)
			hw_cons = sc->hw_rx_cons = bce_get_hw_rx_cons(sc);

		/*
		 * Prevent speculative reads from getting ahead
		 * of the status block.
		 */
		bus_space_barrier(sc->bce_btag, sc->bce_bhandle, 0, 0,
				  BUS_SPACE_BARRIER_READ);
	}

	/* Hand all batched frames to the network stack. */
	ether_input_dispatch(chain);

	/* Commit the working copies back to the softc. */
	sc->rx_cons = sw_cons;
	sc->rx_prod = sw_prod;
	sc->rx_prod_bseq = sw_prod_bseq;

	/* Tell the chip about the newly posted rx_bd's. */
	REG_WR16(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BDIDX, sc->rx_prod);
	REG_WR(sc, MB_RX_CID_ADDR + BCE_L2CTX_HOST_BSEQ, sc->rx_prod_bseq);

	DBPRINT(sc, BCE_INFO_RECV, "%s(exit): rx_prod = 0x%04X, "
		"rx_cons = 0x%04X, rx_prod_bseq = 0x%08X\n",
		__func__, sc->rx_prod, sc->rx_cons, sc->rx_prod_bseq);
}
3958
3959
24603545
SZ
3960/****************************************************************************/
3961/* Reads the transmit consumer value from the status block (skipping over */
3962/* chain page pointer if necessary). */
3963/* */
3964/* Returns: */
3965/* hw_cons */
3966/****************************************************************************/
3967static __inline uint16_t
3968bce_get_hw_tx_cons(struct bce_softc *sc)
3969{
3970 uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0;
3971
3972 if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
3973 hw_cons++;