sys/dev/netif/bce/if_bce.c
1 /*-
2  * Copyright (c) 2006-2007 Broadcom Corporation
3  *      David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
31  */
32
33 /*
34  * The following controllers are supported by this driver:
35  *   BCM5706C A2, A3
36  *   BCM5706S A2, A3
37  *   BCM5708C B1, B2
38  *   BCM5708S B1, B2
39  *   BCM5709C A1, B2, C0
40  *   BCM5716  C0
41  *
42  * The following controllers are not supported by this driver:
43  *   BCM5706C A0, A1
44  *   BCM5706S A0, A1
45  *   BCM5708C A0, B0
46  *   BCM5708S A0, B0
47  *   BCM5709C A0, B0, B1
48  *   BCM5709S A0, A1, B0, B1, B2, C0
49  */
50
51 #include "opt_bce.h"
52 #include "opt_ifpoll.h"
53
54 #include <sys/param.h>
55 #include <sys/bus.h>
56 #include <sys/endian.h>
57 #include <sys/kernel.h>
58 #include <sys/interrupt.h>
59 #include <sys/mbuf.h>
60 #include <sys/malloc.h>
61 #include <sys/queue.h>
62 #include <sys/rman.h>
63 #include <sys/serialize.h>
64 #include <sys/socket.h>
65 #include <sys/sockio.h>
66 #include <sys/sysctl.h>
67
68 #include <netinet/ip.h>
69 #include <netinet/tcp.h>
70
71 #include <net/bpf.h>
72 #include <net/ethernet.h>
73 #include <net/if.h>
74 #include <net/if_arp.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/if_poll.h>
78 #include <net/if_types.h>
79 #include <net/ifq_var.h>
80 #include <net/vlan/if_vlan_var.h>
81 #include <net/vlan/if_vlan_ether.h>
82
83 #include <dev/netif/mii_layer/mii.h>
84 #include <dev/netif/mii_layer/miivar.h>
85 #include <dev/netif/mii_layer/brgphyreg.h>
86
87 #include <bus/pci/pcireg.h>
88 #include <bus/pci/pcivar.h>
89
90 #include "miibus_if.h"
91
92 #include <dev/netif/bce/if_bcereg.h>
93 #include <dev/netif/bce/if_bcefw.h>
94
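/*
 * Interval of the bce_check_msi() callout (bce_ckmsi_callout), used on
 * controllers that are flagged with BCE_CHECK_MSI_FLAG below.
 */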
95 #define BCE_MSI_CKINTVL         ((10 * hz) / 1000)      /* 10ms */
96
97 /****************************************************************************/
98 /* PCI Device ID Table                                                      */
99 /*                                                                          */
100 /* Used by bce_probe() to identify the devices supported by this driver.    */
101 /****************************************************************************/
102 #define BCE_DEVDESC_MAX         64
103
104 static struct bce_type bce_devs[] = {
105         /* BCM5706C Controllers and OEM boards. */
106         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
107                 "HP NC370T Multifunction Gigabit Server Adapter" },
108         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
109                 "HP NC370i Multifunction Gigabit Server Adapter" },
110         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3070,
111                 "HP NC380T PCIe DP Multifunc Gig Server Adapter" },
112         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x1709,
113                 "HP NC371i Multifunction Gigabit Server Adapter" },
114         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
115                 "Broadcom NetXtreme II BCM5706 1000Base-T" },
116
117         /* BCM5706S controllers and OEM boards. */
118         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
119                 "HP NC370F Multifunction Gigabit Server Adapter" },
120         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
121                 "Broadcom NetXtreme II BCM5706 1000Base-SX" },
122
123         /* BCM5708C controllers and OEM boards. */
124         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7037,
125                 "HP NC373T PCIe Multifunction Gig Server Adapter" },
126         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7038,
127                 "HP NC373i Multifunction Gigabit Server Adapter" },
128         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7045,
129                 "HP NC374m PCIe Multifunction Adapter" },
130         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
131                 "Broadcom NetXtreme II BCM5708 1000Base-T" },
132
133         /* BCM5708S controllers and OEM boards. */
134         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x1706,
135                 "HP NC373m Multifunction Gigabit Server Adapter" },
136         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703b,
137                 "HP NC373i Multifunction Gigabit Server Adapter" },
138         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703d,
139                 "HP NC373F PCIe Multifunc Giga Server Adapter" },
140         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
141                 "Broadcom NetXtreme II BCM5708S 1000Base-SX" },
142
143         /* BCM5709C controllers and OEM boards. */
144         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7055,
145                 "HP NC382i DP Multifunction Gigabit Server Adapter" },
146         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7059,
147                 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
148         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  PCI_ANY_ID,  PCI_ANY_ID,
149                 "Broadcom NetXtreme II BCM5709 1000Base-T" },
150
151         /* BCM5709S controllers and OEM boards. */
152         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x171d,
153                 "HP NC382m DP 1GbE Multifunction BL-c Adapter" },
154         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x7056,
155                 "HP NC382i DP Multifunction Gigabit Server Adapter" },
156         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  PCI_ANY_ID,  PCI_ANY_ID,
157                 "Broadcom NetXtreme II BCM5709 1000Base-SX" },
158
159         /* BCM5716 controllers and OEM boards. */
160         { BRCM_VENDORID, BRCM_DEVICEID_BCM5716,   PCI_ANY_ID,  PCI_ANY_ID,
161                 "Broadcom NetXtreme II BCM5716 1000Base-T" },
162
163         { 0, 0, 0, 0, NULL }
164 };
165
166
167 /****************************************************************************/
168 /* Supported Flash NVRAM device data.                                       */
169 /****************************************************************************/
170 static const struct flash_spec flash_table[] =
171 {
172 #define BUFFERED_FLAGS          (BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
173 #define NONBUFFERED_FLAGS       (BCE_NV_WREN)
174
175         /* Slow EEPROM */
176         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
177          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
178          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
179          "EEPROM - slow"},
180         /* Expansion entry 0001 */
181         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
182          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
183          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
184          "Entry 0001"},
185         /* Saifun SA25F010 (non-buffered flash) */
186         /* strap, cfg1, & write1 need updates */
187         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
188          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
190          "Non-buffered flash (128kB)"},
191         /* Saifun SA25F020 (non-buffered flash) */
192         /* strap, cfg1, & write1 need updates */
193         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
194          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
196          "Non-buffered flash (256kB)"},
197         /* Expansion entry 0100 */
198         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
199          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201          "Entry 0100"},
202         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
203         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
204          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
205          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
206          "Entry 0101: ST M45PE10 (128kB non-buffered)"},
207         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
208         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
209          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
210          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
211          "Entry 0110: ST M45PE20 (256kB non-buffered)"},
212         /* Saifun SA25F005 (non-buffered flash) */
213         /* strap, cfg1, & write1 need updates */
214         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
215          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
216          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
217          "Non-buffered flash (64kB)"},
218         /* Fast EEPROM */
219         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
220          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
221          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
222          "EEPROM - fast"},
223         /* Expansion entry 1001 */
224         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
225          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
226          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
227          "Entry 1001"},
228         /* Expansion entry 1010 */
229         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
230          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
231          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
232          "Entry 1010"},
233         /* ATMEL AT45DB011B (buffered flash) */
234         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
235          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
236          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
237          "Buffered flash (128kB)"},
238         /* Expansion entry 1100 */
239         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
240          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
241          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
242          "Entry 1100"},
243         /* Expansion entry 1101 */
244         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
245          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
246          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
247          "Entry 1101"},
248         /* Atmel Expansion entry 1110 */
249         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
250          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
251          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
252          "Entry 1110 (Atmel)"},
253         /* ATMEL AT45DB021B (buffered flash) */
254         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
255          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
256          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
257          "Buffered flash (256kB)"},
258 };
259
260 /*
261  * The BCM5709 controllers transparently handle the
262  * differences between Atmel 264 byte pages and all
263  * flash devices which use 256 byte pages, so no
264  * logical-to-physical mapping is required in the
265  * driver.
266  */
267 static struct flash_spec flash_5709 = {
268         .flags          = BCE_NV_BUFFERED,
269         .page_bits      = BCM5709_FLASH_PAGE_BITS,
270         .page_size      = BCM5709_FLASH_PAGE_SIZE,
271         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
272         .total_size     = BUFFERED_FLASH_TOTAL_SIZE * 2,
273         .name           = "5709/5716 buffered flash (256kB)",
274 };
275
276
277 /****************************************************************************/
278 /* DragonFly device entry points.                                           */
279 /****************************************************************************/
280 static int      bce_probe(device_t);
281 static int      bce_attach(device_t);
282 static int      bce_detach(device_t);
283 static void     bce_shutdown(device_t);
284
285
286 /****************************************************************************/
287 /* BCE Register/Memory Access Routines                                      */
288 /****************************************************************************/
289 static uint32_t bce_reg_rd_ind(struct bce_softc *, uint32_t);
290 static void     bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
291 static void     bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
292 static uint32_t bce_shmem_rd(struct bce_softc *, uint32_t);
293 static void     bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
294 static int      bce_miibus_read_reg(device_t, int, int);
295 static int      bce_miibus_write_reg(device_t, int, int, int);
296 static void     bce_miibus_statchg(device_t);
297
298
299 /****************************************************************************/
300 /* BCE NVRAM Access Routines                                                */
301 /****************************************************************************/
302 static int      bce_acquire_nvram_lock(struct bce_softc *);
303 static int      bce_release_nvram_lock(struct bce_softc *);
304 static void     bce_enable_nvram_access(struct bce_softc *);
305 static void     bce_disable_nvram_access(struct bce_softc *);
306 static int      bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
307                                      uint32_t);
308 static int      bce_init_nvram(struct bce_softc *);
309 static int      bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
310 static int      bce_nvram_test(struct bce_softc *);
311
312 /****************************************************************************/
313 /* BCE DMA Allocate/Free Routines                                           */
314 /****************************************************************************/
315 static int      bce_dma_alloc(struct bce_softc *);
316 static void     bce_dma_free(struct bce_softc *);
317 static void     bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
318
319 /****************************************************************************/
320 /* BCE Firmware Synchronization and Load                                    */
321 /****************************************************************************/
322 static int      bce_fw_sync(struct bce_softc *, uint32_t);
323 static void     bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
324                                  uint32_t, uint32_t);
325 static void     bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
326                                 struct fw_info *);
327 static void     bce_start_cpu(struct bce_softc *, struct cpu_reg *);
328 static void     bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
329 static void     bce_start_rxp_cpu(struct bce_softc *);
330 static void     bce_init_rxp_cpu(struct bce_softc *);
331 static void     bce_init_txp_cpu(struct bce_softc *);
332 static void     bce_init_tpat_cpu(struct bce_softc *);
333 static void     bce_init_cp_cpu(struct bce_softc *);
334 static void     bce_init_com_cpu(struct bce_softc *);
335 static void     bce_init_cpus(struct bce_softc *);
336
337 static void     bce_stop(struct bce_softc *);
338 static int      bce_reset(struct bce_softc *, uint32_t);
339 static int      bce_chipinit(struct bce_softc *);
340 static int      bce_blockinit(struct bce_softc *);
341 static int      bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t *,
342                     uint32_t *, int);
343 static void     bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
344                     uint32_t *);
345 static void     bce_probe_pci_caps(struct bce_softc *);
346 static void     bce_print_adapter_info(struct bce_softc *);
347 static void     bce_get_media(struct bce_softc *);
348
349 static int      bce_create_tx_ring(struct bce_tx_ring *);
350 static void     bce_destroy_tx_ring(struct bce_tx_ring *);
351 static void     bce_init_tx_context(struct bce_tx_ring *);
352 static int      bce_init_tx_chain(struct bce_tx_ring *);
353 static void     bce_free_tx_chain(struct bce_tx_ring *);
354 static int      bce_create_rx_ring(struct bce_rx_ring *);
355 static void     bce_destroy_rx_ring(struct bce_rx_ring *);
356 static void     bce_init_rx_context(struct bce_rx_ring *);
357 static int      bce_init_rx_chain(struct bce_rx_ring *);
358 static void     bce_free_rx_chain(struct bce_rx_ring *);
359
360 static void     bce_xmit(struct bce_tx_ring *);
361 static int      bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
362 static int      bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
363                     uint16_t *, uint16_t *);
364 static void     bce_start(struct ifnet *, struct ifaltq_subque *);
365 static int      bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
366 static void     bce_watchdog(struct ifnet *);
367 static int      bce_ifmedia_upd(struct ifnet *);
368 static void     bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
369 static void     bce_init(void *);
370 static void     bce_mgmt_init(struct bce_softc *);
371
372 static int      bce_init_ctx(struct bce_softc *);
373 static void     bce_get_mac_addr(struct bce_softc *);
374 static void     bce_set_mac_addr(struct bce_softc *);
375 static void     bce_phy_intr(struct bce_softc *);
376 static void     bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
377 static void     bce_tx_intr(struct bce_tx_ring *, uint16_t);
378 static void     bce_disable_intr(struct bce_softc *);
379 static void     bce_enable_intr(struct bce_softc *);
380 static void     bce_reenable_intr(struct bce_softc *);
381
382 #ifdef IFPOLL_ENABLE
383 static void     bce_npoll(struct ifnet *, struct ifpoll_info *);
384 static void     bce_npoll_compat(struct ifnet *, void *, int);
385 #endif
386 static void     bce_intr(struct bce_softc *);
387 static void     bce_intr_legacy(void *);
388 static void     bce_intr_msi(void *);
389 static void     bce_intr_msi_oneshot(void *);
390 static void     bce_set_rx_mode(struct bce_softc *);
391 static void     bce_stats_update(struct bce_softc *);
392 static void     bce_tick(void *);
393 static void     bce_tick_serialized(struct bce_softc *);
394 static void     bce_pulse(void *);
395 static void     bce_check_msi(void *);
396 static void     bce_add_sysctls(struct bce_softc *);
397
398 static void     bce_coal_change(struct bce_softc *);
399 static int      bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
400 static int      bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
401 static int      bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
402 static int      bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
403 static int      bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
404 static int      bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
405 static int      bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
406 static int      bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
407 static int      bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
408                                        uint32_t *, uint32_t);
409
410 /*
411  * NOTE:
412  * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
413  * takes 1023 as the TX ticks limit.  However, using 1023 will
414  * cause 5708(B2) to generate extra interrupts (~2000/s) even when
415  * there is _no_ network activity on the NIC.
416  */
417 static uint32_t bce_tx_bds_int = 255;           /* bcm: 20 */
418 static uint32_t bce_tx_bds = 255;               /* bcm: 20 */
419 static uint32_t bce_tx_ticks_int = 1022;        /* bcm: 80 */
420 static uint32_t bce_tx_ticks = 1022;            /* bcm: 80 */
421 static uint32_t bce_rx_bds_int = 128;           /* bcm: 6 */
422 static uint32_t bce_rx_bds = 0;                 /* bcm: 6 */
423 static uint32_t bce_rx_ticks_int = 150;         /* bcm: 18 */
424 static uint32_t bce_rx_ticks = 150;             /* bcm: 18 */
425
426 static int      bce_tx_wreg = 8;
427
428 static int      bce_msi_enable = 1;
429
430 static int      bce_rx_pages = RX_PAGES_DEFAULT;
431 static int      bce_tx_pages = TX_PAGES_DEFAULT;
432
433 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
434 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
435 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
436 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
437 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
438 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
439 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
440 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
441 TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
442 TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
443 TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
444 TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
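/*
 * All of the knobs above are registered as loader tunables, so they can be
 * overridden from /boot/loader.conf before the driver attaches.  The
 * settings below are only an illustration, not recommended values:
 *
 *   hw.bce.msi.enable="0"
 *   hw.bce.tx_wreg="16"
 *   hw.bce.rx_pages="4"
 */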
445
446 /****************************************************************************/
447 /* DragonFly device dispatch table.                                         */
448 /****************************************************************************/
449 static device_method_t bce_methods[] = {
450         /* Device interface */
451         DEVMETHOD(device_probe,         bce_probe),
452         DEVMETHOD(device_attach,        bce_attach),
453         DEVMETHOD(device_detach,        bce_detach),
454         DEVMETHOD(device_shutdown,      bce_shutdown),
455
456         /* bus interface */
457         DEVMETHOD(bus_print_child,      bus_generic_print_child),
458         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
459
460         /* MII interface */
461         DEVMETHOD(miibus_readreg,       bce_miibus_read_reg),
462         DEVMETHOD(miibus_writereg,      bce_miibus_write_reg),
463         DEVMETHOD(miibus_statchg,       bce_miibus_statchg),
464
465         { 0, 0 }
466 };
467
468 static driver_t bce_driver = {
469         "bce",
470         bce_methods,
471         sizeof(struct bce_softc)
472 };
473
474 static devclass_t bce_devclass;
475
476
477 DECLARE_DUMMY_MODULE(if_bce);
478 MODULE_DEPEND(bce, miibus, 1, 1, 1);
479 DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
480 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
481
482
483 /****************************************************************************/
484 /* Device probe function.                                                   */
485 /*                                                                          */
486 /* Compares the device to the driver's list of supported devices and        */
487 /* reports back to the OS whether this is the right driver for the device.  */
488 /*                                                                          */
489 /* Returns:                                                                 */
490 /*   0 on success, positive value on failure.                               */
491 /****************************************************************************/
492 static int
493 bce_probe(device_t dev)
494 {
495         struct bce_type *t;
496         uint16_t vid, did, svid, sdid;
497
498         /* Get the data for the device to be probed. */
499         vid  = pci_get_vendor(dev);
500         did  = pci_get_device(dev);
501         svid = pci_get_subvendor(dev);
502         sdid = pci_get_subdevice(dev);
503
504         /* Look through the list of known devices for a match. */
505         for (t = bce_devs; t->bce_name != NULL; ++t) {
506                 if (vid == t->bce_vid && did == t->bce_did && 
507                     (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
508                     (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
509                         uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
510                         char *descbuf;
511
512                         descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
513
514                         /* Print out the device identity. */
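                        /*
                         * The upper nibble of the PCI revision ID selects
                         * the letter ('A' + nibble) and the lower nibble is
                         * the stepping, e.g. a revision ID of 0x11 is
                         * reported as "B1".
                         */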
515                         ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
516                                   t->bce_name,
517                                   ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
518
519                         device_set_desc_copy(dev, descbuf);
520                         kfree(descbuf, M_TEMP);
521                         return 0;
522                 }
523         }
524         return ENXIO;
525 }
526
527
528 /****************************************************************************/
529 /* Adapter Information Print Function.                                      */
530 /*                                                                          */
531 /* Prints the adapter's ASIC ID, bus type and speed, bootcode version and   */
532 /* device feature flags.                                                    */
533 /*                                                                          */
534 /* Returns:                                                                 */
535 /*   None.                                                                  */
536 /****************************************************************************/
537 static void
538 bce_print_adapter_info(struct bce_softc *sc)
539 {
540         device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);
541
542         kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
543                 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
544
545         /* Bus info. */
546         if (sc->bce_flags & BCE_PCIE_FLAG) {
547                 kprintf("Bus (PCIe x%d, ", sc->link_width);
548                 switch (sc->link_speed) {
549                 case 1:
550                         kprintf("2.5Gbps); ");
551                         break;
552                 case 2:
553                         kprintf("5Gbps); ");
554                         break;
555                 default:
556                         kprintf("Unknown link speed); ");
557                         break;
558                 }
559         } else {
560                 kprintf("Bus (PCI%s, %s, %dMHz); ",
561                     ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
562                     ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
563                     sc->bus_speed_mhz);
564         }
565
566         /* Firmware version and device features. */
567         kprintf("B/C (%s)", sc->bce_bc_ver);
568
569         if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
570             (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
571                 kprintf("; Flags(");
572                 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
573                         kprintf("MFW[%s]", sc->bce_mfw_ver);
574                 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
575                         kprintf(" 2.5G");
576                 kprintf(")");
577         }
578         kprintf("\n");
579 }
580
581
582 /****************************************************************************/
583 /* PCI Capabilities Probe Function.                                         */
584 /*                                                                          */
585 /* Walks the PCI capabilities list for the device to find what features are */
586 /* supported.                                                               */
587 /*                                                                          */
588 /* Returns:                                                                 */
589 /*   None.                                                                  */
590 /****************************************************************************/
591 static void
592 bce_probe_pci_caps(struct bce_softc *sc)
593 {
594         device_t dev = sc->bce_dev;
595         uint8_t ptr;
596
597         if (pci_is_pcix(dev))
598                 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
599
600         ptr = pci_get_pciecap_ptr(dev);
601         if (ptr) {
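                /*
                 * Offset 0x12 into the PCIe capability is the Link Status
                 * register; bits 3:0 carry the negotiated link speed code
                 * and bits 9:4 the negotiated link width.
                 */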
602                 uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);
603
604                 sc->link_speed = link_status & 0xf;
605                 sc->link_width = (link_status >> 4) & 0x3f;
606                 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
607                 sc->bce_flags |= BCE_PCIE_FLAG;
608         }
609 }
610
611
612 /****************************************************************************/
613 /* Device attach function.                                                  */
614 /*                                                                          */
615 /* Allocates device resources, performs secondary chip identification,      */
616 /* resets and initializes the hardware, and initializes driver instance     */
617 /* variables.                                                               */
618 /*                                                                          */
619 /* Returns:                                                                 */
620 /*   0 on success, positive value on failure.                               */
621 /****************************************************************************/
622 static int
623 bce_attach(device_t dev)
624 {
625         struct bce_softc *sc = device_get_softc(dev);
626         struct ifnet *ifp = &sc->arpcom.ac_if;
627         uint32_t val;
628         u_int irq_flags;
629         void (*irq_handle)(void *);
630         int rid, rc = 0;
631         int i, j;
632         struct mii_probe_args mii_args;
633         uintptr_t mii_priv = 0;
634
635         sc->bce_dev = dev;
636         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
637
638         pci_enable_busmaster(dev);
639
640         bce_probe_pci_caps(sc);
641
642         /* Allocate PCI memory resources. */
643         rid = PCIR_BAR(0);
644         sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
645                                                  RF_ACTIVE | PCI_RF_DENSE);
646         if (sc->bce_res_mem == NULL) {
647                 device_printf(dev, "PCI memory allocation failed\n");
648                 return ENXIO;
649         }
650         sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
651         sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
652
653         /* Allocate PCI IRQ resources. */
654         sc->bce_irq_type = pci_alloc_1intr(dev, bce_msi_enable,
655             &sc->bce_irq_rid, &irq_flags);
656
657         sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
658             &sc->bce_irq_rid, irq_flags);
659         if (sc->bce_res_irq == NULL) {
660                 device_printf(dev, "PCI map interrupt failed\n");
661                 rc = ENXIO;
662                 goto fail;
663         }
664
665         /*
666          * Configure byte swap and enable indirect register access.
667          * Rely on CPU to do target byte swapping on big endian systems.
668  * Access to registers outside of PCI configuration space is not
669          * valid until this is done.
670          */
671         pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
672                          BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
673                          BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
674
675         /* Save ASIC revision info. */
676         sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);
677
678         /* Weed out any non-production controller revisions. */
679         switch (BCE_CHIP_ID(sc)) {
680         case BCE_CHIP_ID_5706_A0:
681         case BCE_CHIP_ID_5706_A1:
682         case BCE_CHIP_ID_5708_A0:
683         case BCE_CHIP_ID_5708_B0:
684         case BCE_CHIP_ID_5709_A0:
685         case BCE_CHIP_ID_5709_B0:
686         case BCE_CHIP_ID_5709_B1:
687 #ifdef foo
688         /* 5709C B2 seems to work fine */
689         case BCE_CHIP_ID_5709_B2:
690 #endif
691                 device_printf(dev, "Unsupported chip id 0x%08x!\n",
692                               BCE_CHIP_ID(sc));
693                 rc = ENODEV;
694                 goto fail;
695         }
696
697         mii_priv |= BRGPHY_FLAG_WIRESPEED;
698         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
699                 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
700                     BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
701                         mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
702         } else {
703                 mii_priv |= BRGPHY_FLAG_BER_BUG;
704         }
705
706         if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
707                 irq_handle = bce_intr_legacy;
708         } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
709                 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
710                         irq_handle = bce_intr_msi_oneshot;
711                         sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
712                 } else {
713                         irq_handle = bce_intr_msi;
714                         sc->bce_flags |= BCE_CHECK_MSI_FLAG;
715                 }
716         } else {
717                 panic("%s: unsupported intr type %d",
718                     device_get_nameunit(dev), sc->bce_irq_type);
719         }
720
721         /*
722          * Find the base address for shared memory access.
723          * Newer versions of bootcode use a signature and offset
724          * while older versions use a fixed address.
725          */
726         val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
727         if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
728             BCE_SHM_HDR_SIGNATURE_SIG) {
729                 /* Multi-port devices use different offsets in shared memory. */
730                 sc->bce_shmem_base = REG_RD_IND(sc,
731                     BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
732         } else {
733                 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
734         }
735
736         /* Fetch the bootcode revision. */
737         val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
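        /*
         * Each of the three high-order bytes of BCE_DEV_INFO_BC_REV is a
         * decimal version field; the loop below prints each byte without
         * leading zeros and joins the fields with dots, e.g. 0x01020300
         * becomes "1.2.3".
         */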
738         for (i = 0, j = 0; i < 3; i++) {
739                 uint8_t num;
740                 int k, skip0;
741
742                 num = (uint8_t)(val >> (24 - (i * 8)));
743                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
744                         if (num >= k || !skip0 || k == 1) {
745                                 sc->bce_bc_ver[j++] = (num / k) + '0';
746                                 skip0 = 0;
747                         }
748                 }
749                 if (i != 2)
750                         sc->bce_bc_ver[j++] = '.';
751         }
752
753         /* Check if any management firmware is running. */
754         val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
755         if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
756                 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
757
758                 /* Allow time for firmware to enter the running state. */
759                 for (i = 0; i < 30; i++) {
760                         val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
761                         if (val & BCE_CONDITION_MFW_RUN_MASK)
762                                 break;
763                         DELAY(10000);
764                 }
765         }
766
767         /* Check the current bootcode state. */
768         val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
769             BCE_CONDITION_MFW_RUN_MASK;
770         if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
771             val != BCE_CONDITION_MFW_RUN_NONE) {
772                 uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
773
774                 for (i = 0, j = 0; j < 3; j++) {
775                         val = bce_reg_rd_ind(sc, addr + j * 4);
776                         val = bswap32(val);
777                         memcpy(&sc->bce_mfw_ver[i], &val, 4);
778                         i += 4;
779                 }
780         }
781
782         /* Get PCI bus information (speed and type). */
783         val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
784         if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
785                 uint32_t clkreg;
786
787                 sc->bce_flags |= BCE_PCIX_FLAG;
788
789                 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
790                          BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
791                 switch (clkreg) {
792                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
793                         sc->bus_speed_mhz = 133;
794                         break;
795
796                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
797                         sc->bus_speed_mhz = 100;
798                         break;
799
800                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
801                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
802                         sc->bus_speed_mhz = 66;
803                         break;
804
805                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
806                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
807                         sc->bus_speed_mhz = 50;
808                         break;
809
810                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
811                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
812                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
813                         sc->bus_speed_mhz = 33;
814                         break;
815                 }
816         } else {
817                 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
818                         sc->bus_speed_mhz = 66;
819                 else
820                         sc->bus_speed_mhz = 33;
821         }
822
823         if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
824                 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
825
826         /* Reset the controller. */
827         rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
828         if (rc != 0)
829                 goto fail;
830
831         /* Initialize the controller. */
832         rc = bce_chipinit(sc);
833         if (rc != 0) {
834                 device_printf(dev, "Controller initialization failed!\n");
835                 goto fail;
836         }
837
838         /* Perform NVRAM test. */
839         rc = bce_nvram_test(sc);
840         if (rc != 0) {
841                 device_printf(dev, "NVRAM test failed!\n");
842                 goto fail;
843         }
844
845         /* Fetch the permanent Ethernet MAC address. */
846         bce_get_mac_addr(sc);
847
848         /*
849          * Trip points control how many BDs must be
850          * ready before an interrupt is generated,
851          * while ticks control how long a BD can sit
852          * in the chain before an interrupt is
853          * generated.  Set the default values for
854          * the RX and TX rings.
855          */
856
857 #ifdef BCE_DEBUG
858         /* Force more frequent interrupts. */
859         sc->bce_tx_quick_cons_trip_int = 1;
860         sc->bce_tx_quick_cons_trip     = 1;
861         sc->bce_tx_ticks_int           = 0;
862         sc->bce_tx_ticks               = 0;
863
864         sc->bce_rx_quick_cons_trip_int = 1;
865         sc->bce_rx_quick_cons_trip     = 1;
866         sc->bce_rx_ticks_int           = 0;
867         sc->bce_rx_ticks               = 0;
868 #else
869         sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
870         sc->bce_tx_quick_cons_trip     = bce_tx_bds;
871         sc->bce_tx_ticks_int           = bce_tx_ticks_int;
872         sc->bce_tx_ticks               = bce_tx_ticks;
873
874         sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
875         sc->bce_rx_quick_cons_trip     = bce_rx_bds;
876         sc->bce_rx_ticks_int           = bce_rx_ticks_int;
877         sc->bce_rx_ticks               = bce_rx_ticks;
878 #endif
879
880         /* Update statistics once every second. */
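        /*
         * The low 8 bits of the statistics ticks value are masked off (the
         * hardware field starts at bit 8), leaving 1,000,000us rounded down
         * to a multiple of 256us.
         */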
881         sc->bce_stats_ticks = 1000000 & 0xffff00;
882
883         /* Find the media type for the adapter. */
884         bce_get_media(sc);
885
886         /* Find out RX/TX ring count */
887         sc->ring_cnt = 1; /* XXX */
888
889         /* Allocate DMA memory resources. */
890         rc = bce_dma_alloc(sc);
891         if (rc != 0) {
892                 device_printf(dev, "DMA resource allocation failed!\n");
893                 goto fail;
894         }
895
896         /* Initialize the ifnet interface. */
897         ifp->if_softc = sc;
898         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
899         ifp->if_ioctl = bce_ioctl;
900         ifp->if_start = bce_start;
901         ifp->if_init = bce_init;
902         ifp->if_watchdog = bce_watchdog;
903 #ifdef IFPOLL_ENABLE
904         ifp->if_npoll = bce_npoll;
905 #endif
906         ifp->if_mtu = ETHERMTU;
907         ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
908         ifp->if_capabilities = BCE_IF_CAPABILITIES;
909         ifp->if_capenable = ifp->if_capabilities;
910         ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
911         ifq_set_ready(&ifp->if_snd);
912
913         if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
914                 ifp->if_baudrate = IF_Gbps(2.5);
915         else
916                 ifp->if_baudrate = IF_Gbps(1);
917
918         /*
919          * Look for our PHY.
920          */
921         mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
922         mii_args.mii_probemask = 1 << sc->bce_phy_addr;
923         mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
924         mii_args.mii_priv = mii_priv;
925
926         rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
927         if (rc != 0) {
928                 device_printf(dev, "PHY probe failed!\n");
929                 goto fail;
930         }
931
932         /* Attach to the Ethernet interface list. */
933         ether_ifattach(ifp, sc->eaddr, NULL);
934
935         callout_init_mp(&sc->bce_tick_callout);
936         callout_init_mp(&sc->bce_pulse_callout);
937         callout_init_mp(&sc->bce_ckmsi_callout);
938
939         /* Hookup IRQ last. */
940         rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, irq_handle, sc,
941                             &sc->bce_intrhand, ifp->if_serializer);
942         if (rc != 0) {
943                 device_printf(dev, "Failed to setup IRQ!\n");
944                 ether_ifdetach(ifp);
945                 goto fail;
946         }
947
948         sc->bce_intr_cpuid = rman_get_cpuid(sc->bce_res_irq);
949         ifq_set_cpuid(&ifp->if_snd, sc->bce_intr_cpuid);
950
951         /* Add the supported sysctls to the kernel. */
952         bce_add_sysctls(sc);
953
954 #ifdef IFPOLL_ENABLE
955         ifpoll_compat_setup(&sc->bce_npoll,
956             &sc->bce_sysctl_ctx, sc->bce_sysctl_tree, device_get_unit(dev),
957             ifp->if_serializer);
958 #endif
959
960         /*
961          * The chip reset earlier notified the bootcode that
962          * a driver is present.  We now need to start our pulse
963          * routine so that the bootcode is reminded that we're
964          * still running.
965          */
966         bce_pulse(sc);
967
968         /* Get the firmware running so IPMI still works */
969         bce_mgmt_init(sc);
970
971         if (bootverbose)
972                 bce_print_adapter_info(sc);
973
974         return 0;
975 fail:
976         bce_detach(dev);
977         return(rc);
978 }
979
980
981 /****************************************************************************/
982 /* Device detach function.                                                  */
983 /*                                                                          */
984 /* Stops the controller, resets the controller, and releases resources.     */
985 /*                                                                          */
986 /* Returns:                                                                 */
987 /*   0 on success, positive value on failure.                               */
988 /****************************************************************************/
989 static int
990 bce_detach(device_t dev)
991 {
992         struct bce_softc *sc = device_get_softc(dev);
993
994         if (device_is_attached(dev)) {
995                 struct ifnet *ifp = &sc->arpcom.ac_if;
996                 uint32_t msg;
997
998                 /* Stop and reset the controller. */
999                 lwkt_serialize_enter(ifp->if_serializer);
1000                 callout_stop(&sc->bce_pulse_callout);
1001                 bce_stop(sc);
1002                 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1003                         msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1004                 else
1005                         msg = BCE_DRV_MSG_CODE_UNLOAD;
1006                 bce_reset(sc, msg);
1007                 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
1008                 lwkt_serialize_exit(ifp->if_serializer);
1009
1010                 ether_ifdetach(ifp);
1011         }
1012
1013         /* If we have a child device on the MII bus remove it too. */
1014         if (sc->bce_miibus)
1015                 device_delete_child(dev, sc->bce_miibus);
1016         bus_generic_detach(dev);
1017
1018         if (sc->bce_res_irq != NULL) {
1019                 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid,
1020                     sc->bce_res_irq);
1021         }
1022
1023         if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
1024                 pci_release_msi(dev);
1025
1026         if (sc->bce_res_mem != NULL) {
1027                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1028                                      sc->bce_res_mem);
1029         }
1030
1031         bce_dma_free(sc);
1032
1033         if (sc->bce_sysctl_tree != NULL)
1034                 sysctl_ctx_free(&sc->bce_sysctl_ctx);
1035
1036         return 0;
1037 }
1038
1039
1040 /****************************************************************************/
1041 /* Device shutdown function.                                                */
1042 /*                                                                          */
1043 /* Stops and resets the controller.                                         */
1044 /*                                                                          */
1045 /* Returns:                                                                 */
1046 /*   Nothing                                                                */
1047 /****************************************************************************/
1048 static void
1049 bce_shutdown(device_t dev)
1050 {
1051         struct bce_softc *sc = device_get_softc(dev);
1052         struct ifnet *ifp = &sc->arpcom.ac_if;
1053         uint32_t msg;
1054
1055         lwkt_serialize_enter(ifp->if_serializer);
1056         bce_stop(sc);
1057         if (sc->bce_flags & BCE_NO_WOL_FLAG)
1058                 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1059         else
1060                 msg = BCE_DRV_MSG_CODE_UNLOAD;
1061         bce_reset(sc, msg);
1062         lwkt_serialize_exit(ifp->if_serializer);
1063 }
1064
1065
1066 /****************************************************************************/
1067 /* Indirect register read.                                                  */
1068 /*                                                                          */
1069 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
1070 /* configuration space.  Using this mechanism avoids issues with posted     */
1071 /* reads but is much slower than memory-mapped I/O.                         */
1072 /*                                                                          */
1073 /* Returns:                                                                 */
1074 /*   The value of the register.                                             */
1075 /****************************************************************************/
1076 static uint32_t
1077 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
1078 {
1079         device_t dev = sc->bce_dev;
1080
1081         pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1082         return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1083 }
1084
1085
1086 /****************************************************************************/
1087 /* Indirect register write.                                                 */
1088 /*                                                                          */
1089 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1090 /* configuration space.  Using this mechanism avoids issues with posted     */
1091 /* writes but is much slower than memory-mapped I/O.                        */
1092 /*                                                                          */
1093 /* Returns:                                                                 */
1094 /*   Nothing.                                                               */
1095 /****************************************************************************/
1096 static void
1097 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
1098 {
1099         device_t dev = sc->bce_dev;
1100
1101         pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1102         pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1103 }
1104
1105
1106 /****************************************************************************/
1107 /* Shared memory write.                                                     */
1108 /*                                                                          */
1109 /* Writes NetXtreme II shared memory region.                                */
1110 /*                                                                          */
1111 /* Returns:                                                                 */
1112 /*   Nothing.                                                               */
1113 /****************************************************************************/
1114 static void
1115 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
1116 {
1117         bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1118 }
1119
1120
1121 /****************************************************************************/
1122 /* Shared memory read.                                                      */
1123 /*                                                                          */
1124 /* Reads NetXtreme II shared memory region.                                 */
1125 /*                                                                          */
1126 /* Returns:                                                                 */
1127 /*   The 32 bit value read.                                                 */
1128 /****************************************************************************/
1129 static uint32_t
1130 bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
1131 {
1132         return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
1133 }
1134
1135
1136 /****************************************************************************/
1137 /* Context memory write.                                                    */
1138 /*                                                                          */
1139 /* The NetXtreme II controller uses context memory to track connection      */
1140 /* information for L2 and higher network protocols.                         */
1141 /*                                                                          */
1142 /* Returns:                                                                 */
1143 /*   Nothing.                                                               */
1144 /****************************************************************************/
1145 static void
1146 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
1147     uint32_t ctx_val)
1148 {
1149         uint32_t idx, offset = ctx_offset + cid_addr;
1150         uint32_t val, retry_cnt = 5;
1151
1152         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1153             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
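                /*
                 * On the 5709/5716 context memory is written through a
                 * request/acknowledge handshake: post the data, set the
                 * WRITE_REQ bit and poll until the controller clears it.
                 */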
1154                 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1155                 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1156
1157                 for (idx = 0; idx < retry_cnt; idx++) {
1158                         val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1159                         if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1160                                 break;
1161                         DELAY(5);
1162                 }
1163
1164                 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
1165                         device_printf(sc->bce_dev,
1166                             "Unable to write CTX memory: "
1167                             "cid_addr = 0x%08X, offset = 0x%08X!\n",
1168                             cid_addr, ctx_offset);
1169                 }
1170         } else {
1171                 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1172                 REG_WR(sc, BCE_CTX_DATA, ctx_val);
1173         }
1174 }
1175
1176
1177 /****************************************************************************/
1178 /* PHY register read.                                                       */
1179 /*                                                                          */
1180 /* Implements register reads on the MII bus.                                */
1181 /*                                                                          */
1182 /* Returns:                                                                 */
1183 /*   The value of the register.                                             */
1184 /****************************************************************************/
1185 static int
1186 bce_miibus_read_reg(device_t dev, int phy, int reg)
1187 {
1188         struct bce_softc *sc = device_get_softc(dev);
1189         uint32_t val;
1190         int i;
1191
1192         /* Make sure we are accessing the correct PHY address. */
1193         KASSERT(phy == sc->bce_phy_addr,
1194             ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1195
1196         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
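                /*
                 * Auto-polling and host MDIO accesses share the MDIO
                 * interface, so disable auto-polling for the duration of
                 * this transaction; it is re-enabled below.
                 */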
1197                 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1198                 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1199
1200                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1201                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1202
1203                 DELAY(40);
1204         }
1205
1206         val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1207               BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1208               BCE_EMAC_MDIO_COMM_START_BUSY;
1209         REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1210
1211         for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1212                 DELAY(10);
1213
1214                 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1215                 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1216                         DELAY(5);
1217
1218                         val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1219                         val &= BCE_EMAC_MDIO_COMM_DATA;
1220                         break;
1221                 }
1222         }
1223
1224         if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1225                 if_printf(&sc->arpcom.ac_if,
1226                           "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1227                           phy, reg);
1228                 val = 0x0;
1229         } else {
1230                 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1231         }
1232
1233         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1234                 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1235                 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1236
1237                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1238                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1239
1240                 DELAY(40);
1241         }
1242         return (val & 0xffff);
1243 }
1244
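/*
 * Illustrative sketch, never compiled: reading the PHY status register
 * through bce_miibus_read_reg().  MII_BMSR comes from the MII layer
 * headers; in normal operation the MII framework invokes this method via
 * miibus rather than the driver calling it directly.
 */
#if 0
static uint16_t
bce_read_bmsr_example(struct bce_softc *sc)
{
        /* The phy argument must match sc->bce_phy_addr (see the KASSERT). */
        return bce_miibus_read_reg(sc->bce_dev, sc->bce_phy_addr, MII_BMSR);
}
#endif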
1245
1246 /****************************************************************************/
1247 /* PHY register write.                                                      */
1248 /*                                                                          */
1249 /* Implements register writes on the MII bus.                               */
1250 /*                                                                          */
1251 /* Returns:                                                                 */
1252 /*   0 on success.                                                          */
1253 /****************************************************************************/
1254 static int
1255 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1256 {
1257         struct bce_softc *sc = device_get_softc(dev);
1258         uint32_t val1;
1259         int i;
1260
1261         /* Make sure we are accessing the correct PHY address. */
1262         KASSERT(phy == sc->bce_phy_addr,
1263             ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1264
1265         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1266                 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1267                 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1268
1269                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1270                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1271
1272                 DELAY(40);
1273         }
1274
1275         val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1276                 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1277                 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1278         REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1279
1280         for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1281                 DELAY(10);
1282
1283                 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1284                 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1285                         DELAY(5);
1286                         break;
1287                 }
1288         }
1289
1290         if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1291                 if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
1292
1293         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1294                 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1295                 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1296
1297                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1298                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1299
1300                 DELAY(40);
1301         }
1302         return 0;
1303 }
1304
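/*
 * Illustrative sketch, never compiled: restarting autonegotiation by
 * writing the PHY control register through bce_miibus_write_reg().  The
 * MII_BMCR/BMCR_AUTOEN/BMCR_STARTNEG names are assumed from the MII layer
 * headers; the MII framework normally issues such writes itself.
 */
#if 0
static void
bce_restart_aneg_example(struct bce_softc *sc)
{
        bce_miibus_write_reg(sc->bce_dev, sc->bce_phy_addr, MII_BMCR,
            BMCR_AUTOEN | BMCR_STARTNEG);
}
#endif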
1305
1306 /****************************************************************************/
1307 /* MII bus status change.                                                   */
1308 /*                                                                          */
1309 /* Called by the MII bus driver when the PHY establishes link to set the    */
1310 /* MAC interface registers.                                                 */
1311 /*                                                                          */
1312 /* Returns:                                                                 */
1313 /*   Nothing.                                                               */
1314 /****************************************************************************/
1315 static void
1316 bce_miibus_statchg(device_t dev)
1317 {
1318         struct bce_softc *sc = device_get_softc(dev);
1319         struct mii_data *mii = device_get_softc(sc->bce_miibus);
1320
1321         BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1322
1323         /*
1324          * Set MII or GMII interface based on the speed negotiated
1325          * by the PHY.
1326          */
1327         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 
1328             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1329                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1330         } else {
1331                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1332         }
1333
1334         /*
1335          * Set half or full duplex based on the duplex mode negotiated
1336          * by the PHY.
1337          */
1338         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1339                 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1340         } else {
1341                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1342         }
1343 }
1344
1345
1346 /****************************************************************************/
1347 /* Acquire NVRAM lock.                                                      */
1348 /*                                                                          */
1349 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1350 /* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is        */
1351 /* reserved for use by the driver.                                           */
1352 /*                                                                          */
1353 /* Returns:                                                                 */
1354 /*   0 on success, positive value on failure.                               */
1355 /****************************************************************************/
1356 static int
1357 bce_acquire_nvram_lock(struct bce_softc *sc)
1358 {
1359         uint32_t val;
1360         int j;
1361
1362         /* Request access to the flash interface. */
1363         REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1364         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1365                 val = REG_RD(sc, BCE_NVM_SW_ARB);
1366                 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1367                         break;
1368
1369                 DELAY(5);
1370         }
1371
1372         if (j >= NVRAM_TIMEOUT_COUNT) {
1373                 return EBUSY;
1374         }
1375         return 0;
1376 }
1377
1378
1379 /****************************************************************************/
1380 /* Release NVRAM lock.                                                      */
1381 /*                                                                          */
1382 /* When the caller is finished accessing NVRAM the lock must be released.   */
1383 /* Lock 0 is reserved, lock 1 is used by the firmware, and lock 2 is        */
1384 /* reserved for use by the driver.                                           */
1385 /*                                                                          */
1386 /* Returns:                                                                 */
1387 /*   0 on success, positive value on failure.                               */
1388 /****************************************************************************/
1389 static int
1390 bce_release_nvram_lock(struct bce_softc *sc)
1391 {
1392         int j;
1393         uint32_t val;
1394
1395         /*
1396          * Relinquish nvram interface.
1397          */
1398         REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1399
1400         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1401                 val = REG_RD(sc, BCE_NVM_SW_ARB);
1402                 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1403                         break;
1404
1405                 DELAY(5);
1406         }
1407
1408         if (j >= NVRAM_TIMEOUT_COUNT) {
1409                 return EBUSY;
1410         }
1411         return 0;
1412 }
1413
1414
1415 /****************************************************************************/
1416 /* Enable NVRAM access.                                                     */
1417 /*                                                                          */
1418 /* Before accessing NVRAM for read or write operations the caller must      */
1419 /* first enable NVRAM access.                                                */
1420 /*                                                                          */
1421 /* Returns:                                                                 */
1422 /*   Nothing.                                                               */
1423 /****************************************************************************/
1424 static void
1425 bce_enable_nvram_access(struct bce_softc *sc)
1426 {
1427         uint32_t val;
1428
1429         val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1430         /* Enable both bits, even on read. */
1431         REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1432                val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1433 }
1434
1435
1436 /****************************************************************************/
1437 /* Disable NVRAM access.                                                    */
1438 /*                                                                          */
1439 /* When the caller is finished accessing NVRAM access must be disabled.     */
1440 /*                                                                          */
1441 /* Returns:                                                                 */
1442 /*   Nothing.                                                               */
1443 /****************************************************************************/
1444 static void
1445 bce_disable_nvram_access(struct bce_softc *sc)
1446 {
1447         uint32_t val;
1448
1449         val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1450
1451         /* Disable both bits, even after read. */
1452         REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1453                val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1454 }
1455
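/*
 * Illustrative sketch, never compiled: the canonical bracket around any
 * NVRAM operation: acquire the arbitration lock and enable access before
 * issuing commands, then undo both in reverse order.  bce_nvram_read()
 * below follows exactly this pattern.
 */
#if 0
static int
bce_nvram_bracket_example(struct bce_softc *sc)
{
        int rc;

        rc = bce_acquire_nvram_lock(sc);
        if (rc != 0)
                return rc;
        bce_enable_nvram_access(sc);

        /* ... issue NVRAM commands here ... */

        bce_disable_nvram_access(sc);
        bce_release_nvram_lock(sc);
        return 0;
}
#endif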
1456
1457 /****************************************************************************/
1458 /* Read a dword (32 bits) from NVRAM.                                       */
1459 /*                                                                          */
1460 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1461 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1462 /*                                                                          */
1463 /* Returns:                                                                 */
1464 /*   0 on success and the 32 bit value read, positive value on failure.     */
1465 /****************************************************************************/
1466 static int
1467 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1468                      uint32_t cmd_flags)
1469 {
1470         uint32_t cmd;
1471         int i, rc = 0;
1472
1473         /* Build the command word. */
1474         cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1475
1476         /* Calculate the offset for buffered flash. */
1477         if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
1478                 offset = ((offset / sc->bce_flash_info->page_size) <<
1479                           sc->bce_flash_info->page_bits) +
1480                          (offset % sc->bce_flash_info->page_size);
1481         }
1482
1483         /*
1484          * Clear the DONE bit separately, set the address to read,
1485          * and issue the read.
1486          */
1487         REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1488         REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1489         REG_WR(sc, BCE_NVM_COMMAND, cmd);
1490
1491         /* Wait for completion. */
1492         for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1493                 uint32_t val;
1494
1495                 DELAY(5);
1496
1497                 val = REG_RD(sc, BCE_NVM_COMMAND);
1498                 if (val & BCE_NVM_COMMAND_DONE) {
1499                         val = REG_RD(sc, BCE_NVM_READ);
1500
1501                         val = be32toh(val);
1502                         memcpy(ret_val, &val, 4);
1503                         break;
1504                 }
1505         }
1506
1507         /* Check for errors. */
1508         if (i >= NVRAM_TIMEOUT_COUNT) {
1509                 if_printf(&sc->arpcom.ac_if,
1510                           "Timeout error reading NVRAM at offset 0x%08X!\n",
1511                           offset);
1512                 rc = EBUSY;
1513         }
1514         return rc;
1515 }
1516
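/*
 * Illustrative sketch, never compiled: reading one 4-byte aligned dword.
 * A standalone read is flagged both FIRST and LAST so the flash interface
 * treats it as a complete command sequence; the caller must already hold
 * the NVRAM lock and have enabled NVRAM access.
 */
#if 0
static int
bce_read_one_dword_example(struct bce_softc *sc, uint32_t offset,
    uint8_t buf[4])
{
        return bce_nvram_read_dword(sc, offset, buf,
            BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST);
}
#endif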
1517
1518 /****************************************************************************/
1519 /* Initialize NVRAM access.                                                 */
1520 /*                                                                          */
1521 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1522 /* access that device.                                                      */
1523 /*                                                                          */
1524 /* Returns:                                                                 */
1525 /*   0 on success, positive value on failure.                               */
1526 /****************************************************************************/
1527 static int
1528 bce_init_nvram(struct bce_softc *sc)
1529 {
1530         uint32_t val;
1531         int j, entry_count, rc = 0;
1532         const struct flash_spec *flash;
1533
1534         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1535             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1536                 sc->bce_flash_info = &flash_5709;
1537                 goto bce_init_nvram_get_flash_size;
1538         }
1539
1540         /* Determine the selected interface. */
1541         val = REG_RD(sc, BCE_NVM_CFG1);
1542
1543         entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1544
1545         /*
1546          * Flash reconfiguration is required to support additional
1547          * NVRAM devices not directly supported in hardware.
1548          * Check if the flash interface was reconfigured
1549          * by the bootcode.
1550          */
1551
1552         if (val & 0x40000000) {
1553                 /* Flash interface reconfigured by bootcode. */
1554                 for (j = 0, flash = flash_table; j < entry_count;
1555                      j++, flash++) {
1556                         if ((val & FLASH_BACKUP_STRAP_MASK) ==
1557                             (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1558                                 sc->bce_flash_info = flash;
1559                                 break;
1560                         }
1561                 }
1562         } else {
1563                 /* Flash interface not yet reconfigured. */
1564                 uint32_t mask;
1565
1566                 if (val & (1 << 23))
1567                         mask = FLASH_BACKUP_STRAP_MASK;
1568                 else
1569                         mask = FLASH_STRAP_MASK;
1570
1571                 /* Look for the matching NVRAM device configuration data. */
1572                 for (j = 0, flash = flash_table; j < entry_count;
1573                      j++, flash++) {
1574                         /* Check if the device matches any of the known devices. */
1575                         if ((val & mask) == (flash->strapping & mask)) {
1576                                 /* Found a device match. */
1577                                 sc->bce_flash_info = flash;
1578
1579                                 /* Request access to the flash interface. */
1580                                 rc = bce_acquire_nvram_lock(sc);
1581                                 if (rc != 0)
1582                                         return rc;
1583
1584                                 /* Reconfigure the flash interface. */
1585                                 bce_enable_nvram_access(sc);
1586                                 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1587                                 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1588                                 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1589                                 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1590                                 bce_disable_nvram_access(sc);
1591                                 bce_release_nvram_lock(sc);
1592                                 break;
1593                         }
1594                 }
1595         }
1596
1597         /* Check if a matching device was found. */
1598         if (j == entry_count) {
1599                 sc->bce_flash_info = NULL;
1600                 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1601                 return ENODEV;
1602         }
1603
1604 bce_init_nvram_get_flash_size:
1605         /* Write the flash config data to the shared memory interface. */
1606         val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
1607             BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1608         if (val)
1609                 sc->bce_flash_size = val;
1610         else
1611                 sc->bce_flash_size = sc->bce_flash_info->total_size;
1612
1613         return rc;
1614 }
1615
1616
1617 /****************************************************************************/
1618 /* Read an arbitrary range of data from NVRAM.                              */
1619 /*                                                                          */
1620 /* Prepares the NVRAM interface for access and reads the requested data     */
1621 /* into the supplied buffer.                                                */
1622 /*                                                                          */
1623 /* Returns:                                                                 */
1624 /*   0 on success and the data read, positive value on failure.             */
1625 /****************************************************************************/
1626 static int
1627 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1628                int buf_size)
1629 {
1630         uint32_t cmd_flags, offset32, len32, extra;
1631         int rc = 0;
1632
1633         if (buf_size == 0)
1634                 return 0;
1635
1636         /* Request access to the flash interface. */
1637         rc = bce_acquire_nvram_lock(sc);
1638         if (rc != 0)
1639                 return rc;
1640
1641         /* Enable access to flash interface */
1642         bce_enable_nvram_access(sc);
1643
1644         len32 = buf_size;
1645         offset32 = offset;
1646         extra = 0;
1647
1648         cmd_flags = 0;
1649
1650         /* XXX should we release nvram lock if read_dword() fails? */
1651         if (offset32 & 3) {
1652                 uint8_t buf[4];
1653                 uint32_t pre_len;
1654
1655                 offset32 &= ~3;
1656                 pre_len = 4 - (offset & 3);
1657
1658                 if (pre_len >= len32) {
1659                         pre_len = len32;
1660                         cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1661                 } else {
1662                         cmd_flags = BCE_NVM_COMMAND_FIRST;
1663                 }
1664
1665                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1666                 if (rc)
1667                         return rc;
1668
1669                 memcpy(ret_buf, buf + (offset & 3), pre_len);
1670
1671                 offset32 += 4;
1672                 ret_buf += pre_len;
1673                 len32 -= pre_len;
1674         }
1675
1676         if (len32 & 3) {
1677                 extra = 4 - (len32 & 3);
1678                 len32 = (len32 + 4) & ~3;
1679         }
1680
1681         if (len32 == 4) {
1682                 uint8_t buf[4];
1683
1684                 if (cmd_flags)
1685                         cmd_flags = BCE_NVM_COMMAND_LAST;
1686                 else
1687                         cmd_flags = BCE_NVM_COMMAND_FIRST |
1688                                     BCE_NVM_COMMAND_LAST;
1689
1690                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1691
1692                 memcpy(ret_buf, buf, 4 - extra);
1693         } else if (len32 > 0) {
1694                 uint8_t buf[4];
1695
1696                 /* Read the first word. */
1697                 if (cmd_flags)
1698                         cmd_flags = 0;
1699                 else
1700                         cmd_flags = BCE_NVM_COMMAND_FIRST;
1701
1702                 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1703
1704                 /* Advance to the next dword. */
1705                 offset32 += 4;
1706                 ret_buf += 4;
1707                 len32 -= 4;
1708
1709                 while (len32 > 4 && rc == 0) {
1710                         rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1711
1712                         /* Advance to the next dword. */
1713                         offset32 += 4;
1714                         ret_buf += 4;
1715                         len32 -= 4;
1716                 }
1717
1718                 if (rc)
1719                         goto bce_nvram_read_locked_exit;
1720
1721                 cmd_flags = BCE_NVM_COMMAND_LAST;
1722                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1723
1724                 memcpy(ret_buf, buf, 4 - extra);
1725         }
1726
1727 bce_nvram_read_locked_exit:
1728         /* Disable access to flash interface and release the lock. */
1729         bce_disable_nvram_access(sc);
1730         bce_release_nvram_lock(sc);
1731
1732         return rc;
1733 }
1734
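/*
 * Illustrative sketch, never compiled: bce_nvram_read() hides the dword
 * alignment handling, so callers may pass any offset and length; leading
 * and trailing partial dwords are read whole and only the requested bytes
 * are copied out.  The offset used here is a hypothetical placeholder.
 */
#if 0
static int
bce_read_unaligned_example(struct bce_softc *sc)
{
        uint8_t buf[6];

        /* Starts and ends mid-dword, yet a single call suffices. */
        return bce_nvram_read(sc, 0x102, buf, sizeof(buf));
}
#endif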
1735
1736 /****************************************************************************/
1737 /* Verifies that NVRAM is accessible and contains valid data.               */
1738 /*                                                                          */
1739 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1740 /* correct.                                                                 */
1741 /*                                                                          */
1742 /* Returns:                                                                 */
1743 /*   0 on success, positive value on failure.                               */
1744 /****************************************************************************/
1745 static int
1746 bce_nvram_test(struct bce_softc *sc)
1747 {
1748         uint32_t buf[BCE_NVRAM_SIZE / 4];
1749         uint32_t magic, csum;
1750         uint8_t *data = (uint8_t *)buf;
1751         int rc = 0;
1752
1753         /*
1754          * Check that the device NVRAM is valid by reading
1755          * the magic value at offset 0.
1756          */
1757         rc = bce_nvram_read(sc, 0, data, 4);
1758         if (rc != 0)
1759                 return rc;
1760
1761         magic = be32toh(buf[0]);
1762         if (magic != BCE_NVRAM_MAGIC) {
1763                 if_printf(&sc->arpcom.ac_if,
1764                           "Invalid NVRAM magic value! Expected: 0x%08X, "
1765                           "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1766                 return ENODEV;
1767         }
1768
1769         /*
1770          * Verify that the device NVRAM includes valid
1771          * configuration data.
1772          */
1773         rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
1774         if (rc != 0)
1775                 return rc;
1776
1777         csum = ether_crc32_le(data, 0x100);
1778         if (csum != BCE_CRC32_RESIDUAL) {
1779                 if_printf(&sc->arpcom.ac_if,
1780                           "Invalid Manufacturing Information NVRAM CRC! "
1781                           "Expected: 0x%08X, Found: 0x%08X\n",
1782                           BCE_CRC32_RESIDUAL, csum);
1783                 return ENODEV;
1784         }
1785
1786         csum = ether_crc32_le(data + 0x100, 0x100);
1787         if (csum != BCE_CRC32_RESIDUAL) {
1788                 if_printf(&sc->arpcom.ac_if,
1789                           "Invalid Feature Configuration Information "
1790                           "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1791                           BCE_CRC32_RESIDUAL, csum);
1792                 rc = ENODEV;
1793         }
1794         return rc;
1795 }
1796
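/*
 * Illustrative sketch, never compiled: bce_nvram_test() is a one-shot
 * sanity check; a non-zero return should abort device bring-up.  The
 * call-site placement shown here is an assumption, not taken from this
 * file.
 */
#if 0
static int
bce_nvram_check_example(struct bce_softc *sc)
{
        if (bce_nvram_test(sc) != 0) {
                device_printf(sc->bce_dev, "NVRAM self-test failed!\n");
                return ENODEV;
        }
        return 0;
}
#endif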
1797
1798 /****************************************************************************/
1799 /* Identifies the current media type of the controller and sets the PHY     */
1800 /* address.                                                                 */
1801 /*                                                                          */
1802 /* Returns:                                                                 */
1803 /*   Nothing.                                                               */
1804 /****************************************************************************/
1805 static void
1806 bce_get_media(struct bce_softc *sc)
1807 {
1808         uint32_t val;
1809
1810         sc->bce_phy_addr = 1;
1811
1812         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1813             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1814                 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
1815                 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
1816                 uint32_t strap;
1817
1818                 /*
1819                  * The BCM5709S is software configurable
1820                  * for Copper or SerDes operation.
1821                  */
1822                 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
1823                         return;
1824                 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
1825                         sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1826                         return;
1827                 }
1828
1829                 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
1830                         strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
1831                 } else {
1832                         strap =
1833                         (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
1834                 }
1835
1836                 if (pci_get_function(sc->bce_dev) == 0) {
1837                         switch (strap) {
1838                         case 0x4:
1839                         case 0x5:
1840                         case 0x6:
1841                                 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1842                                 break;
1843                         }
1844                 } else {
1845                         switch (strap) {
1846                         case 0x1:
1847                         case 0x2:
1848                         case 0x4:
1849                                 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1850                                 break;
1851                         }
1852                 }
1853         } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
1854                 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1855         }
1856
1857         if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
1858                 sc->bce_flags |= BCE_NO_WOL_FLAG;
1859                 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1860                         sc->bce_phy_addr = 2;
1861                         val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1862                         if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
1863                                 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
1864                 }
1865         } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
1866             (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
1867                 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
1868         }
1869 }
1870
1871
1872 static void
1873 bce_destroy_tx_ring(struct bce_tx_ring *txr)
1874 {
1875         int i;
1876
1877         /* Destroy the TX buffer descriptor DMA stuffs. */
1878         if (txr->tx_bd_chain_tag != NULL) {
1879                 for (i = 0; i < txr->tx_pages; i++) {
1880                         if (txr->tx_bd_chain[i] != NULL) {
1881                                 bus_dmamap_unload(txr->tx_bd_chain_tag,
1882                                     txr->tx_bd_chain_map[i]);
1883                                 bus_dmamem_free(txr->tx_bd_chain_tag,
1884                                     txr->tx_bd_chain[i],
1885                                     txr->tx_bd_chain_map[i]);
1886                         }
1887                 }
1888                 bus_dma_tag_destroy(txr->tx_bd_chain_tag);
1889         }
1890
1891         /* Destroy the TX mbuf DMA stuffs. */
1892         if (txr->tx_mbuf_tag != NULL) {
1893                 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
1894                         /* Must have been unloaded in bce_stop() */
1895                         KKASSERT(txr->tx_mbuf_ptr[i] == NULL);
1896                         bus_dmamap_destroy(txr->tx_mbuf_tag,
1897                             txr->tx_mbuf_map[i]);
1898                 }
1899                 bus_dma_tag_destroy(txr->tx_mbuf_tag);
1900         }
1901
1902         if (txr->tx_bd_chain_map != NULL)
1903                 kfree(txr->tx_bd_chain_map, M_DEVBUF);
1904         if (txr->tx_bd_chain != NULL)
1905                 kfree(txr->tx_bd_chain, M_DEVBUF);
1906         if (txr->tx_bd_chain_paddr != NULL)
1907                 kfree(txr->tx_bd_chain_paddr, M_DEVBUF);
1908
1909         if (txr->tx_mbuf_map != NULL)
1910                 kfree(txr->tx_mbuf_map, M_DEVBUF);
1911         if (txr->tx_mbuf_ptr != NULL)
1912                 kfree(txr->tx_mbuf_ptr, M_DEVBUF);
1913 }
1914
1915
1916 static void
1917 bce_destroy_rx_ring(struct bce_rx_ring *rxr)
1918 {
1919         int i;
1920
1921         /* Destroy the RX buffer descriptor DMA stuffs. */
1922         if (rxr->rx_bd_chain_tag != NULL) {
1923                 for (i = 0; i < rxr->rx_pages; i++) {
1924                         if (rxr->rx_bd_chain[i] != NULL) {
1925                                 bus_dmamap_unload(rxr->rx_bd_chain_tag,
1926                                     rxr->rx_bd_chain_map[i]);
1927                                 bus_dmamem_free(rxr->rx_bd_chain_tag,
1928                                     rxr->rx_bd_chain[i],
1929                                     rxr->rx_bd_chain_map[i]);
1930                         }
1931                 }
1932                 bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
1933         }
1934
1935         /* Destroy the RX mbuf DMA stuffs. */
1936         if (rxr->rx_mbuf_tag != NULL) {
1937                 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
1938                         /* Must have been unloaded in bce_stop() */
1939                         KKASSERT(rxr->rx_mbuf_ptr[i] == NULL);
1940                         bus_dmamap_destroy(rxr->rx_mbuf_tag,
1941                             rxr->rx_mbuf_map[i]);
1942                 }
1943                 bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
1944                 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
1945         }
1946
1947         if (rxr->rx_bd_chain_map != NULL)
1948                 kfree(rxr->rx_bd_chain_map, M_DEVBUF);
1949         if (rxr->rx_bd_chain != NULL)
1950                 kfree(rxr->rx_bd_chain, M_DEVBUF);
1951         if (rxr->rx_bd_chain_paddr != NULL)
1952                 kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);
1953
1954         if (rxr->rx_mbuf_map != NULL)
1955                 kfree(rxr->rx_mbuf_map, M_DEVBUF);
1956         if (rxr->rx_mbuf_ptr != NULL)
1957                 kfree(rxr->rx_mbuf_ptr, M_DEVBUF);
1958         if (rxr->rx_mbuf_paddr != NULL)
1959                 kfree(rxr->rx_mbuf_paddr, M_DEVBUF);
1960 }
1961
1962
1963 /****************************************************************************/
1964 /* Free any DMA memory owned by the driver.                                 */
1965 /*                                                                          */
1966 /* Scans through each data structure that requires DMA memory and frees     */
1967 /* the memory if allocated.                                                 */
1968 /*                                                                          */
1969 /* Returns:                                                                 */
1970 /*   Nothing.                                                               */
1971 /****************************************************************************/
1972 static void
1973 bce_dma_free(struct bce_softc *sc)
1974 {
1975         int i;
1976
1977         /* Destroy the status block. */
1978         if (sc->status_tag != NULL) {
1979                 if (sc->status_block != NULL) {
1980                         bus_dmamap_unload(sc->status_tag, sc->status_map);
1981                         bus_dmamem_free(sc->status_tag, sc->status_block,
1982                                         sc->status_map);
1983                 }
1984                 bus_dma_tag_destroy(sc->status_tag);
1985         }
1986
1987         /* Destroy the statistics block. */
1988         if (sc->stats_tag != NULL) {
1989                 if (sc->stats_block != NULL) {
1990                         bus_dmamap_unload(sc->stats_tag, sc->stats_map);
1991                         bus_dmamem_free(sc->stats_tag, sc->stats_block,
1992                                         sc->stats_map);
1993                 }
1994                 bus_dma_tag_destroy(sc->stats_tag);
1995         }
1996
1997         /* Destroy the CTX DMA stuffs. */
1998         if (sc->ctx_tag != NULL) {
1999                 for (i = 0; i < sc->ctx_pages; i++) {
2000                         if (sc->ctx_block[i] != NULL) {
2001                                 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
2002                                 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2003                                                 sc->ctx_map[i]);
2004                         }
2005                 }
2006                 bus_dma_tag_destroy(sc->ctx_tag);
2007         }
2008
2009         /* Free TX rings */
2010         if (sc->tx_rings != NULL) {
2011                 for (i = 0; i < sc->ring_cnt; ++i)
2012                         bce_destroy_tx_ring(&sc->tx_rings[i]);
2013                 kfree(sc->tx_rings, M_DEVBUF);
2014         }
2015
2016         /* Free RX rings */
2017         if (sc->rx_rings != NULL) {
2018                 for (i = 0; i < sc->ring_cnt; ++i)
2019                         bce_destroy_rx_ring(&sc->rx_rings[i]);
2020                 kfree(sc->rx_rings, M_DEVBUF);
2021         }
2022
2023         /* Destroy the parent tag */
2024         if (sc->parent_tag != NULL)
2025                 bus_dma_tag_destroy(sc->parent_tag);
2026 }
2027
2028
2029 /****************************************************************************/
2030 /* Get DMA memory from the OS.                                              */
2031 /*                                                                          */
2032 /* Validates that the OS has provided DMA buffers in response to a          */
2033 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2034 /* When the callback is used the OS will return 0 for the mapping function  */
2035 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2036 /* failures back to the caller.                                             */
2037 /*                                                                          */
2038 /* Returns:                                                                 */
2039 /*   Nothing.                                                               */
2040 /****************************************************************************/
2041 static void
2042 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2043 {
2044         bus_addr_t *busaddr = arg;
2045
2046         /* If an error occurred, leave the address untouched and bail out. */
2047         if (error)
2048                 return;
2049
2050         KASSERT(nseg == 1, ("only one segment is allowed"));
2051         *busaddr = segs->ds_addr;
2052 }
2053
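/*
 * Illustrative sketch, never compiled: the single-segment load pattern
 * used throughout this driver.  bce_dma_map_addr() stores the lone
 * segment address into *paddr only when the load succeeds, so callers
 * check the bus_dmamap_load() return value itself.
 */
#if 0
static int
bce_load_block_example(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr,
    bus_size_t size, bus_addr_t *paddr)
{
        return bus_dmamap_load(tag, map, vaddr, size,
            bce_dma_map_addr, paddr, BUS_DMA_WAITOK);
}
#endif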
2054
2055 static int
2056 bce_create_tx_ring(struct bce_tx_ring *txr)
2057 {
2058         int pages, rc, i;
2059
2060         txr->tx_wreg = bce_tx_wreg;
2061
2062         pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
2063         if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
2064                 device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
2065                 pages = TX_PAGES_DEFAULT;
2066         }
2067         txr->tx_pages = pages;
2068
2069         txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
2070             M_DEVBUF, M_WAITOK | M_ZERO);
2071         txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
2072             M_DEVBUF, M_WAITOK | M_ZERO);
2073         txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
2074             M_DEVBUF, M_WAITOK | M_ZERO);
2075
2076         txr->tx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_TX_BD(txr),
2077             M_DEVBUF, M_WAITOK | M_ZERO);
2078         txr->tx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_TX_BD(txr),
2079             M_DEVBUF, M_WAITOK | M_ZERO);
2080
2081         /*
2082          * Create a DMA tag for the TX buffer descriptor chain,
2083          * allocate and clear the  memory, and fetch the
2084          * allocate and clear the memory, and fetch the
2085          */
2086         rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2087             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2088             BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2089             0, &txr->tx_bd_chain_tag);
2090         if (rc != 0) {
2091                 device_printf(txr->sc->bce_dev, "Could not allocate "
2092                     "TX descriptor chain DMA tag!\n");
2093                 return rc;
2094         }
2095
2096         for (i = 0; i < txr->tx_pages; i++) {
2097                 bus_addr_t busaddr;
2098
2099                 rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
2100                     (void **)&txr->tx_bd_chain[i],
2101                     BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2102                     &txr->tx_bd_chain_map[i]);
2103                 if (rc != 0) {
2104                         device_printf(txr->sc->bce_dev,
2105                             "Could not allocate %dth TX descriptor "
2106                             "chain DMA memory!\n", i);
2107                         return rc;
2108                 }
2109
2110                 rc = bus_dmamap_load(txr->tx_bd_chain_tag,
2111                     txr->tx_bd_chain_map[i],
2112                     txr->tx_bd_chain[i],
2113                     BCE_TX_CHAIN_PAGE_SZ,
2114                     bce_dma_map_addr, &busaddr,
2115                     BUS_DMA_WAITOK);
2116                 if (rc != 0) {
2117                         if (rc == EINPROGRESS) {
2118                                 panic("%s coherent memory loading "
2119                                     "is still in progress!",
2120                                     txr->sc->arpcom.ac_if.if_xname);
2121                         }
2122                         device_printf(txr->sc->bce_dev, "Could not map %dth "
2123                             "TX descriptor chain DMA memory!\n", i);
2124                         bus_dmamem_free(txr->tx_bd_chain_tag,
2125                             txr->tx_bd_chain[i],
2126                             txr->tx_bd_chain_map[i]);
2127                         txr->tx_bd_chain[i] = NULL;
2128                         return rc;
2129                 }
2130
2131                 txr->tx_bd_chain_paddr[i] = busaddr;
2132         }
2133
2134         /* Create a DMA tag for TX mbufs. */
2135         rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
2136             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2137             IP_MAXPACKET + sizeof(struct ether_vlan_header),
2138             BCE_MAX_SEGMENTS, PAGE_SIZE,
2139             BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2140             &txr->tx_mbuf_tag);
2141         if (rc != 0) {
2142                 device_printf(txr->sc->bce_dev,
2143                     "Could not allocate TX mbuf DMA tag!\n");
2144                 return rc;
2145         }
2146
2147         /* Create DMA maps for the TX mbufs clusters. */
2148         for (i = 0; i < TOTAL_TX_BD(txr); i++) {
2149                 rc = bus_dmamap_create(txr->tx_mbuf_tag,
2150                     BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2151                     &txr->tx_mbuf_map[i]);
2152                 if (rc != 0) {
2153                         int j;
2154
2155                         for (j = 0; j < i; ++j) {
2156                                 bus_dmamap_destroy(txr->tx_mbuf_tag,
2157                                     txr->tx_mbuf_map[j]);
2158                         }
2159                         bus_dma_tag_destroy(txr->tx_mbuf_tag);
2160                         txr->tx_mbuf_tag = NULL;
2161
2162                         device_printf(txr->sc->bce_dev, "Unable to create "
2163                             "%dth TX mbuf DMA map!\n", i);
2164                         return rc;
2165                 }
2166         }
2167         return 0;
2168 }
2169
2170
2171 static int
2172 bce_create_rx_ring(struct bce_rx_ring *rxr)
2173 {
2174         int pages, rc, i;
2175
2176         pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
2177         if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
2178                 device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
2179                 pages = RX_PAGES_DEFAULT;
2180         }
2181         rxr->rx_pages = pages;
2182
2183         rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
2184             M_DEVBUF, M_WAITOK | M_ZERO);
2185         rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
2186             M_DEVBUF, M_WAITOK | M_ZERO);
2187         rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
2188             M_DEVBUF, M_WAITOK | M_ZERO);
2189
2190         rxr->rx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_RX_BD(rxr),
2191             M_DEVBUF, M_WAITOK | M_ZERO);
2192         rxr->rx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_RX_BD(rxr),
2193             M_DEVBUF, M_WAITOK | M_ZERO);
2194         rxr->rx_mbuf_paddr = kmalloc(sizeof(bus_addr_t) * TOTAL_RX_BD(rxr),
2195             M_DEVBUF, M_WAITOK | M_ZERO);
2196
2197         /*
2198          * Create a DMA tag for the RX buffer descriptor chain,
2199          * allocate and clear the memory, and fetch the physical
2200          * address of the blocks.
2201          */
2202         rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2203             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2204             BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2205             0, &rxr->rx_bd_chain_tag);
2206         if (rc != 0) {
2207                 device_printf(rxr->sc->bce_dev, "Could not allocate "
2208                     "RX descriptor chain DMA tag!\n");
2209                 return rc;
2210         }
2211
2212         for (i = 0; i < rxr->rx_pages; i++) {
2213                 bus_addr_t busaddr;
2214
2215                 rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
2216                     (void **)&rxr->rx_bd_chain[i],
2217                     BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2218                     &rxr->rx_bd_chain_map[i]);
2219                 if (rc != 0) {
2220                         device_printf(rxr->sc->bce_dev,
2221                             "Could not allocate %dth RX descriptor "
2222                             "chain DMA memory!\n", i);
2223                         return rc;
2224                 }
2225
2226                 rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
2227                     rxr->rx_bd_chain_map[i],
2228                     rxr->rx_bd_chain[i],
2229                     BCE_RX_CHAIN_PAGE_SZ,
2230                     bce_dma_map_addr, &busaddr,
2231                     BUS_DMA_WAITOK);
2232                 if (rc != 0) {
2233                         if (rc == EINPROGRESS) {
2234                                 panic("%s coherent memory loading "
2235                                     "is still in progress!",
2236                                     rxr->sc->arpcom.ac_if.if_xname);
2237                         }
2238                         device_printf(rxr->sc->bce_dev,
2239                             "Could not map %dth RX descriptor "
2240                             "chain DMA memory!\n", i);
2241                         bus_dmamem_free(rxr->rx_bd_chain_tag,
2242                             rxr->rx_bd_chain[i],
2243                             rxr->rx_bd_chain_map[i]);
2244                         rxr->rx_bd_chain[i] = NULL;
2245                         return rc;
2246                 }
2247
2248                 rxr->rx_bd_chain_paddr[i] = busaddr;
2249         }
2250
2251         /* Create a DMA tag for RX mbufs. */
2252         rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
2253             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2254             MCLBYTES, 1, MCLBYTES,
2255             BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
2256             &rxr->rx_mbuf_tag);
2257         if (rc != 0) {
2258                 device_printf(rxr->sc->bce_dev,
2259                     "Could not allocate RX mbuf DMA tag!\n");
2260                 return rc;
2261         }
2262
2263         /* Create tmp DMA map for RX mbuf clusters. */
2264         rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2265             &rxr->rx_mbuf_tmpmap);
2266         if (rc != 0) {
2267                 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2268                 rxr->rx_mbuf_tag = NULL;
2269
2270                 device_printf(rxr->sc->bce_dev,
2271                     "Could not create RX mbuf tmp DMA map!\n");
2272                 return rc;
2273         }
2274
2275         /* Create DMA maps for the RX mbuf clusters. */
2276         for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2277                 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2278                     &rxr->rx_mbuf_map[i]);
2279                 if (rc != 0) {
2280                         int j;
2281
2282                         for (j = 0; j < i; ++j) {
2283                                 bus_dmamap_destroy(rxr->rx_mbuf_tag,
2284                                     rxr->rx_mbuf_map[j]);
2285                         }
2286                         bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2287                         rxr->rx_mbuf_tag = NULL;
2288
2289                         device_printf(rxr->sc->bce_dev, "Unable to create "
2290                             "%dth RX mbuf DMA map!\n", i);
2291                         return rc;
2292                 }
2293         }
2294         return 0;
2295 }
2296
2297
2298 /****************************************************************************/
2299 /* Allocate any DMA memory needed by the driver.                            */
2300 /*                                                                          */
2301 /* Allocates DMA memory needed for the various global structures needed by  */
2302 /* hardware.                                                                */
2303 /*                                                                          */
2304 /* Memory alignment requirements:                                           */
2305 /* -----------------+----------+----------+----------+----------+           */
2306 /*  Data Structure  |   5706   |   5708   |   5709   |   5716   |           */
2307 /* -----------------+----------+----------+----------+----------+           */
2308 /* Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2309 /* Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2310 /* RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |           */
2311 /* PG Buffers       |   none   |   none   |   none   |   none   |           */
2312 /* TX Buffers       |   none   |   none   |   none   |   none   |           */
2313 /* Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |           */
2314 /* Context Pages(1) |   N/A    |   N/A    |   4KiB   |   4KiB   |           */
2315 /* -----------------+----------+----------+----------+----------+           */
2316 /*                                                                          */
2317 /* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
2318 /*                                                                          */
2319 /* Returns:                                                                 */
2320 /*   0 for success, positive value for failure.                             */
2321 /****************************************************************************/
2322 static int
2323 bce_dma_alloc(struct bce_softc *sc)
2324 {
2325         struct ifnet *ifp = &sc->arpcom.ac_if;
2326         int i, rc = 0;
2327         bus_addr_t busaddr, max_busaddr;
2328         bus_size_t status_align, stats_align;
2329
2330         /*
2331          * The embedded PCIe to PCI-X bridge (EPB) 
2332          * in the 5708 cannot address memory above 
2333          * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). 
2334          */
2335         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
2336                 max_busaddr = BCE_BUS_SPACE_MAXADDR;
2337         else
2338                 max_busaddr = BUS_SPACE_MAXADDR;
2339
2340         /*
2341          * BCM5709 and BCM5716 use host memory as a cache for context memory.
2342          */
2343         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2344             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2345                 sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
2346                 if (sc->ctx_pages == 0)
2347                         sc->ctx_pages = 1;
2348                 if (sc->ctx_pages > BCE_CTX_PAGES) {
2349                         device_printf(sc->bce_dev, "excessive ctx pages %d\n",
2350                             sc->ctx_pages);
2351                         return ENOMEM;
2352                 }
2353                 status_align = 16;
2354                 stats_align = 16;
2355         } else {
2356                 status_align = 8;
2357                 stats_align = 8;
2358         }
2359
2360         /*
2361          * Allocate the parent bus DMA tag appropriate for PCI.
2362          */
2363         rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2364                                 max_busaddr, BUS_SPACE_MAXADDR,
2365                                 NULL, NULL,
2366                                 BUS_SPACE_MAXSIZE_32BIT, 0,
2367                                 BUS_SPACE_MAXSIZE_32BIT,
2368                                 0, &sc->parent_tag);
2369         if (rc != 0) {
2370                 if_printf(ifp, "Could not allocate parent DMA tag!\n");
2371                 return rc;
2372         }
2373
2374         /*
2375          * Allocate status block.
2376          */
2377         sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2378                                 status_align, BCE_STATUS_BLK_SZ,
2379                                 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2380                                 &sc->status_tag, &sc->status_map,
2381                                 &sc->status_block_paddr);
2382         if (sc->status_block == NULL) {
2383                 if_printf(ifp, "Could not allocate status block!\n");
2384                 return ENOMEM;
2385         }
2386
2387         /*
2388          * Allocate statistics block.
2389          */
2390         sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2391                                 stats_align, BCE_STATS_BLK_SZ,
2392                                 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2393                                 &sc->stats_tag, &sc->stats_map,
2394                                 &sc->stats_block_paddr);
2395         if (sc->stats_block == NULL) {
2396                 if_printf(ifp, "Could not allocate statistics block!\n");
2397                 return ENOMEM;
2398         }
2399
2400         /*
2401          * Allocate context block, if needed
2402          */
2403         if (sc->ctx_pages != 0) {
2404                 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2405                                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2406                                         NULL, NULL,
2407                                         BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
2408                                         0, &sc->ctx_tag);
2409                 if (rc != 0) {
2410                         if_printf(ifp, "Could not allocate "
2411                                   "context block DMA tag!\n");
2412                         return rc;
2413                 }
2414
2415                 for (i = 0; i < sc->ctx_pages; i++) {
2416                         rc = bus_dmamem_alloc(sc->ctx_tag,
2417                                               (void **)&sc->ctx_block[i],
2418                                               BUS_DMA_WAITOK | BUS_DMA_ZERO |
2419                                               BUS_DMA_COHERENT,
2420                                               &sc->ctx_map[i]);
2421                         if (rc != 0) {
2422                                 if_printf(ifp, "Could not allocate %dth context "
2423                                           "DMA memory!\n", i);
2424                                 return rc;
2425                         }
2426
2427                         rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
2428                                              sc->ctx_block[i], BCM_PAGE_SIZE,
2429                                              bce_dma_map_addr, &busaddr,
2430                                              BUS_DMA_WAITOK);
2431                         if (rc != 0) {
2432                                 if (rc == EINPROGRESS) {
2433                                         panic("%s coherent memory loading "
2434                                               "is still in progress!", ifp->if_xname);
2435                                 }
2436                                 if_printf(ifp, "Could not map %dth context "
2437                                           "DMA memory!\n", i);
2438                                 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2439                                                 sc->ctx_map[i]);
2440                                 sc->ctx_block[i] = NULL;
2441                                 return rc;
2442                         }
2443                         sc->ctx_paddr[i] = busaddr;
2444                 }
2445         }
2446
2447         sc->tx_rings = kmalloc_cachealign(
2448             sizeof(struct bce_tx_ring) * sc->ring_cnt, M_DEVBUF,
2449             M_WAITOK | M_ZERO);
2450         for (i = 0; i < sc->ring_cnt; ++i) {
2451                 sc->tx_rings[i].sc = sc;
2452
2453                 rc = bce_create_tx_ring(&sc->tx_rings[i]);
2454                 if (rc != 0) {
2455                         device_printf(sc->bce_dev,
2456                             "can't create %dth tx ring\n", i);
2457                         return rc;
2458                 }
2459         }
2460
2461         sc->rx_rings = kmalloc_cachealign(
2462             sizeof(struct bce_rx_ring) * sc->ring_cnt, M_DEVBUF,
2463             M_WAITOK | M_ZERO);
2464         for (i = 0; i < sc->ring_cnt; ++i) {
2465                 sc->rx_rings[i].sc = sc;
2466
2467                 rc = bce_create_rx_ring(&sc->rx_rings[i]);
2468                 if (rc != 0) {
2469                         device_printf(sc->bce_dev,
2470                             "can't create %dth rx ring\n", i);
2471                         return rc;
2472                 }
2473         }
2474
2475         return 0;
2476 }
2477
2478
2479 /****************************************************************************/
2480 /* Firmware synchronization.                                                */
2481 /*                                                                          */
2482 /* Before performing certain events such as a chip reset, synchronize with  */
2483 /* the firmware first.                                                      */
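/*                                                                          */
/* As used elsewhere in this file (see bce_reset() and bce_blockinit()),    */
/* the handshake brackets the operation, for example:                       */
/*                                                                          */
/*   bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);                  */
/*   ... reset the chip ...                                                 */
/*   bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);                  */
/*   bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);      */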
2484 /*                                                                          */
2485 /* Returns:                                                                 */
2486 /*   0 for success, positive value for failure.                             */
2487 /****************************************************************************/
2488 static int
2489 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2490 {
2491         int i, rc = 0;
2492         uint32_t val;
2493
2494         /* Don't waste any time if we've timed out before. */
2495         if (sc->bce_fw_timed_out)
2496                 return EBUSY;
2497
2498         /* Increment the message sequence number. */
2499         sc->bce_fw_wr_seq++;
2500         msg_data |= sc->bce_fw_wr_seq;
2501
2502         /* Send the message to the bootcode driver mailbox. */
2503         bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2504
2505         /* Wait for the bootcode to acknowledge the message. */
2506         for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2507                 /* Check for a response in the bootcode firmware mailbox. */
2508                 val = bce_shmem_rd(sc, BCE_FW_MB);
2509                 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2510                         break;
2511                 DELAY(1000);
2512         }
2513
2514         /* If we've timed out, tell the bootcode that we've stopped waiting. */
2515         if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2516             (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2517                 if_printf(&sc->arpcom.ac_if,
2518                           "Firmware synchronization timeout! "
2519                           "msg_data = 0x%08X\n", msg_data);
2520
2521                 msg_data &= ~BCE_DRV_MSG_CODE;
2522                 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2523
2524                 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2525
2526                 sc->bce_fw_timed_out = 1;
2527                 rc = EBUSY;
2528         }
2529         return rc;
2530 }
2531
2532
2533 /****************************************************************************/
2534 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2535 /*                                                                          */
2536 /* Returns:                                                                 */
2537 /*   Nothing.                                                               */
2538 /****************************************************************************/
2539 static void
2540 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2541                  uint32_t rv2p_code_len, uint32_t rv2p_proc)
2542 {
2543         int i;
2544         uint32_t val;
2545
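        /*
         * The loop below writes each 64-bit RV2P instruction as a
         * high/low word pair and then writes the instruction index
         * (i / 8), with the RDWR bit set, to the per-processor
         * address/command register to commit the pair to PROC1 or
         * PROC2 instruction memory.
         */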
2546         for (i = 0; i < rv2p_code_len; i += 8) {
2547                 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2548                 rv2p_code++;
2549                 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2550                 rv2p_code++;
2551
2552                 if (rv2p_proc == RV2P_PROC1) {
2553                         val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2554                         REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2555                 } else {
2556                         val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2557                         REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2558                 }
2559         }
2560
2561         /* Reset the processor, un-stall is done later. */
2562         if (rv2p_proc == RV2P_PROC1)
2563                 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2564         else
2565                 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2566 }
2567
2568
2569 /****************************************************************************/
2570 /* Load RISC processor firmware.                                            */
2571 /*                                                                          */
2572 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2573 /* associated with a particular processor.                                  */
2574 /*                                                                          */
2575 /* Returns:                                                                 */
2576 /*   Nothing.                                                               */
2577 /****************************************************************************/
2578 static void
2579 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2580                 struct fw_info *fw)
2581 {
2582         uint32_t offset;
2583         int j;
2584
2585         bce_halt_cpu(sc, cpu_reg);
2586
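        /*
         * Section addresses in the firmware image are given in the
         * processor's MIPS view; subtracting mips_view_base and adding
         * spad_base converts them into scratchpad offsets for the
         * indirect register writes below.
         */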
2587         /* Load the Text area. */
2588         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2589         if (fw->text) {
2590                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2591                         REG_WR_IND(sc, offset, fw->text[j]);
2592         }
2593
2594         /* Load the Data area. */
2595         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2596         if (fw->data) {
2597                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2598                         REG_WR_IND(sc, offset, fw->data[j]);
2599         }
2600
2601         /* Load the SBSS area. */
2602         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2603         if (fw->sbss) {
2604                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2605                         REG_WR_IND(sc, offset, fw->sbss[j]);
2606         }
2607
2608         /* Load the BSS area. */
2609         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2610         if (fw->bss) {
2611                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2612                         REG_WR_IND(sc, offset, fw->bss[j]);
2613         }
2614
2615         /* Load the Read-Only area. */
2616         offset = cpu_reg->spad_base +
2617                 (fw->rodata_addr - cpu_reg->mips_view_base);
2618         if (fw->rodata) {
2619                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2620                         REG_WR_IND(sc, offset, fw->rodata[j]);
2621         }
2622
2623         /* Clear the pre-fetch instruction and set the FW start address. */
2624         REG_WR_IND(sc, cpu_reg->inst, 0);
2625         REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2626 }
2627
2628
2629 /****************************************************************************/
2630 /* Starts the RISC processor.                                               */
2631 /*                                                                          */
2632 /* Assumes the CPU starting address has already been set.                   */
2633 /*                                                                          */
2634 /* Returns:                                                                 */
2635 /*   Nothing.                                                               */
2636 /****************************************************************************/
2637 static void
2638 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2639 {
2640         uint32_t val;
2641
2642         /* Start the CPU. */
2643         val = REG_RD_IND(sc, cpu_reg->mode);
2644         val &= ~cpu_reg->mode_value_halt;
2645         REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2646         REG_WR_IND(sc, cpu_reg->mode, val);
2647 }
2648
2649
2650 /****************************************************************************/
2651 /* Halts the RISC processor.                                                */
2652 /*                                                                          */
2653 /* Returns:                                                                 */
2654 /*   Nothing.                                                               */
2655 /****************************************************************************/
2656 static void
2657 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2658 {
2659         uint32_t val;
2660
2661         /* Halt the CPU. */
2662         val = REG_RD_IND(sc, cpu_reg->mode);
2663         val |= cpu_reg->mode_value_halt;
2664         REG_WR_IND(sc, cpu_reg->mode, val);
2665         REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2666 }
2667
2668
2669 /****************************************************************************/
2670 /* Start the RX CPU.                                                        */
2671 /*                                                                          */
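/* The RXP firmware is loaded by bce_init_rxp_cpu() but the processor is    */
/* left halted; bce_blockinit() calls this routine to start it once the     */
/* rings and context memory have been set up.                               */
/*                                                                          */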
2672 /* Returns:                                                                 */
2673 /*   Nothing.                                                               */
2674 /****************************************************************************/
2675 static void
2676 bce_start_rxp_cpu(struct bce_softc *sc)
2677 {
2678         struct cpu_reg cpu_reg;
2679
2680         cpu_reg.mode = BCE_RXP_CPU_MODE;
2681         cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2682         cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2683         cpu_reg.state = BCE_RXP_CPU_STATE;
2684         cpu_reg.state_value_clear = 0xffffff;
2685         cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2686         cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2687         cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2688         cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2689         cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2690         cpu_reg.spad_base = BCE_RXP_SCRATCH;
2691         cpu_reg.mips_view_base = 0x8000000;
2692
2693         bce_start_cpu(sc, &cpu_reg);
2694 }
2695
2696
2697 /****************************************************************************/
2698 /* Initialize the RX CPU.                                                   */
2699 /*                                                                          */
2700 /* Returns:                                                                 */
2701 /*   Nothing.                                                               */
2702 /****************************************************************************/
2703 static void
2704 bce_init_rxp_cpu(struct bce_softc *sc)
2705 {
2706         struct cpu_reg cpu_reg;
2707         struct fw_info fw;
2708
2709         cpu_reg.mode = BCE_RXP_CPU_MODE;
2710         cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2711         cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2712         cpu_reg.state = BCE_RXP_CPU_STATE;
2713         cpu_reg.state_value_clear = 0xffffff;
2714         cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2715         cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2716         cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2717         cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2718         cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2719         cpu_reg.spad_base = BCE_RXP_SCRATCH;
2720         cpu_reg.mips_view_base = 0x8000000;
2721
2722         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2723             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2724                 fw.ver_major = bce_RXP_b09FwReleaseMajor;
2725                 fw.ver_minor = bce_RXP_b09FwReleaseMinor;
2726                 fw.ver_fix = bce_RXP_b09FwReleaseFix;
2727                 fw.start_addr = bce_RXP_b09FwStartAddr;
2728
2729                 fw.text_addr = bce_RXP_b09FwTextAddr;
2730                 fw.text_len = bce_RXP_b09FwTextLen;
2731                 fw.text_index = 0;
2732                 fw.text = bce_RXP_b09FwText;
2733
2734                 fw.data_addr = bce_RXP_b09FwDataAddr;
2735                 fw.data_len = bce_RXP_b09FwDataLen;
2736                 fw.data_index = 0;
2737                 fw.data = bce_RXP_b09FwData;
2738
2739                 fw.sbss_addr = bce_RXP_b09FwSbssAddr;
2740                 fw.sbss_len = bce_RXP_b09FwSbssLen;
2741                 fw.sbss_index = 0;
2742                 fw.sbss = bce_RXP_b09FwSbss;
2743
2744                 fw.bss_addr = bce_RXP_b09FwBssAddr;
2745                 fw.bss_len = bce_RXP_b09FwBssLen;
2746                 fw.bss_index = 0;
2747                 fw.bss = bce_RXP_b09FwBss;
2748
2749                 fw.rodata_addr = bce_RXP_b09FwRodataAddr;
2750                 fw.rodata_len = bce_RXP_b09FwRodataLen;
2751                 fw.rodata_index = 0;
2752                 fw.rodata = bce_RXP_b09FwRodata;
2753         } else {
2754                 fw.ver_major = bce_RXP_b06FwReleaseMajor;
2755                 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2756                 fw.ver_fix = bce_RXP_b06FwReleaseFix;
2757                 fw.start_addr = bce_RXP_b06FwStartAddr;
2758
2759                 fw.text_addr = bce_RXP_b06FwTextAddr;
2760                 fw.text_len = bce_RXP_b06FwTextLen;
2761                 fw.text_index = 0;
2762                 fw.text = bce_RXP_b06FwText;
2763
2764                 fw.data_addr = bce_RXP_b06FwDataAddr;
2765                 fw.data_len = bce_RXP_b06FwDataLen;
2766                 fw.data_index = 0;
2767                 fw.data = bce_RXP_b06FwData;
2768
2769                 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2770                 fw.sbss_len = bce_RXP_b06FwSbssLen;
2771                 fw.sbss_index = 0;
2772                 fw.sbss = bce_RXP_b06FwSbss;
2773
2774                 fw.bss_addr = bce_RXP_b06FwBssAddr;
2775                 fw.bss_len = bce_RXP_b06FwBssLen;
2776                 fw.bss_index = 0;
2777                 fw.bss = bce_RXP_b06FwBss;
2778
2779                 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2780                 fw.rodata_len = bce_RXP_b06FwRodataLen;
2781                 fw.rodata_index = 0;
2782                 fw.rodata = bce_RXP_b06FwRodata;
2783         }
2784
2785         bce_load_cpu_fw(sc, &cpu_reg, &fw);
2786         /* Delay RXP start until initialization is complete. */
2787 }
2788
2789
2790 /****************************************************************************/
2791 /* Initialize the TX CPU.                                                   */
2792 /*                                                                          */
2793 /* Returns:                                                                 */
2794 /*   Nothing.                                                               */
2795 /****************************************************************************/
2796 static void
2797 bce_init_txp_cpu(struct bce_softc *sc)
2798 {
2799         struct cpu_reg cpu_reg;
2800         struct fw_info fw;
2801
2802         cpu_reg.mode = BCE_TXP_CPU_MODE;
2803         cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2804         cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2805         cpu_reg.state = BCE_TXP_CPU_STATE;
2806         cpu_reg.state_value_clear = 0xffffff;
2807         cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2808         cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2809         cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2810         cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2811         cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2812         cpu_reg.spad_base = BCE_TXP_SCRATCH;
2813         cpu_reg.mips_view_base = 0x8000000;
2814
2815         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2816             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2817                 fw.ver_major = bce_TXP_b09FwReleaseMajor;
2818                 fw.ver_minor = bce_TXP_b09FwReleaseMinor;
2819                 fw.ver_fix = bce_TXP_b09FwReleaseFix;
2820                 fw.start_addr = bce_TXP_b09FwStartAddr;
2821
2822                 fw.text_addr = bce_TXP_b09FwTextAddr;
2823                 fw.text_len = bce_TXP_b09FwTextLen;
2824                 fw.text_index = 0;
2825                 fw.text = bce_TXP_b09FwText;
2826
2827                 fw.data_addr = bce_TXP_b09FwDataAddr;
2828                 fw.data_len = bce_TXP_b09FwDataLen;
2829                 fw.data_index = 0;
2830                 fw.data = bce_TXP_b09FwData;
2831
2832                 fw.sbss_addr = bce_TXP_b09FwSbssAddr;
2833                 fw.sbss_len = bce_TXP_b09FwSbssLen;
2834                 fw.sbss_index = 0;
2835                 fw.sbss = bce_TXP_b09FwSbss;
2836
2837                 fw.bss_addr = bce_TXP_b09FwBssAddr;
2838                 fw.bss_len = bce_TXP_b09FwBssLen;
2839                 fw.bss_index = 0;
2840                 fw.bss = bce_TXP_b09FwBss;
2841
2842                 fw.rodata_addr = bce_TXP_b09FwRodataAddr;
2843                 fw.rodata_len = bce_TXP_b09FwRodataLen;
2844                 fw.rodata_index = 0;
2845                 fw.rodata = bce_TXP_b09FwRodata;
2846         } else {
2847                 fw.ver_major = bce_TXP_b06FwReleaseMajor;
2848                 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2849                 fw.ver_fix = bce_TXP_b06FwReleaseFix;
2850                 fw.start_addr = bce_TXP_b06FwStartAddr;
2851
2852                 fw.text_addr = bce_TXP_b06FwTextAddr;
2853                 fw.text_len = bce_TXP_b06FwTextLen;
2854                 fw.text_index = 0;
2855                 fw.text = bce_TXP_b06FwText;
2856
2857                 fw.data_addr = bce_TXP_b06FwDataAddr;
2858                 fw.data_len = bce_TXP_b06FwDataLen;
2859                 fw.data_index = 0;
2860                 fw.data = bce_TXP_b06FwData;
2861
2862                 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2863                 fw.sbss_len = bce_TXP_b06FwSbssLen;
2864                 fw.sbss_index = 0;
2865                 fw.sbss = bce_TXP_b06FwSbss;
2866
2867                 fw.bss_addr = bce_TXP_b06FwBssAddr;
2868                 fw.bss_len = bce_TXP_b06FwBssLen;
2869                 fw.bss_index = 0;
2870                 fw.bss = bce_TXP_b06FwBss;
2871
2872                 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2873                 fw.rodata_len = bce_TXP_b06FwRodataLen;
2874                 fw.rodata_index = 0;
2875                 fw.rodata = bce_TXP_b06FwRodata;
2876         }
2877
2878         bce_load_cpu_fw(sc, &cpu_reg, &fw);
2879         bce_start_cpu(sc, &cpu_reg);
2880 }
2881
2882
2883 /****************************************************************************/
2884 /* Initialize the TPAT CPU.                                                 */
2885 /*                                                                          */
2886 /* Returns:                                                                 */
2887 /*   Nothing.                                                               */
2888 /****************************************************************************/
2889 static void
2890 bce_init_tpat_cpu(struct bce_softc *sc)
2891 {
2892         struct cpu_reg cpu_reg;
2893         struct fw_info fw;
2894
2895         cpu_reg.mode = BCE_TPAT_CPU_MODE;
2896         cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2897         cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2898         cpu_reg.state = BCE_TPAT_CPU_STATE;
2899         cpu_reg.state_value_clear = 0xffffff;
2900         cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2901         cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2902         cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2903         cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2904         cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2905         cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2906         cpu_reg.mips_view_base = 0x8000000;
2907
2908         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2909             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2910                 fw.ver_major = bce_TPAT_b09FwReleaseMajor;
2911                 fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
2912                 fw.ver_fix = bce_TPAT_b09FwReleaseFix;
2913                 fw.start_addr = bce_TPAT_b09FwStartAddr;
2914
2915                 fw.text_addr = bce_TPAT_b09FwTextAddr;
2916                 fw.text_len = bce_TPAT_b09FwTextLen;
2917                 fw.text_index = 0;
2918                 fw.text = bce_TPAT_b09FwText;
2919
2920                 fw.data_addr = bce_TPAT_b09FwDataAddr;
2921                 fw.data_len = bce_TPAT_b09FwDataLen;
2922                 fw.data_index = 0;
2923                 fw.data = bce_TPAT_b09FwData;
2924
2925                 fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
2926                 fw.sbss_len = bce_TPAT_b09FwSbssLen;
2927                 fw.sbss_index = 0;
2928                 fw.sbss = bce_TPAT_b09FwSbss;
2929
2930                 fw.bss_addr = bce_TPAT_b09FwBssAddr;
2931                 fw.bss_len = bce_TPAT_b09FwBssLen;
2932                 fw.bss_index = 0;
2933                 fw.bss = bce_TPAT_b09FwBss;
2934
2935                 fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
2936                 fw.rodata_len = bce_TPAT_b09FwRodataLen;
2937                 fw.rodata_index = 0;
2938                 fw.rodata = bce_TPAT_b09FwRodata;
2939         } else {
2940                 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2941                 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2942                 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2943                 fw.start_addr = bce_TPAT_b06FwStartAddr;
2944
2945                 fw.text_addr = bce_TPAT_b06FwTextAddr;
2946                 fw.text_len = bce_TPAT_b06FwTextLen;
2947                 fw.text_index = 0;
2948                 fw.text = bce_TPAT_b06FwText;
2949
2950                 fw.data_addr = bce_TPAT_b06FwDataAddr;
2951                 fw.data_len = bce_TPAT_b06FwDataLen;
2952                 fw.data_index = 0;
2953                 fw.data = bce_TPAT_b06FwData;
2954
2955                 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
2956                 fw.sbss_len = bce_TPAT_b06FwSbssLen;
2957                 fw.sbss_index = 0;
2958                 fw.sbss = bce_TPAT_b06FwSbss;
2959
2960                 fw.bss_addr = bce_TPAT_b06FwBssAddr;
2961                 fw.bss_len = bce_TPAT_b06FwBssLen;
2962                 fw.bss_index = 0;
2963                 fw.bss = bce_TPAT_b06FwBss;
2964
2965                 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
2966                 fw.rodata_len = bce_TPAT_b06FwRodataLen;
2967                 fw.rodata_index = 0;
2968                 fw.rodata = bce_TPAT_b06FwRodata;
2969         }
2970
2971         bce_load_cpu_fw(sc, &cpu_reg, &fw);
2972         bce_start_cpu(sc, &cpu_reg);
2973 }
2974
2975
2976 /****************************************************************************/
2977 /* Initialize the CP CPU.                                                   */
2978 /*                                                                          */
2979 /* Returns:                                                                 */
2980 /*   Nothing.                                                               */
2981 /****************************************************************************/
2982 static void
2983 bce_init_cp_cpu(struct bce_softc *sc)
2984 {
2985         struct cpu_reg cpu_reg;
2986         struct fw_info fw;
2987
2988         cpu_reg.mode = BCE_CP_CPU_MODE;
2989         cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
2990         cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
2991         cpu_reg.state = BCE_CP_CPU_STATE;
2992         cpu_reg.state_value_clear = 0xffffff;
2993         cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
2994         cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
2995         cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
2996         cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
2997         cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
2998         cpu_reg.spad_base = BCE_CP_SCRATCH;
2999         cpu_reg.mips_view_base = 0x8000000;
3000
3001         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3002             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3003                 fw.ver_major = bce_CP_b09FwReleaseMajor;
3004                 fw.ver_minor = bce_CP_b09FwReleaseMinor;
3005                 fw.ver_fix = bce_CP_b09FwReleaseFix;
3006                 fw.start_addr = bce_CP_b09FwStartAddr;
3007
3008                 fw.text_addr = bce_CP_b09FwTextAddr;
3009                 fw.text_len = bce_CP_b09FwTextLen;
3010                 fw.text_index = 0;
3011                 fw.text = bce_CP_b09FwText;
3012
3013                 fw.data_addr = bce_CP_b09FwDataAddr;
3014                 fw.data_len = bce_CP_b09FwDataLen;
3015                 fw.data_index = 0;
3016                 fw.data = bce_CP_b09FwData;
3017
3018                 fw.sbss_addr = bce_CP_b09FwSbssAddr;
3019                 fw.sbss_len = bce_CP_b09FwSbssLen;
3020                 fw.sbss_index = 0;
3021                 fw.sbss = bce_CP_b09FwSbss;
3022
3023                 fw.bss_addr = bce_CP_b09FwBssAddr;
3024                 fw.bss_len = bce_CP_b09FwBssLen;
3025                 fw.bss_index = 0;
3026                 fw.bss = bce_CP_b09FwBss;
3027
3028                 fw.rodata_addr = bce_CP_b09FwRodataAddr;
3029                 fw.rodata_len = bce_CP_b09FwRodataLen;
3030                 fw.rodata_index = 0;
3031                 fw.rodata = bce_CP_b09FwRodata;
3032         } else {
3033                 fw.ver_major = bce_CP_b06FwReleaseMajor;
3034                 fw.ver_minor = bce_CP_b06FwReleaseMinor;
3035                 fw.ver_fix = bce_CP_b06FwReleaseFix;
3036                 fw.start_addr = bce_CP_b06FwStartAddr;
3037
3038                 fw.text_addr = bce_CP_b06FwTextAddr;
3039                 fw.text_len = bce_CP_b06FwTextLen;
3040                 fw.text_index = 0;
3041                 fw.text = bce_CP_b06FwText;
3042
3043                 fw.data_addr = bce_CP_b06FwDataAddr;
3044                 fw.data_len = bce_CP_b06FwDataLen;
3045                 fw.data_index = 0;
3046                 fw.data = bce_CP_b06FwData;
3047
3048                 fw.sbss_addr = bce_CP_b06FwSbssAddr;
3049                 fw.sbss_len = bce_CP_b06FwSbssLen;
3050                 fw.sbss_index = 0;
3051                 fw.sbss = bce_CP_b06FwSbss;
3052
3053                 fw.bss_addr = bce_CP_b06FwBssAddr;
3054                 fw.bss_len = bce_CP_b06FwBssLen;
3055                 fw.bss_index = 0;
3056                 fw.bss = bce_CP_b06FwBss;
3057
3058                 fw.rodata_addr = bce_CP_b06FwRodataAddr;
3059                 fw.rodata_len = bce_CP_b06FwRodataLen;
3060                 fw.rodata_index = 0;
3061                 fw.rodata = bce_CP_b06FwRodata;
3062         }
3063
3064         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3065         bce_start_cpu(sc, &cpu_reg);
3066 }
3067
3068
3069 /****************************************************************************/
3070 /* Initialize the COM CPU.                                                  */
3071 /*                                                                          */
3072 /* Returns:                                                                 */
3073 /*   Nothing.                                                               */
3074 /****************************************************************************/
3075 static void
3076 bce_init_com_cpu(struct bce_softc *sc)
3077 {
3078         struct cpu_reg cpu_reg;
3079         struct fw_info fw;
3080
3081         cpu_reg.mode = BCE_COM_CPU_MODE;
3082         cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3083         cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3084         cpu_reg.state = BCE_COM_CPU_STATE;
3085         cpu_reg.state_value_clear = 0xffffff;
3086         cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3087         cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3088         cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3089         cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3090         cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3091         cpu_reg.spad_base = BCE_COM_SCRATCH;
3092         cpu_reg.mips_view_base = 0x8000000;
3093
3094         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3095             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3096                 fw.ver_major = bce_COM_b09FwReleaseMajor;
3097                 fw.ver_minor = bce_COM_b09FwReleaseMinor;
3098                 fw.ver_fix = bce_COM_b09FwReleaseFix;
3099                 fw.start_addr = bce_COM_b09FwStartAddr;
3100
3101                 fw.text_addr = bce_COM_b09FwTextAddr;
3102                 fw.text_len = bce_COM_b09FwTextLen;
3103                 fw.text_index = 0;
3104                 fw.text = bce_COM_b09FwText;
3105
3106                 fw.data_addr = bce_COM_b09FwDataAddr;
3107                 fw.data_len = bce_COM_b09FwDataLen;
3108                 fw.data_index = 0;
3109                 fw.data = bce_COM_b09FwData;
3110
3111                 fw.sbss_addr = bce_COM_b09FwSbssAddr;
3112                 fw.sbss_len = bce_COM_b09FwSbssLen;
3113                 fw.sbss_index = 0;
3114                 fw.sbss = bce_COM_b09FwSbss;
3115
3116                 fw.bss_addr = bce_COM_b09FwBssAddr;
3117                 fw.bss_len = bce_COM_b09FwBssLen;
3118                 fw.bss_index = 0;
3119                 fw.bss = bce_COM_b09FwBss;
3120
3121                 fw.rodata_addr = bce_COM_b09FwRodataAddr;
3122                 fw.rodata_len = bce_COM_b09FwRodataLen;
3123                 fw.rodata_index = 0;
3124                 fw.rodata = bce_COM_b09FwRodata;
3125         } else {
3126                 fw.ver_major = bce_COM_b06FwReleaseMajor;
3127                 fw.ver_minor = bce_COM_b06FwReleaseMinor;
3128                 fw.ver_fix = bce_COM_b06FwReleaseFix;
3129                 fw.start_addr = bce_COM_b06FwStartAddr;
3130
3131                 fw.text_addr = bce_COM_b06FwTextAddr;
3132                 fw.text_len = bce_COM_b06FwTextLen;
3133                 fw.text_index = 0;
3134                 fw.text = bce_COM_b06FwText;
3135
3136                 fw.data_addr = bce_COM_b06FwDataAddr;
3137                 fw.data_len = bce_COM_b06FwDataLen;
3138                 fw.data_index = 0;
3139                 fw.data = bce_COM_b06FwData;
3140
3141                 fw.sbss_addr = bce_COM_b06FwSbssAddr;
3142                 fw.sbss_len = bce_COM_b06FwSbssLen;
3143                 fw.sbss_index = 0;
3144                 fw.sbss = bce_COM_b06FwSbss;
3145
3146                 fw.bss_addr = bce_COM_b06FwBssAddr;
3147                 fw.bss_len = bce_COM_b06FwBssLen;
3148                 fw.bss_index = 0;
3149                 fw.bss = bce_COM_b06FwBss;
3150
3151                 fw.rodata_addr = bce_COM_b06FwRodataAddr;
3152                 fw.rodata_len = bce_COM_b06FwRodataLen;
3153                 fw.rodata_index = 0;
3154                 fw.rodata = bce_COM_b06FwRodata;
3155         }
3156
3157         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3158         bce_start_cpu(sc, &cpu_reg);
3159 }
3160
3161
3162 /****************************************************************************/
3163 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
3164 /*                                                                          */
3165 /* Loads the firmware for each CPU and starts all but the RX CPU, whose     */
/* start is deferred until bce_blockinit().                                 */
3166 /*                                                                          */
3167 /* Returns:                                                                 */
3168 /*   Nothing.                                                               */
3169 /****************************************************************************/
3170 static void
3171 bce_init_cpus(struct bce_softc *sc)
3172 {
3173         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3174             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3175                 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3176                         bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3177                             sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3178                         bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3179                             sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3180                 } else {
3181                         bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3182                             sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3183                         bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3184                             sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3185                 }
3186         } else {
3187                 bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3188                     sizeof(bce_rv2p_proc1), RV2P_PROC1);
3189                 bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3190                     sizeof(bce_rv2p_proc2), RV2P_PROC2);
3191         }
3192
3193         bce_init_rxp_cpu(sc);
3194         bce_init_txp_cpu(sc);
3195         bce_init_tpat_cpu(sc);
3196         bce_init_com_cpu(sc);
3197         bce_init_cp_cpu(sc);
3198 }
3199
3200
3201 /****************************************************************************/
3202 /* Initialize context memory.                                               */
3203 /*                                                                          */
3204 /* Clears the memory associated with each Context ID (CID).                 */
3205 /*                                                                          */
3206 /* Returns:                                                                 */
3207 /*   0 for success, positive value for failure.                             */
3208 /****************************************************************************/
3209 static int
3210 bce_init_ctx(struct bce_softc *sc)
3211 {
3212         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3213             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3214                 /* DRC: Replace this constant value with a #define. */
3215                 int i, retry_cnt = 10;
3216                 uint32_t val;
3217
3218                 /*
3219                  * BCM5709 context memory may be cached
3220                  * in host memory so prepare the host memory
3221                  * for access.
3222                  */
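                /*
                 * The page size is encoded as log2(BCM_PAGE_SIZE) - 8,
                 * i.e. in units of 256 bytes; the same encoding is
                 * used for BCE_RV2P_CONFIG and BCE_TBDR_CONFIG in
                 * bce_chipinit().
                 */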
3223                 val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
3224                     (1 << 12);
3225                 val |= (BCM_PAGE_BITS - 8) << 16;
3226                 REG_WR(sc, BCE_CTX_COMMAND, val);
3227
3228                 /* Wait for mem init command to complete. */
3229                 for (i = 0; i < retry_cnt; i++) {
3230                         val = REG_RD(sc, BCE_CTX_COMMAND);
3231                         if (!(val & BCE_CTX_COMMAND_MEM_INIT))
3232                                 break;
3233                         DELAY(2);
3234                 }
3235                 if (i == retry_cnt) {
3236                         device_printf(sc->bce_dev,
3237                             "Context memory initialization failed!\n");
3238                         return ETIMEDOUT;
3239                 }
3240
3241                 for (i = 0; i < sc->ctx_pages; i++) {
3242                         int j;
3243
3244                         /*
3245                          * Set the physical address of the context
3246                          * memory cache.
3247                          */
3248                         REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
3249                             BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
3250                             BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
3251                         REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
3252                             BCE_ADDR_HI(sc->ctx_paddr[i]));
3253                         REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
3254                             i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3255
3256                         /*
3257                          * Verify that the context memory write was successful.
3258                          */
3259                         for (j = 0; j < retry_cnt; j++) {
3260                                 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
3261                                 if ((val &
3262                                     BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3263                                         break;
3264                                 DELAY(5);
3265                         }
3266                         if (j == retry_cnt) {
3267                                 device_printf(sc->bce_dev,
3268                                     "Failed to initialize context page!\n");
3269                                 return ETIMEDOUT;
3270                         }
3271                 }
3272         } else {
3273                 uint32_t vcid_addr, offset;
3274
3275                 /*
3276                  * For the 5706/5708, context memory is local to
3277                  * the controller, so initialize the controller
3278                  * context memory.
3279                  */
3280
3281                 vcid_addr = GET_CID_ADDR(96);
3282                 while (vcid_addr) {
3283                         vcid_addr -= PHY_CTX_SIZE;
3284
3285                         REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
3286                         REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3287
3288                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3289                                 CTX_WR(sc, 0x00, offset, 0);
3290
3291                         REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3292                         REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3293                 }
3294         }
3295         return 0;
3296 }
3297
3298
3299 /****************************************************************************/
3300 /* Fetch the permanent MAC address of the controller.                       */
3301 /*                                                                          */
3302 /* Returns:                                                                 */
3303 /*   Nothing.                                                               */
3304 /****************************************************************************/
3305 static void
3306 bce_get_mac_addr(struct bce_softc *sc)
3307 {
3308         uint32_t mac_lo = 0, mac_hi = 0;
3309
3310         /*
3311          * The NetXtreme II bootcode populates various NIC
3312          * power-on and runtime configuration items in a
3313          * shared memory area.  The factory configured MAC
3314          * address is available from both NVRAM and the
3315          * shared memory area so we'll read the value from
3316          * shared memory for speed.
3317          */
3318
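        /*
         * MAC_UPPER holds the two most significant bytes of the address
         * in its low 16 bits and MAC_LOWER the remaining four bytes,
         * e.g. aa:bb:cc:dd:ee:ff is read back as mac_hi = 0x0000aabb
         * and mac_lo = 0xccddeeff.
         */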
3319         mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
3320         mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
3321
3322         if (mac_lo == 0 && mac_hi == 0) {
3323                 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
3324         } else {
3325                 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3326                 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3327                 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3328                 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3329                 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3330                 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3331         }
3332 }
3333
3334
3335 /****************************************************************************/
3336 /* Program the MAC address.                                                 */
3337 /*                                                                          */
3338 /* Returns:                                                                 */
3339 /*   Nothing.                                                               */
3340 /****************************************************************************/
3341 static void
3342 bce_set_mac_addr(struct bce_softc *sc)
3343 {
3344         const uint8_t *mac_addr = sc->eaddr;
3345         uint32_t val;
3346
3347         val = (mac_addr[0] << 8) | mac_addr[1];
3348         REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3349
3350         val = (mac_addr[2] << 24) |
3351               (mac_addr[3] << 16) |
3352               (mac_addr[4] << 8) |
3353               mac_addr[5];
3354         REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3355 }
3356
3357
3358 /****************************************************************************/
3359 /* Stop the controller.                                                     */
3360 /*                                                                          */
3361 /* Returns:                                                                 */
3362 /*   Nothing.                                                               */
3363 /****************************************************************************/
3364 static void
3365 bce_stop(struct bce_softc *sc)
3366 {
3367         struct ifnet *ifp = &sc->arpcom.ac_if;
3368         int i;
3369
3370         ASSERT_SERIALIZED(ifp->if_serializer);
3371
3372         callout_stop(&sc->bce_tick_callout);
3373
3374         /* Disable the transmit/receive blocks. */
3375         REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
3376         REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3377         DELAY(20);
3378
3379         bce_disable_intr(sc);
3380
3381         /* Free the RX lists. */
3382         for (i = 0; i < sc->ring_cnt; ++i)
3383                 bce_free_rx_chain(&sc->rx_rings[i]);
3384
3385         /* Free TX buffers. */
3386         for (i = 0; i < sc->ring_cnt; ++i)
3387                 bce_free_tx_chain(&sc->tx_rings[i]);
3388
3389         sc->bce_link = 0;
3390         sc->bce_coalchg_mask = 0;
3391
3392         ifp->if_flags &= ~IFF_RUNNING;
3393         ifq_clr_oactive(&ifp->if_snd);
3394         ifp->if_timer = 0;
3395 }
3396
3397
3398 static int
3399 bce_reset(struct bce_softc *sc, uint32_t reset_code)
3400 {
3401         uint32_t val;
3402         int i, rc = 0;
3403
3404         /* Wait for pending PCI transactions to complete. */
3405         REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3406                BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3407                BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3408                BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3409                BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3410         val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3411         DELAY(5);
3412
3413         /* Disable DMA */
3414         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3415             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3416                 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3417                 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3418                 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3419         }
3420
3421         /* Assume bootcode is running. */
3422         sc->bce_fw_timed_out = 0;
3423         sc->bce_drv_cardiac_arrest = 0;
3424
3425         /* Give the firmware a chance to prepare for the reset. */
3426         rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3427         if (rc) {
3428                 if_printf(&sc->arpcom.ac_if,
3429                           "Firmware is not ready for reset\n");
3430                 return rc;
3431         }
3432
3433         /* Set a firmware reminder that this is a soft reset. */
3434         bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
3435             BCE_DRV_RESET_SIGNATURE_MAGIC);
3436
3437         /* Dummy read to force the chip to complete all current transactions. */
3438         val = REG_RD(sc, BCE_MISC_ID);
3439
3440         /* Chip reset. */
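        /*
         * The 5709/5716 are reset through the MISC_COMMAND register and
         * are only given a short settling delay, while the older chips
         * are reset through PCICFG_MISC_CONFIG and polled below for
         * completion.
         */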
3441         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3442             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3443                 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
3444                 REG_RD(sc, BCE_MISC_COMMAND);
3445                 DELAY(5);
3446
3447                 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3448                     BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3449
3450                 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
3451         } else {
3452                 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3453                     BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3454                     BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3455                 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3456
3457                 /* Reset should take ~30us; poll up to 100us for completion. */
3458                 for (i = 0; i < 10; i++) {
3459                         val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3460                         if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3461                             BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3462                                 break;
3463                         DELAY(10);
3464                 }
3465
3466                 /* Check that reset completed successfully. */
3467                 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3468                     BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3469                         if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
3470                         return EBUSY;
3471                 }
3472         }
3473
3474         /* Make sure byte swapping is properly configured. */
3475         val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3476         if (val != 0x01020304) {
3477                 if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
3478                 return ENODEV;
3479         }
3480
3481         /* Just completed a reset; assume the firmware is running again. */
3482         sc->bce_fw_timed_out = 0;
3483         sc->bce_drv_cardiac_arrest = 0;
3484
3485         /* Wait for the firmware to finish its initialization. */
3486         rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3487         if (rc) {
3488                 if_printf(&sc->arpcom.ac_if,
3489                           "Firmware did not complete initialization!\n");
3490         }
3491         return rc;
3492 }
3493
3494
3495 static int
3496 bce_chipinit(struct bce_softc *sc)
3497 {
3498         uint32_t val;
3499         int rc = 0;
3500
3501         /* Make sure the interrupt is not active. */
3502         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3503         REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
3504
3505         /*
3506          * Initialize DMA byte/word swapping, configure the number of DMA
3507          * channels and PCI clock compensation delay.
3508          */
3509         val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3510               BCE_DMA_CONFIG_DATA_WORD_SWAP |
3511 #if BYTE_ORDER == BIG_ENDIAN
3512               BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3513 #endif
3514               BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3515               DMA_READ_CHANS << 12 |
3516               DMA_WRITE_CHANS << 16;
3517
3518         val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3519
3520         if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3521                 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3522
3523         /*
3524          * This setting resolves a problem observed on certain Intel PCI
3525          * chipsets that cannot handle multiple outstanding DMA operations.
3526          * See errata E9_5706A1_65.
3527          */
3528         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3529             BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3530             !(sc->bce_flags & BCE_PCIX_FLAG))
3531                 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3532
3533         REG_WR(sc, BCE_DMA_CONFIG, val);
3534
3535         /* Enable the RX_V2P and Context state machines before access. */
3536         REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3537                BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3538                BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3539                BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3540
3541         /* Initialize context mapping and zero out the quick contexts. */
3542         rc = bce_init_ctx(sc);
3543         if (rc != 0)
3544                 return rc;
3545
3546         /* Initialize the on-board CPUs. */
3547         bce_init_cpus(sc);
3548
3549         /* Enable management frames (NC-SI) to flow to the MCP. */
3550         if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3551                 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
3552                     BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3553                 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3554         }
3555
3556         /* Prepare NVRAM for access. */
3557         rc = bce_init_nvram(sc);
3558         if (rc != 0)
3559                 return rc;
3560
3561         /* Set the kernel bypass block size */
3562         val = REG_RD(sc, BCE_MQ_CONFIG);
3563         val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3564         val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3565
3566         /* Enable bins used on the 5709/5716. */
3567         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3568             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3569                 val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
3570                 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
3571                         val |= BCE_MQ_CONFIG_HALT_DIS;
3572         }
3573
3574         REG_WR(sc, BCE_MQ_CONFIG, val);
3575
3576         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3577         REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3578         REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3579
3580         /* Set the page size and clear the RV2P processor stall bits. */
3581         val = (BCM_PAGE_BITS - 8) << 24;
3582         REG_WR(sc, BCE_RV2P_CONFIG, val);
3583
3584         /* Configure page size. */
3585         val = REG_RD(sc, BCE_TBDR_CONFIG);
3586         val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3587         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3588         REG_WR(sc, BCE_TBDR_CONFIG, val);
3589
3590         /* Set the perfect match control register to default. */
3591         REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
3592
3593         return 0;
3594 }
3595
3596
3597 /****************************************************************************/
3598 /* Initialize the controller in preparation to send/receive traffic.        */
3599 /*                                                                          */
3600 /* Returns:                                                                 */
3601 /*   0 for success, positive value for failure.                             */
3602 /****************************************************************************/
3603 static int
3604 bce_blockinit(struct bce_softc *sc)
3605 {
3606         uint32_t reg, val;
3607
3608         /* Load the hardware default MAC address. */
3609         bce_set_mac_addr(sc);
3610
3611         /* Set the Ethernet backoff seed value */
3612         val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3613               sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3614         REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3615
3616         sc->last_status_idx = 0;
3617         sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3618
3619         /* Set up link change interrupt generation. */
3620         REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3621
3622         /* Program the physical address of the status block. */
3623         REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3624         REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3625
3626         /* Program the physical address of the statistics block. */
3627         REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3628                BCE_ADDR_LO(sc->stats_block_paddr));
3629         REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3630                BCE_ADDR_HI(sc->stats_block_paddr));
3631
3632         /* Program various host coalescing parameters. */
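        /*
         * Most of the writes below pack the "_int" variant of each
         * parameter (which appears to apply while an interrupt is being
         * serviced) into the upper 16 bits and the normal value into
         * the lower 16 bits.
         */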
3633         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3634                (sc->bce_tx_quick_cons_trip_int << 16) |
3635                sc->bce_tx_quick_cons_trip);
3636         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3637                (sc->bce_rx_quick_cons_trip_int << 16) |
3638                sc->bce_rx_quick_cons_trip);
3639         REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3640                (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3641         REG_WR(sc, BCE_HC_TX_TICKS,
3642                (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3643         REG_WR(sc, BCE_HC_RX_TICKS,
3644                (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3645         REG_WR(sc, BCE_HC_COM_TICKS,
3646                (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3647         REG_WR(sc, BCE_HC_CMD_TICKS,
3648                (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3649         REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3650         REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);   /* 3ms */
3651
3652         val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
3653         if (sc->bce_flags & BCE_ONESHOT_MSI_FLAG) {
3654                 if (bootverbose)
3655                         if_printf(&sc->arpcom.ac_if, "oneshot MSI\n");
3656                 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
3657         }
3658         REG_WR(sc, BCE_HC_CONFIG, val);
3659
3660         /* Clear the internal statistics counters. */
3661         REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3662
3663         /* Verify that bootcode is running. */
3664         reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
3665
3666         if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3667             BCE_DEV_INFO_SIGNATURE_MAGIC) {
3668                 if_printf(&sc->arpcom.ac_if,
3669                           "Bootcode not running! Found: 0x%08X, "
3670                           "Expected: 0x%08X\n",
3671                           reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3672                           BCE_DEV_INFO_SIGNATURE_MAGIC);
3673                 return ENODEV;
3674         }
3675
3676         /* Enable DMA */
3677         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3678             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3679                 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3680                 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3681                 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3682         }
3683
3684         /* Allow bootcode to apply any additional fixes before enabling MAC. */
3685         bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3686
3687         /* Enable link state change interrupt generation. */
3688         REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3689
3690         /* Enable the RXP. */
3691         bce_start_rxp_cpu(sc);
3692
3693         /* Disable management frames (NC-SI) from flowing to the MCP. */
3694         if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3695                 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
3696                     ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3697                 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3698         }
3699
3700         /* Enable all remaining blocks in the MAC. */
3701         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3702             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3703                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3704                     BCE_MISC_ENABLE_DEFAULT_XI);
3705         } else {
3706                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
3707         }
3708         REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3709         DELAY(20);
3710
3711         /* Save the current host coalescing block settings. */
3712         sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
3713
3714         return 0;
3715 }
3716
3717
3718 /****************************************************************************/
3719 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3720 /*                                                                          */
3721 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3722 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3723 /* necessary.                                                               */
3724 /*                                                                          */
3725 /* Returns:                                                                 */
3726 /*   0 for success, positive value for failure.                             */
3727 /****************************************************************************/
3728 static int
3729 bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t *chain_prod,
3730     uint32_t *prod_bseq, int init)
3731 {
3732         bus_dmamap_t map;
3733         bus_dma_segment_t seg;
3734         struct mbuf *m_new;
3735         int error, nseg;
3736
3737         /* This is a new mbuf allocation. */
3738         m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3739         if (m_new == NULL)
3740                 return ENOBUFS;
3741
3742         m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3743
3744         /* Map the mbuf cluster into device memory. */
3745         error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
3746             rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
3747         if (error) {
3748                 m_freem(m_new);
3749                 if (init) {
3750                         if_printf(&rxr->sc->arpcom.ac_if,
3751                             "Error mapping mbuf into RX chain!\n");
3752                 }
3753                 return error;
3754         }
3755
3756         if (rxr->rx_mbuf_ptr[*chain_prod] != NULL) {
3757                 bus_dmamap_unload(rxr->rx_mbuf_tag,
3758                     rxr->rx_mbuf_map[*chain_prod]);
3759         }
3760
3761         map = rxr->rx_mbuf_map[*chain_prod];
3762         rxr->rx_mbuf_map[*chain_prod] = rxr->rx_mbuf_tmpmap;
3763         rxr->rx_mbuf_tmpmap = map;
3764
3765         /* Save the mbuf and update our counter. */
3766         rxr->rx_mbuf_ptr[*chain_prod] = m_new;
3767         rxr->rx_mbuf_paddr[*chain_prod] = seg.ds_addr;
3768         rxr->free_rx_bd--;
3769
3770         bce_setup_rxdesc_std(rxr, *chain_prod, prod_bseq);
3771
3772         return 0;
3773 }
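
/*
 * Illustrative sketch (not driver code): the spare-map swap used in
 * bce_newbuf_std() above.  A new mbuf is always loaded into the ring's
 * single spare DMA map first; only after the load succeeds are the spare
 * map and the slot's map exchanged, so a failed load never leaves a ring
 * slot without a valid, loaded map.  All names below are hypothetical.
 */
#if 0
struct toy_rx_slot {
	void	*buf;		/* buffer owned by this slot */
	void	*dmamap;	/* DMA map currently describing it */
};

static void
toy_commit_new_buffer(struct toy_rx_slot *slot, void **spare_map, void *new_buf)
{
	void *old_map = slot->dmamap;

	/* The freshly loaded spare becomes the slot's map... */
	slot->dmamap = *spare_map;
	/* ...and the slot's old, now idle map becomes the next spare. */
	*spare_map = old_map;
	slot->buf = new_buf;
}
#endif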
3774
3775
3776 static void
3777 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
3778     uint32_t *prod_bseq)
3779 {
3780         struct rx_bd *rxbd;
3781         bus_addr_t paddr;
3782         int len;
3783
3784         paddr = rxr->rx_mbuf_paddr[chain_prod];
3785         len = rxr->rx_mbuf_ptr[chain_prod]->m_len;
3786
3787         /* Setup the rx_bd for the first segment. */
3788         rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3789
3790         rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3791         rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3792         rxbd->rx_bd_len = htole32(len);
3793         rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3794         *prod_bseq += len;
3795
3796         rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3797 }
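
/*
 * Illustrative sketch (not driver code): an rx_bd holds the buffer's bus
 * address as two 32-bit words, split the same way BCE_ADDR_HI() and
 * BCE_ADDR_LO() are used above, while prod_bseq simply accumulates the
 * length of every buffer posted to the hardware.
 */
#if 0
#include <stdint.h>

static void
toy_post_buffer(uint64_t paddr, uint32_t len, uint32_t *bseq,
    uint32_t *haddr_hi, uint32_t *haddr_lo)
{
	*haddr_hi = (uint32_t)(paddr >> 32);		/* upper 32 bits */
	*haddr_lo = (uint32_t)(paddr & 0xffffffffu);	/* lower 32 bits */
	*bseq += len;					/* running byte count */
}
#endif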
3798
3799
3800 /****************************************************************************/
3801 /* Initialize the TX context memory.                                        */
3802 /*                                                                          */
3803 /* Returns:                                                                 */
3804 /*   Nothing                                                                */
3805 /****************************************************************************/
3806 static void
3807 bce_init_tx_context(struct bce_tx_ring *txr)
3808 {
3809         uint32_t val;
3810
3811         /* Initialize the context ID for an L2 TX chain. */
3812         if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
3813             BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
3814                 /* Set the CID type to support an L2 connection. */
3815                 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3816                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3817                     BCE_L2CTX_TX_TYPE_XI, val);
3818                 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3819                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3820                     BCE_L2CTX_TX_CMD_TYPE_XI, val);
3821
3822                 /* Point the hardware to the first page in the chain. */
3823                 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3824                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3825                     BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
3826                 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3827                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3828                     BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
3829         } else {
3830                 /* Set the CID type to support an L2 connection. */
3831                 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3832                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
3833                 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3834                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3835                     BCE_L2CTX_TX_CMD_TYPE, val);
3836
3837                 /* Point the hardware to the first page in the chain. */
3838                 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3839                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3840                     BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
3841                 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3842                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3843                     BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
3844         }
3845 }
3846
3847
3848 /****************************************************************************/
3849 /* Allocate memory and initialize the TX data structures.                   */
3850 /*                                                                          */
3851 /* Returns:                                                                 */
3852 /*   0 for success, positive value for failure.                             */
3853 /****************************************************************************/
3854 static int
3855 bce_init_tx_chain(struct bce_tx_ring *txr)
3856 {
3857         struct tx_bd *txbd;
3858         int i, rc = 0;
3859
3860         /* Set the initial TX producer/consumer indices. */
3861         txr->tx_prod = 0;
3862         txr->tx_cons = 0;
3863         txr->tx_prod_bseq = 0;
3864         txr->used_tx_bd = 0;
3865         txr->max_tx_bd = USABLE_TX_BD(txr);
3866
3867         /*
3868          * The NetXtreme II supports a linked-list structure called
3869          * a Buffer Descriptor Chain (or BD chain).  A BD chain
3870          * consists of a series of 1 or more chain pages, each of which
3871          * consists of a fixed number of BD entries.
3872          * The last BD entry on each page is a pointer to the next page
3873          * in the chain, and the last pointer in the BD chain
3874          * points back to the beginning of the chain.
3875          */
3876
3877         /* Set the TX next pointer chain entries. */
3878         for (i = 0; i < txr->tx_pages; i++) {
3879                 int j;
3880
3881                 txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3882
3883                 /* Check if we've reached the last page. */
3884                 if (i == (txr->tx_pages - 1))
3885                         j = 0;
3886                 else
3887                         j = i + 1;
3888
3889                 txbd->tx_bd_haddr_hi =
3890                     htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
3891                 txbd->tx_bd_haddr_lo =
3892                     htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
3893         }
3894         bce_init_tx_context(txr);
3895
3896         return(rc);
3897 }
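
/*
 * Illustrative sketch (not driver code): how the chain pages wired up in
 * bce_init_tx_chain() above form a ring.  The last BD slot of page i
 * carries the bus address of page i + 1, and the last page points back
 * to page 0.  TOY_PAGES is a hypothetical page count.
 */
#if 0
#include <stdint.h>

#define TOY_PAGES	4

static void
toy_link_pages(const uint64_t page_paddr[TOY_PAGES], uint64_t next_ptr[TOY_PAGES])
{
	int i;

	for (i = 0; i < TOY_PAGES; i++) {
		int next = (i == TOY_PAGES - 1) ? 0 : i + 1;

		/* Last descriptor slot of page i holds page 'next's address. */
		next_ptr[i] = page_paddr[next];
	}
}
#endif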
3898
3899
3900 /****************************************************************************/
3901 /* Free memory and clear the TX data structures.                            */
3902 /*                                                                          */
3903 /* Returns:                                                                 */
3904 /*   Nothing.                                                               */
3905 /****************************************************************************/
3906 static void
3907 bce_free_tx_chain(struct bce_tx_ring *txr)
3908 {
3909         int i;
3910
3911         /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3912         for (i = 0; i < TOTAL_TX_BD(txr); i++) {
3913                 if (txr->tx_mbuf_ptr[i] != NULL) {
3914                         bus_dmamap_unload(txr->tx_mbuf_tag,
3915                             txr->tx_mbuf_map[i]);
3916                         m_freem(txr->tx_mbuf_ptr[i]);
3917                         txr->tx_mbuf_ptr[i] = NULL;
3918                 }
3919         }
3920
3921         /* Clear each TX chain page. */
3922         for (i = 0; i < txr->tx_pages; i++)
3923                 bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3924         txr->used_tx_bd = 0;
3925 }
3926
3927
3928 /****************************************************************************/
3929 /* Initialize the RX context memory.                                        */
3930 /*                                                                          */
3931 /* Returns:                                                                 */
3932 /*   Nothing                                                                */
3933 /****************************************************************************/
3934 static void
3935 bce_init_rx_context(struct bce_rx_ring *rxr)
3936 {
3937         uint32_t val;
3938
3939         /* Initialize the context ID for an L2 RX chain. */
3940         val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
3941             BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
3942
3943         /*
3944          * Set the level for generating pause frames
3945          * when the number of available rx_bd's gets
3946          * too low (the low watermark) and the level
3947          * when pause frames can be stopped (the high
3948          * watermark).
3949          */
3950         if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
3951             BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
3952                 uint32_t lo_water, hi_water;
3953
3954                 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
3955                 hi_water = USABLE_RX_BD(rxr) / 4;
3956
3957                 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
3958                 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
3959
3960                 if (hi_water > 0xf)
3961                         hi_water = 0xf;
3962                 else if (hi_water == 0)
3963                         lo_water = 0;
3964                 val |= lo_water |
3965                     (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
3966         }
3967
3968         CTX_WR(rxr->sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);
3969
3970         /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
3971         if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
3972             BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
3973                 val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
3974                 REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
3975         }
3976
3977         /* Point the hardware to the first page in the chain. */
3978         val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
3979         CTX_WR(rxr->sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
3980         val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
3981         CTX_WR(rxr->sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
3982 }
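
/*
 * Illustrative sketch (not driver code): the pause-frame watermark packing
 * done in bce_init_rx_context() above.  The default, scale factors, field
 * width and shift below stand in for the BCE_L2CTX_RX_* constants and are
 * assumptions, not the real register layout.
 */
#if 0
#include <stdint.h>

static uint32_t
toy_pack_watermarks(uint32_t usable_bd)
{
	uint32_t lo_water = 32;			/* hypothetical default */
	uint32_t hi_water = usable_bd / 4;	/* start pausing at 25% free */

	lo_water /= 4;				/* hypothetical LO scale */
	hi_water /= 16;				/* hypothetical HI scale */

	if (hi_water > 0xf)
		hi_water = 0xf;			/* field is only 4 bits wide */
	else if (hi_water == 0)
		lo_water = 0;			/* no headroom: disable pausing */

	return lo_water | (hi_water << 4);	/* hypothetical shift */
}
#endif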
3983
3984
3985 /****************************************************************************/
3986 /* Allocate memory and initialize the RX data structures.                   */
3987 /*                                                                          */
3988 /* Returns:                                                                 */
3989 /*   0 for success, positive value for failure.                             */
3990 /****************************************************************************/
3991 static int
3992 bce_init_rx_chain(struct bce_rx_ring *rxr)
3993 {
3994         struct rx_bd *rxbd;
3995         int i, rc = 0;
3996         uint16_t prod, chain_prod;
3997         uint32_t prod_bseq;
3998
3999         /* Initialize the RX producer and consumer indices. */
4000         rxr->rx_prod = 0;
4001         rxr->rx_cons = 0;
4002         rxr->rx_prod_bseq = 0;
4003         rxr->free_rx_bd = USABLE_RX_BD(rxr);
4004         rxr->max_rx_bd = USABLE_RX_BD(rxr);
4005
4006         /* Initialize the RX next pointer chain entries. */
4007         for (i = 0; i < rxr->rx_pages; i++) {
4008                 int j;
4009
4010                 rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4011
4012                 /* Check if we've reached the last page. */
4013                 if (i == (rxr->rx_pages - 1))
4014                         j = 0;
4015                 else
4016                         j = i + 1;
4017
4018                 /* Setup the chain page pointers. */
4019                 rxbd->rx_bd_haddr_hi =
4020                     htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
4021                 rxbd->rx_bd_haddr_lo =
4022                     htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
4023         }
4024
4025         /* Allocate mbuf clusters for the rx_bd chain. */
4026         prod = prod_bseq = 0;
4027         while (prod < TOTAL_RX_BD(rxr)) {
4028                 chain_prod = RX_CHAIN_IDX(rxr, prod);
4029                 if (bce_newbuf_std(rxr, &prod, &chain_prod, &prod_bseq, 1)) {
4030                         if_printf(&rxr->sc->arpcom.ac_if,
4031                             "Error filling RX chain: rx_bd[0x%04X]!\n",
4032                             chain_prod);
4033                         rc = ENOBUFS;
4034                         break;
4035                 }
4036                 prod = NEXT_RX_BD(prod);
4037         }
4038
4039         /* Save the RX chain producer index. */
4040         rxr->rx_prod = prod;
4041         rxr->rx_prod_bseq = prod_bseq;
4042
4043         /* Tell the chip about the waiting rx_bd's. */
4044         REG_WR16(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
4045             rxr->rx_prod);
4046         REG_WR(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
4047             rxr->rx_prod_bseq);
4048
4049         bce_init_rx_context(rxr);
4050
4051         return(rc);
4052 }
4053
4054
4055 /****************************************************************************/
4056 /* Free memory and clear the RX data structures.                            */
4057 /*                                                                          */
4058 /* Returns:                                                                 */
4059 /*   Nothing.                                                               */
4060 /****************************************************************************/
4061 static void
4062 bce_free_rx_chain(struct bce_rx_ring *rxr)
4063 {
4064         int i;
4065
4066         /* Free any mbufs still in the RX mbuf chain. */
4067         for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
4068                 if (rxr->rx_mbuf_ptr[i] != NULL) {
4069                         bus_dmamap_unload(rxr->rx_mbuf_tag,
4070                             rxr->rx_mbuf_map[i]);
4071                         m_freem(rxr->rx_mbuf_ptr[i]);
4072                         rxr->rx_mbuf_ptr[i] = NULL;
4073                 }
4074         }
4075
4076         /* Clear each RX chain page. */
4077         for (i = 0; i < rxr->rx_pages; i++)
4078                 bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4079 }
4080
4081
4082 /****************************************************************************/
4083 /* Set media options.                                                       */
4084 /*                                                                          */
4085 /* Returns:                                                                 */
4086 /*   0 for success, positive value for failure.                             */
4087 /****************************************************************************/
4088 static int
4089 bce_ifmedia_upd(struct ifnet *ifp)
4090 {
4091         struct bce_softc *sc = ifp->if_softc;
4092         struct mii_data *mii = device_get_softc(sc->bce_miibus);
4093         int error = 0;
4094
4095         /*
4096          * 'mii' will be NULL when this function is called on the following
4097          * code path: bce_attach() -> bce_mgmt_init()
4098          */
4099         if (mii != NULL) {
4100                 /* Make sure the MII bus has been enumerated. */
4101                 sc->bce_link = 0;
4102                 if (mii->mii_instance) {
4103                         struct mii_softc *miisc;
4104
4105                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4106                                 mii_phy_reset(miisc);
4107                 }
4108                 error = mii_mediachg(mii);
4109         }
4110         return error;
4111 }
4112
4113
4114 /****************************************************************************/
4115 /* Reports current media status.                                            */
4116 /*                                                                          */
4117 /* Returns:                                                                 */
4118 /*   Nothing.                                                               */
4119 /****************************************************************************/
4120 static void
4121 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4122 {
4123         struct bce_softc *sc = ifp->if_softc;
4124         struct mii_data *mii = device_get_softc(sc->bce_miibus);
4125
4126         mii_pollstat(mii);
4127         ifmr->ifm_active = mii->mii_media_active;
4128         ifmr->ifm_status = mii->mii_media_status;
4129 }
4130
4131
4132 /****************************************************************************/
4133 /* Handles PHY generated interrupt events.                                  */
4134 /*                                                                          */
4135 /* Returns:                                                                 */
4136 /*   Nothing.                                                               */
4137 /****************************************************************************/
4138 static void
4139 bce_phy_intr(struct bce_softc *sc)
4140 {
4141         uint32_t new_link_state, old_link_state;
4142         struct ifnet *ifp = &sc->arpcom.ac_if;
4143
4144         ASSERT_SERIALIZED(ifp->if_serializer);
4145
4146         new_link_state = sc->status_block->status_attn_bits &
4147                          STATUS_ATTN_BITS_LINK_STATE;
4148         old_link_state = sc->status_block->status_attn_bits_ack &
4149                          STATUS_ATTN_BITS_LINK_STATE;
4150
4151         /* Handle any changes if the link state has changed. */
4152         if (new_link_state != old_link_state) { /* XXX redundant? */
4153                 /* Update the status_attn_bits_ack field in the status block. */
4154                 if (new_link_state) {
4155                         REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4156                                STATUS_ATTN_BITS_LINK_STATE);
4157                         if (bootverbose)
4158                                 if_printf(ifp, "Link is now UP.\n");
4159                 } else {
4160                         REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4161                                STATUS_ATTN_BITS_LINK_STATE);
4162                         if (bootverbose)
4163                                 if_printf(ifp, "Link is now DOWN.\n");
4164                 }
4165
4166                 /*
4167                  * Assume link is down and allow tick routine to
4168                  * update the state based on the actual media state.
4169                  */
4170                 sc->bce_link = 0;
4171                 callout_stop(&sc->bce_tick_callout);
4172                 bce_tick_serialized(sc);
4173         }
4174
4175         /* Acknowledge the link change interrupt. */
4176         REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4177 }
4178
4179
4180 /****************************************************************************/
4181 /* Reads the receive consumer value from the status block (skipping over    */
4182 /* chain page pointer if necessary).                                        */
4183 /*                                                                          */
4184 /* Returns:                                                                 */
4185 /*   hw_cons                                                                */
4186 /****************************************************************************/
4187 static __inline uint16_t
4188 bce_get_hw_rx_cons(struct bce_softc *sc)
4189 {
4190         uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0;
4191
4192         if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4193                 hw_cons++;
4194         return hw_cons;
4195 }
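
/*
 * Illustrative sketch (not driver code): the last slot of each chain page
 * is the next-page pointer, not a packet descriptor, so a consumer index
 * that lands on it must be stepped over.  Assuming 256 slots per page
 * (so USABLE_RX_BD_PER_PAGE would be 255), the adjustment works like this:
 */
#if 0
#include <stdint.h>

static uint16_t
toy_skip_page_boundary(uint16_t cons)
{
	const uint16_t usable_per_page = 255;	/* hypothetical page geometry */

	/* Indices 255, 511, ... address the chain pointer; skip past them. */
	if ((cons & usable_per_page) == usable_per_page)
		cons++;
	return cons;
}
#endif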
4196
4197
4198 /****************************************************************************/
4199 /* Handles received frame interrupt events.                                 */
4200 /*                                                                          */
4201 /* Returns:                                                                 */
4202 /*   Nothing.                                                               */
4203 /****************************************************************************/
4204 static void
4205 bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
4206 {
4207         struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
4208         uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4209         uint32_t sw_prod_bseq;
4210
4211         ASSERT_SERIALIZED(ifp->if_serializer);
4212
4213         /* Get working copies of the driver's view of the RX indices. */
4214         sw_cons = rxr->rx_cons;
4215         sw_prod = rxr->rx_prod;
4216         sw_prod_bseq = rxr->rx_prod_bseq;
4217
4218         /* Scan through the receive chain as long as there is work to do. */
4219         while (sw_cons != hw_cons) {
4220                 struct mbuf *m = NULL;
4221                 struct l2_fhdr *l2fhdr = NULL;
4222                 unsigned int len;
4223                 uint32_t status = 0;
4224
4225 #ifdef IFPOLL_ENABLE
4226                 if (count >= 0 && count-- == 0)
4227                         break;
4228 #endif
4229
4230                 /*
4231                  * Convert the producer/consumer indices
4232                  * to an actual rx_bd index.
4233                  */
4234                 sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
4235                 sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
4236
4237                 rxr->free_rx_bd++;
4238
4239                 /* The mbuf is stored with the last rx_bd entry of a packet. */
4240                 if (rxr->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4241                         if (sw_chain_cons != sw_chain_prod) {
4242                                 if_printf(ifp, "RX cons(%d) != prod(%d), "
4243                                     "drop!\n", sw_chain_cons, sw_chain_prod);
4244                                 IFNET_STAT_INC(ifp, ierrors, 1);
4245
4246                                 bce_setup_rxdesc_std(rxr, sw_chain_cons,
4247                                     &sw_prod_bseq);
4248                                 m = NULL;
4249                                 goto bce_rx_int_next_rx;
4250                         }
4251
4252                         /* Unmap the mbuf from DMA space. */
4253                         bus_dmamap_sync(rxr->rx_mbuf_tag,
4254                             rxr->rx_mbuf_map[sw_chain_cons],
4255                             BUS_DMASYNC_POSTREAD);
4256
4257                         /* Save the mbuf from the driver's chain. */
4258                         m = rxr->rx_mbuf_ptr[sw_chain_cons];
4259
4260                         /*
4261                          * Frames received on the NetXtreme II are prepended
4262                          * with an l2_fhdr structure which provides status
4263                          * information about the received frame (including
4264                          * VLAN tags and checksum info).  The frames are also
4265                          * automatically adjusted to align the IP header
4266                          * (i.e. two null bytes are inserted before the 
4267                          * Ethernet header).  As a result the data DMA'd by
4268                          * the controller into the mbuf is as follows:
4269                          *
4270                          * +---------+-----+---------------------+-----+
4271                          * | l2_fhdr | pad | packet data         | FCS |
4272                          * +---------+-----+---------------------+-----+
4273                          * 
4274                          * The l2_fhdr needs to be checked and skipped and the
4275                          * FCS needs to be stripped before sending the packet
4276                          * up the stack.
4277                          */
4278                         l2fhdr = mtod(m, struct l2_fhdr *);
4279
4280                         len = l2fhdr->l2_fhdr_pkt_len;
4281                         status = l2fhdr->l2_fhdr_status;
4282
4283                         len -= ETHER_CRC_LEN;
4284
4285                         /* Check the received frame for errors. */
4286                         if (status & (L2_FHDR_ERRORS_BAD_CRC |
4287                                       L2_FHDR_ERRORS_PHY_DECODE |
4288                                       L2_FHDR_ERRORS_ALIGNMENT |
4289                                       L2_FHDR_ERRORS_TOO_SHORT |
4290                                       L2_FHDR_ERRORS_GIANT_FRAME)) {
4291                                 IFNET_STAT_INC(ifp, ierrors, 1);
4292
4293                                 /* Reuse the mbuf for a new frame. */
4294                                 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4295                                     &sw_prod_bseq);
4296                                 m = NULL;
4297                                 goto bce_rx_int_next_rx;
4298                         }
4299
4300                         /* 
4301                          * Get a new mbuf for the rx_bd.   If no new
4302                          * mbufs are available then reuse the current mbuf,
4303                          * log an ierror on the interface, and generate
4304                          * an error in the system log.
4305                          */
4306                         if (bce_newbuf_std(rxr, &sw_prod, &sw_chain_prod,
4307                             &sw_prod_bseq, 0)) {
4308                                 IFNET_STAT_INC(ifp, ierrors, 1);
4309
4310                                 /* Try to reuse the existing mbuf. */
4311                                 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4312                                     &sw_prod_bseq);
4313                                 m = NULL;
4314                                 goto bce_rx_int_next_rx;
4315                         }
4316
4317                         /*
4318                          * Skip over the l2_fhdr when passing
4319                          * the data up the stack.
4320                          */
4321                         m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4322
4323                         m->m_pkthdr.len = m->m_len = len;
4324                         m->m_pkthdr.rcvif = ifp;
4325
4326                         /* Validate the checksum if offload enabled. */
4327                         if (ifp->if_capenable & IFCAP_RXCSUM) {
4328                                 /* Check for an IP datagram. */
4329                                 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4330                                         m->m_pkthdr.csum_flags |=
4331                                                 CSUM_IP_CHECKED;
4332
4333                                         /* Check if the IP checksum is valid. */
4334                                         if ((l2fhdr->l2_fhdr_ip_xsum ^
4335                                              0xffff) == 0) {
4336                                                 m->m_pkthdr.csum_flags |=
4337                                                         CSUM_IP_VALID;
4338                                         }
4339                                 }
4340
4341                                 /* Check for a valid TCP/UDP frame. */
4342                                 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4343                                               L2_FHDR_STATUS_UDP_DATAGRAM)) {
4344
4345                                         /* Check for a good TCP/UDP checksum. */
4346                                         if ((status &
4347                                              (L2_FHDR_ERRORS_TCP_XSUM |
4348                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4349                                                 m->m_pkthdr.csum_data =
4350                                                 l2fhdr->l2_fhdr_tcp_udp_xsum;
4351                                                 m->m_pkthdr.csum_flags |=
4352                                                         CSUM_DATA_VALID |
4353                                                         CSUM_PSEUDO_HDR;
4354                                         }
4355                                 }
4356                         }
4357
4358                         IFNET_STAT_INC(ifp, ipackets, 1);
4359 bce_rx_int_next_rx:
4360                         sw_prod = NEXT_RX_BD(sw_prod);
4361                 }
4362
4363                 sw_cons = NEXT_RX_BD(sw_cons);
4364
4365                 /* If we have a packet, pass it up the stack */
4366                 if (m) {
4367                         if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4368                                 m->m_flags |= M_VLANTAG;
4369                                 m->m_pkthdr.ether_vlantag =
4370                                         l2fhdr->l2_fhdr_vlan_tag;
4371                         }
4372                         ifp->if_input(ifp, m);
4373                 }
4374         }
4375
4376         rxr->rx_cons = sw_cons;
4377         rxr->rx_prod = sw_prod;
4378         rxr->rx_prod_bseq = sw_prod_bseq;
4379
4380         REG_WR16(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
4381             rxr->rx_prod);
4382         REG_WR(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
4383             rxr->rx_prod_bseq);
4384 }
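
/*
 * Illustrative sketch (not driver code): the receive framing handled in
 * bce_rx_intr() above.  The controller DMAs "l2_fhdr | 2-byte pad |
 * packet | FCS" into the mbuf, so the payload handed to the stack starts
 * sizeof(l2_fhdr) + 2 bytes in and its length is the reported packet
 * length minus the 4-byte FCS.  A hardware ones-complement checksum is
 * good when XORing it with 0xffff yields zero, which is the IP header
 * test used above.  The structure below is a stand-in, not the real
 * l2_fhdr layout.
 */
#if 0
#include <stdint.h>

struct toy_l2_fhdr {
	uint32_t status;
	uint16_t pkt_len;
	uint16_t ip_xsum;
};

static const void *
toy_frame_payload(const uint8_t *dma_buf, unsigned int *payload_len)
{
	const struct toy_l2_fhdr *hdr = (const void *)dma_buf;

	*payload_len = hdr->pkt_len - 4;	/* strip the trailing FCS */
	return dma_buf + sizeof(*hdr) + 2;	/* skip header + alignment pad */
}

static int
toy_ip_csum_ok(uint16_t hw_xsum)
{
	return (hw_xsum ^ 0xffff) == 0;		/* all-ones means "good" */
}
#endif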
4385
4386
4387 /****************************************************************************/
4388 /* Reads the transmit consumer value from the status block (skipping over   */
4389 /* chain page pointer if necessary).                                        */
4390 /*                                                                          */
4391 /* Returns:                                                                 */
4392 /*   hw_cons                                                                */
4393 /****************************************************************************/
4394 static __inline uint16_t
4395 bce_get_hw_tx_cons(struct bce_softc *sc)
4396 {
4397         uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0;
4398
4399         if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4400                 hw_cons++;
4401         return hw_cons;
4402 }
4403
4404
4405 /****************************************************************************/
4406 /* Handles transmit completion interrupt events.                            */
4407 /*                                                                          */
4408 /* Returns:                                                                 */
4409 /*   Nothing.                                                               */
4410 /****************************************************************************/
4411 static void
4412 bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
4413 {
4414         struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4415         uint16_t sw_tx_cons, sw_tx_chain_cons;
4416
4417         ASSERT_SERIALIZED(ifp->if_serializer);
4418
4419         /* Get the driver's view of the TX consumer index. */
4420         sw_tx_cons = txr->tx_cons;
4421
4422         /* Cycle through any completed TX chain page entries. */
4423         while (sw_tx_cons != hw_tx_cons) {
4424                 sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
4425
4426                 /*
4427                  * Free the associated mbuf. Remember
4428                  * that only the last tx_bd of a packet
4429                  * has an mbuf pointer and DMA map.
4430                  */
4431                 if (txr->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4432                         /* Unmap the mbuf. */
4433                         bus_dmamap_unload(txr->tx_mbuf_tag,
4434                             txr->tx_mbuf_map[sw_tx_chain_cons]);
4435
4436                         /* Free the mbuf. */
4437                         m_freem(txr->tx_mbuf_ptr[sw_tx_chain_cons]);
4438                         txr->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4439
4440                         IFNET_STAT_INC(ifp, opackets, 1);
4441                 }
4442
4443                 txr->used_tx_bd--;
4444                 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4445         }
4446
4447         if (txr->used_tx_bd == 0) {
4448                 /* Clear the TX timeout timer. */
4449                 ifp->if_timer = 0;
4450         }
4451
4452         /* Clear the tx hardware queue full flag. */
4453         if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
4454                 ifq_clr_oactive(&ifp->if_snd);
4455         txr->tx_cons = sw_tx_cons;
4456 }
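
/*
 * Illustrative sketch (not driver code): the TX reclaim loop in
 * bce_tx_intr() above.  Every descriptor between the driver's consumer
 * index and the hardware's has completed; only the final descriptor of
 * each packet carries an mbuf pointer, so that is the only slot that
 * needs to be freed.  TOY_RING_SIZE is a hypothetical ring size.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

#define TOY_RING_SIZE	1024

static uint16_t
toy_reclaim(void *buf_ptr[TOY_RING_SIZE], uint16_t sw_cons, uint16_t hw_cons,
    void (*free_buf)(void *))
{
	while (sw_cons != hw_cons) {
		uint16_t idx = sw_cons % TOY_RING_SIZE;

		if (buf_ptr[idx] != NULL) {	/* last bd of a packet */
			free_buf(buf_ptr[idx]);
			buf_ptr[idx] = NULL;
		}
		sw_cons++;			/* 16-bit wrap is intentional */
	}
	return sw_cons;
}
#endif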
4457
4458
4459 /****************************************************************************/
4460 /* Disables interrupt generation.                                           */
4461 /*                                                                          */
4462 /* Returns:                                                                 */
4463 /*   Nothing.                                                               */
4464 /****************************************************************************/
4465 static void
4466 bce_disable_intr(struct bce_softc *sc)
4467 {
4468         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4469         REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4470
4471         callout_stop(&sc->bce_ckmsi_callout);
4472         sc->bce_msi_maylose = FALSE;
4473         sc->bce_check_rx_cons = 0;
4474         sc->bce_check_tx_cons = 0;
4475         sc->bce_check_status_idx = 0xffff;
4476
4477         sc->bce_npoll.ifpc_stcount = 0;
4478
4479         lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer);
4480 }
4481
4482
4483 /****************************************************************************/
4484 /* Enables interrupt generation.                                            */
4485 /*                                                                          */
4486 /* Returns:                                                                 */
4487 /*   Nothing.                                                               */
4488 /****************************************************************************/
4489 static void
4490 bce_enable_intr(struct bce_softc *sc)
4491 {
4492         lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer);
4493
4494         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4495                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4496                BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4497         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4498                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4499
4500         REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
4501
4502         if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
4503                 sc->bce_msi_maylose = FALSE;
4504                 sc->bce_check_rx_cons = 0;
4505                 sc->bce_check_tx_cons = 0;
4506                 sc->bce_check_status_idx = 0xffff;
4507
4508                 if (bootverbose)
4509                         if_printf(&sc->arpcom.ac_if, "check msi\n");
4510
4511                 callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
4512                     bce_check_msi, sc, sc->bce_intr_cpuid);
4513         }
4514 }
4515
4516
4517 /****************************************************************************/
4518 /* Reenables interrupt generation during interrupt handling.                */
4519 /*                                                                          */
4520 /* Returns:                                                                 */
4521 /*   Nothing.                                                               */
4522 /****************************************************************************/
4523 static void
4524 bce_reenable_intr(struct bce_softc *sc)
4525 {
4526         if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
4527                 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4528                        BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4529                        BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4530         }
4531         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4532                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4533 }
4534
4535
4536 /****************************************************************************/
4537 /* Handles controller initialization.                                       */
4538 /*                                                                          */
4539 /* Returns:                                                                 */
4540 /*   Nothing.                                                               */
4541 /****************************************************************************/
4542 static void
4543 bce_init(void *xsc)
4544 {
4545         struct bce_softc *sc = xsc;
4546         struct ifnet *ifp = &sc->arpcom.ac_if;
4547         uint32_t ether_mtu;
4548         int error, i;
4549
4550         ASSERT_SERIALIZED(ifp->if_serializer);
4551
4552         /* Check if the driver is still running and bail out if it is. */
4553         if (ifp->if_flags & IFF_RUNNING)
4554                 return;
4555
4556         bce_stop(sc);
4557
4558         error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4559         if (error) {
4560                 if_printf(ifp, "Controller reset failed!\n");
4561                 goto back;
4562         }
4563
4564         error = bce_chipinit(sc);
4565         if (error) {
4566                 if_printf(ifp, "Controller initialization failed!\n");
4567                 goto back;
4568         }
4569
4570         error = bce_blockinit(sc);
4571         if (error) {
4572                 if_printf(ifp, "Block initialization failed!\n");
4573                 goto back;
4574         }
4575
4576         /* Load our MAC address. */
4577         bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4578         bce_set_mac_addr(sc);
4579
4580         /* Calculate and program the Ethernet MTU size. */
4581         ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4582
4583         /*
4584          * Program the MTU, enabling jumbo frame
4585          * support if necessary.  RX frames use standard
4586          * MCLBYTES clusters (see bce_newbuf_std()).
4587          */
4588         if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4589 #ifdef notyet
4590                 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4591                        min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4592                        BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4593 #else
4594                 panic("jumbo buffer is not supported yet");
4595 #endif
4596         } else {
4597                 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4598         }
4599
4600         /* Program appropriate promiscuous/multicast filtering. */
4601         bce_set_rx_mode(sc);
4602
4603         /* Init RX buffer descriptor chain. */
4604         for (i = 0; i < sc->ring_cnt; ++i)
4605                 bce_init_rx_chain(&sc->rx_rings[i]);    /* XXX return value */
4606
4607         /* Init TX buffer descriptor chain. */
4608         for (i = 0; i < sc->ring_cnt; ++i)
4609                 bce_init_tx_chain(&sc->tx_rings[i]);
4610
4611 #ifdef IFPOLL_ENABLE
4612         /* Disable interrupts if we are polling. */
4613         if (ifp->if_flags & IFF_NPOLLING) {
4614                 bce_disable_intr(sc);
4615
4616                 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4617                        (1 << 16) | sc->bce_rx_quick_cons_trip);
4618                 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4619                        (1 << 16) | sc->bce_tx_quick_cons_trip);
4620         } else
4621 #endif
4622         /* Enable host interrupts. */
4623         bce_enable_intr(sc);
4624
4625         bce_ifmedia_upd(ifp);
4626
4627         ifp->if_flags |= IFF_RUNNING;
4628         ifq_clr_oactive(&ifp->if_snd);
4629
4630         callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
4631             sc->bce_intr_cpuid);
4632 back:
4633         if (error)
4634                 bce_stop(sc);
4635 }
4636
4637
4638 /****************************************************************************/
4639 /* Initialize the controller just enough so that any management firmware    */
4640 /* running on the device will continue to operate correctly.                */
4641 /*                                                                          */
4642 /* Returns:                                                                 */
4643 /*   Nothing.                                                               */
4644 /****************************************************************************/
4645 static void
4646 bce_mgmt_init(struct bce_softc *sc)
4647 {
4648         struct ifnet *ifp = &sc->arpcom.ac_if;
4649
4650         /* Bail out if management firmware is not running. */
4651         if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4652                 return;
4653
4654         /* Enable all critical blocks in the MAC. */
4655         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
4656             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
4657                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4658                     BCE_MISC_ENABLE_DEFAULT_XI);
4659         } else {
4660                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4661         }
4662         REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4663         DELAY(20);
4664
4665         bce_ifmedia_upd(ifp);
4666 }
4667
4668
4669 /****************************************************************************/
4670 /* Encapsulates an mbuf cluster into the tx_bd chain structure, making the  */
4671 /* memory visible to the controller.                                        */
4672 /*                                                                          */
4673 /* Returns:                                                                 */
4674 /*   0 for success, positive value for failure.                             */
4675 /****************************************************************************/
4676 static int
4677 bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
4678 {
4679         bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4680         bus_dmamap_t map, tmp_map;
4681         struct mbuf *m0 = *m_head;
4682         struct tx_bd *txbd = NULL;
4683         uint16_t vlan_tag = 0, flags = 0, mss = 0;
4684         uint16_t chain_prod, chain_prod_start, prod;
4685         uint32_t prod_bseq;
4686         int i, error, maxsegs, nsegs;
4687
4688         /* Transfer any checksum offload flags to the bd. */
4689         if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
4690                 error = bce_tso_setup(txr, m_head, &flags, &mss);
4691                 if (error)
4692                         return ENOBUFS;
4693                 m0 = *m_head;
4694         } else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
4695                 if (m0->m_pkthdr.csum_flags & CSUM_IP)
4696                         flags |= TX_BD_FLAGS_IP_CKSUM;
4697                 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4698                         flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4699         }
4700
4701         /* Transfer any VLAN tags to the bd. */
4702         if (m0->m_flags & M_VLANTAG) {
4703                 flags |= TX_BD_FLAGS_VLAN_TAG;
4704                 vlan_tag = m0->m_pkthdr.ether_vlantag;
4705         }
4706
4707         prod = txr->tx_prod;
4708         chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);
4709
4710         /* Map the mbuf into DMAable memory. */
4711         map = txr->tx_mbuf_map[chain_prod_start];
4712
4713         maxsegs = txr->max_tx_bd - txr->used_tx_bd;
4714         KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4715                 ("not enough segments %d", maxsegs));
4716         if (maxsegs > BCE_MAX_SEGMENTS)
4717                 maxsegs = BCE_MAX_SEGMENTS;
4718
4719         /* Map the mbuf into our DMA address space. */
4720         error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
4721                         segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
4722         if (error)
4723                 goto back;
4724         bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4725
4726         *nsegs_used += nsegs;
4727
4728         /* Reset m0 */
4729         m0 = *m_head;
4730
4731         /* prod points to an empty tx_bd at this point. */
4732         prod_bseq  = txr->tx_prod_bseq;
4733
4734         /*
4735          * Cycle through each mbuf segment that makes up
4736          * the outgoing frame, gathering the mapping info
4737          * for that segment and creating a tx_bd for
4738          * the mbuf.
4739          */
4740         for (i = 0; i < nsegs; i++) {
4741                 chain_prod = TX_CHAIN_IDX(txr, prod);
4742                 txbd =
4743                 &txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4744
4745                 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4746                 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4747                 txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
4748                     htole16(segs[i].ds_len);
4749                 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4750                 txbd->tx_bd_flags = htole16(flags);
4751
4752                 prod_bseq += segs[i].ds_len;
4753                 if (i == 0)
4754                         txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4755                 prod = NEXT_TX_BD(prod);
4756         }
4757
4758         /* Set the END flag on the last TX buffer descriptor. */
4759         txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4760
4761         /*
4762          * Ensure that the mbuf pointer for this transmission
4763          * is placed at the array index of the last
4764          * descriptor in this chain.  This is done
4765          * because a single map is used for all 
4766          * segments of the mbuf and we don't want to
4767          * unload the map before all of the segments
4768          * have been freed.
4769          */
4770         txr->tx_mbuf_ptr[chain_prod] = m0;
4771
4772         tmp_map = txr->tx_mbuf_map[chain_prod];
4773         txr->tx_mbuf_map[chain_prod] = map;
4774         txr->tx_mbuf_map[chain_prod_start] = tmp_map;
4775
4776         txr->used_tx_bd += nsegs;
4777
4778         /* prod points to the next free tx_bd at this point. */
4779         txr->tx_prod = prod;
4780         txr->tx_prod_bseq = prod_bseq;
4781 back:
4782         if (error) {
4783                 m_freem(*m_head);
4784                 *m_head = NULL;
4785         }
4786         return error;
4787 }
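
/*
 * Illustrative sketch (not driver code): flag placement as in bce_encap()
 * above.  Every segment of a frame receives the same base flags; the
 * first descriptor additionally carries START and the last carries END,
 * which is how the hardware finds packet boundaries in the descriptor
 * stream.  The flag values below are hypothetical.
 */
#if 0
#include <stdint.h>

#define TOY_FLAG_START	0x0080
#define TOY_FLAG_END	0x0040

static void
toy_mark_segments(uint16_t flags[], int nsegs, uint16_t base_flags)
{
	int i;

	for (i = 0; i < nsegs; i++)
		flags[i] = base_flags;
	flags[0] |= TOY_FLAG_START;
	flags[nsegs - 1] |= TOY_FLAG_END;
}
#endif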
4788
4789
4790 static void
4791 bce_xmit(struct bce_tx_ring *txr)
4792 {
4793         /* Start the transmit. */
4794         REG_WR16(txr->sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BIDX,
4795             txr->tx_prod);
4796         REG_WR(txr->sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BSEQ,
4797             txr->tx_prod_bseq);
4798 }
4799
4800
4801 /****************************************************************************/
4802 /* Main transmit routine when called from another routine with a lock.      */
4803 /*                                                                          */
4804 /* Returns:                                                                 */
4805 /*   Nothing.                                                               */
4806 /****************************************************************************/
4807 static void
4808 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4809 {
4810         struct bce_softc *sc = ifp->if_softc;
4811         struct bce_tx_ring *txr = &sc->tx_rings[0];
4812         int count = 0;
4813
4814         ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
4815         ASSERT_SERIALIZED(ifp->if_serializer);
4816
4817         /* If there's no link, drain the transmit queue and exit. */
4818         if (!sc->bce_link) {
4819                 ifq_purge(&ifp->if_snd);
4820                 return;
4821         }
4822
4823         if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
4824                 return;
4825
4826         for (;;) {
4827                 struct mbuf *m_head;
4828
4829                 /*
4830                  * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4831                  * unlikely to fail.
4832                  */
4833                 if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
4834                         ifq_set_oactive(&ifp->if_snd);
4835                         break;
4836                 }
4837
4838                 /* Check for any frames to send. */
4839                 m_head = ifq_dequeue(&ifp->if_snd, NULL);
4840                 if (m_head == NULL)
4841                         break;
4842
4843                 /*
4844                  * Pack the data into the transmit ring. If we
4845                  * don't have room, place the mbuf back at the
4846                  * head of the queue and set the OACTIVE flag
4847                  * to wait for the NIC to drain the chain.
4848                  */
4849                 if (bce_encap(txr, &m_head, &count)) {
4850                         IFNET_STAT_INC(ifp, oerrors, 1);
4851                         if (txr->used_tx_bd == 0) {
4852                                 continue;
4853                         } else {
4854                                 ifq_set_oactive(&ifp->if_snd);
4855                                 break;
4856                         }
4857                 }
4858
4859                 if (count >= txr->tx_wreg) {
4860                         bce_xmit(txr);
4861                         count = 0;
4862                 }
4863
4864                 /* Send a copy of the frame to any BPF listeners. */
4865                 ETHER_BPF_MTAP(ifp, m_head);
4866
4867                 /* Set the tx timeout. */
4868                 ifp->if_timer = BCE_TX_TIMEOUT;
4869         }
4870         if (count > 0)
4871                 bce_xmit(txr);
4872 }
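
/*
 * Illustrative sketch (not driver code): the doorbell batching used in
 * bce_start() above.  Rather than writing the producer registers after
 * every frame, the driver accumulates the number of queued descriptors
 * and only rings the doorbell once the count reaches tx_wreg, plus one
 * final write for any remainder.
 */
#if 0
static void
toy_queue_frames(const int seg_cnt[], int nframes, int wreg_thresh,
    void (*ring_doorbell)(void))
{
	int pending = 0, i;

	for (i = 0; i < nframes; i++) {
		/* ... place seg_cnt[i] descriptors on the ring ... */
		pending += seg_cnt[i];
		if (pending >= wreg_thresh) {
			ring_doorbell();
			pending = 0;
		}
	}
	if (pending > 0)
		ring_doorbell();	/* flush the remainder */
}
#endif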
4873
4874
4875 /****************************************************************************/
4876 /* Handles any IOCTL calls from the operating system.                       */
4877 /*                                                                          */
4878 /* Returns:                                                                 */
4879 /*   0 for success, positive value for failure.                             */
4880 /****************************************************************************/
4881 static int
4882 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
4883 {
4884         struct bce_softc *sc = ifp->if_softc;
4885         struct ifreq *ifr = (struct ifreq *)data;
4886         struct mii_data *mii;
4887         int mask, error = 0;
4888
4889         ASSERT_SERIALIZED(ifp->if_serializer);
4890
4891         switch(command) {
4892         case SIOCSIFMTU:
4893                 /* Check that the MTU setting is supported. */
4894                 if (ifr->ifr_mtu < BCE_MIN_MTU ||
4895 #ifdef notyet
4896                     ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
4897 #else
4898                     ifr->ifr_mtu > ETHERMTU
4899 #endif
4900                    ) {
4901                         error = EINVAL;
4902                         break;
4903                 }
4904
4905                 ifp->if_mtu = ifr->ifr_mtu;
4906                 ifp->if_flags &= ~IFF_RUNNING;  /* Force reinitialize */
4907                 bce_init(sc);
4908                 break;
4909
4910         case SIOCSIFFLAGS:
4911                 if (ifp->if_flags & IFF_UP) {
4912                         if (ifp->if_flags & IFF_RUNNING) {
4913                                 mask = ifp->if_flags ^ sc->bce_if_flags;
4914
4915                                 if (mask & (IFF_PROMISC | IFF_ALLMULTI))
4916                                         bce_set_rx_mode(sc);
4917                         } else {
4918                                 bce_init(sc);
4919                         }
4920                 } else if (ifp->if_flags & IFF_RUNNING) {
4921                         bce_stop(sc);
4922
4923                         /* If MFW is running, partially reinitialize the controller. */
4924                         if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
4925                                 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4926                                 bce_chipinit(sc);
4927                                 bce_mgmt_init(sc);
4928                         }
4929                 }
4930                 sc->bce_if_flags = ifp->if_flags;
4931                 break;
4932
4933         case SIOCADDMULTI:
4934         case SIOCDELMULTI:
4935                 if (ifp->if_flags & IFF_RUNNING)
4936                         bce_set_rx_mode(sc);
4937                 break;
4938
4939         case SIOCSIFMEDIA:
4940         case SIOCGIFMEDIA:
4941                 mii = device_get_softc(sc->bce_miibus);
4942                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4943                 break;
4944
4945         case SIOCSIFCAP:
4946                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4947                 if (mask & IFCAP_HWCSUM) {
4948                         ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
4949                         if (ifp->if_capenable & IFCAP_TXCSUM)
4950                                 ifp->if_hwassist |= BCE_CSUM_FEATURES;
4951                         else
4952                                 ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
4953                 }
4954                 if (mask & IFCAP_TSO) {
4955                         ifp->if_capenable ^= IFCAP_TSO;
4956                         if (ifp->if_capenable & IFCAP_TSO)
4957                                 ifp->if_hwassist |= CSUM_TSO;
4958                         else
4959                                 ifp->if_hwassist &= ~CSUM_TSO;
4960                 }
4961                 break;
4962
4963         default:
4964                 error = ether_ioctl(ifp, command, data);
4965                 break;
4966         }
4967         return error;
4968 }
4969
4970
4971 /****************************************************************************/
4972 /* Transmit timeout handler.                                                */
4973 /*                                                                          */
4974 /* Returns:                                                                 */
4975 /*   Nothing.                                                               */
4976 /****************************************************************************/
4977 static void
4978 bce_watchdog(struct ifnet *ifp)
4979 {
4980         struct bce_softc *sc = ifp->if_softc;
4981
4982         ASSERT_SERIALIZED(ifp->if_serializer);
4983
4984         /*
4985          * If we are in this routine because of pause frames, then
4986          * don't reset the hardware.
4987          */
4988         if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED) 
4989                 return;
4990
4991         if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
4992
4993         ifp->if_flags &= ~IFF_RUNNING;  /* Force reinitialize */
4994         bce_init(sc);
4995
4996         IFNET_STAT_INC(ifp, oerrors, 1);
4997
4998         if (!ifq_is_empty(&ifp->if_snd))
4999                 if_devstart(ifp);
5000 }
5001
5002
5003 #ifdef IFPOLL_ENABLE
5004
5005 static void
5006 bce_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
5007 {
5008         struct bce_softc *sc = ifp->if_softc;
5009         struct status_block *sblk = sc->status_block;
5010         struct bce_tx_ring *txr = &sc->tx_rings[0];
5011         struct bce_rx_ring *rxr = &sc->rx_rings[0];
5012         uint16_t hw_tx_cons, hw_rx_cons;
5013
5014         ASSERT_SERIALIZED(ifp->if_serializer);
5015
5016         /*
5017          * Save the status block index value for use when enabling
5018          * the interrupt.
5019          */
5020         sc->last_status_idx = sblk->status_idx;
5021
5022         /* Make sure status index is extracted before rx/tx cons */
5023         cpu_lfence();
5024
5025         if (sc->bce_npoll.ifpc_stcount-- == 0) {
5026                 uint32_t status_attn_bits;
5027
5028                 sc->bce_npoll.ifpc_stcount = sc->bce_npoll.ifpc_stfrac;
5029
5030                 status_attn_bits = sblk->status_attn_bits;
5031
5032                 /* Was it a link change interrupt? */
5033                 if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5034                     (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
5035                         bce_phy_intr(sc);
5036
5037                 /*
5038                  * Clear any transient status updates during link state change.
5039                  */
5040                 REG_WR(sc, BCE_HC_COMMAND,
5041                     sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5042                 REG_RD(sc, BCE_HC_COMMAND);
5043
5044                 /*
5045                  * If any other attention is asserted then the chip is toast.
5046                  */
5047                 if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5048                      (sblk->status_attn_bits_ack &
5049                       ~STATUS_ATTN_BITS_LINK_STATE)) {
5050                         if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5051                                   sblk->status_attn_bits);
5052                         bce_init(sc);
5053                         return;
5054                 }
5055         }
5056
5057         hw_rx_cons = bce_get_hw_rx_cons(sc);
5058         hw_tx_cons = bce_get_hw_tx_cons(sc);
5059
5060         /* Check for any completed RX frames. */
5061         if (hw_rx_cons != rxr->rx_cons)
5062                 bce_rx_intr(rxr, count, hw_rx_cons);
5063
5064         /* Check for any completed TX frames. */
5065         if (hw_tx_cons != txr->tx_cons)
5066                 bce_tx_intr(txr, hw_tx_cons);
5067
5068         if (sc->bce_coalchg_mask)
5069                 bce_coal_change(sc);
5070
5071         /* Check for new frames to transmit. */
5072         if (!ifq_is_empty(&ifp->if_snd))
5073                 if_devstart(ifp);
5074 }
5075
5076 static void
5077 bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
5078 {
5079         struct bce_softc *sc = ifp->if_softc;
5080
5081         ASSERT_SERIALIZED(ifp->if_serializer);
5082
5083         if (info != NULL) {
5084                 int cpuid = sc->bce_npoll.ifpc_cpuid;
5085
5086                 info->ifpi_rx[cpuid].poll_func = bce_npoll_compat;
5087                 info->ifpi_rx[cpuid].arg = NULL;
5088                 info->ifpi_rx[cpuid].serializer = ifp->if_serializer;
5089
5090                 if (ifp->if_flags & IFF_RUNNING) {
5091                         bce_disable_intr(sc);
5092
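                        /*
                         * The *_QUICK_CONS_TRIP registers carry the
                         * "during interrupt" trip count in the upper 16
                         * bits and the normal trip count in the lower 16
                         * bits.  While polling, the upper half is forced
                         * to 1, presumably so the status block is updated
                         * with minimal coalescing delay.
                         */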
5093                         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5094                                (1 << 16) | sc->bce_rx_quick_cons_trip);
5095                         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5096                                (1 << 16) | sc->bce_tx_quick_cons_trip);
5097                 }
5098                 ifq_set_cpuid(&ifp->if_snd, cpuid);
5099         } else {
5100                 if (ifp->if_flags & IFF_RUNNING) {
5101                         bce_enable_intr(sc);
5102
5103                         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5104                                (sc->bce_tx_quick_cons_trip_int << 16) |
5105                                sc->bce_tx_quick_cons_trip);
5106                         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5107                                (sc->bce_rx_quick_cons_trip_int << 16) |
5108                                sc->bce_rx_quick_cons_trip);
5109                 }
5110                 ifq_set_cpuid(&ifp->if_snd, sc->bce_intr_cpuid);
5111         }
5112 }
5113
5114 #endif  /* IFPOLL_ENABLE */
5115
5116
5117 /*
5118  * Interrupt handler.
5119  */
5120 /****************************************************************************/
5121 /* Main interrupt entry point.  Verifies that the controller generated the  */
5122 /* interrupt and then calls a separate routine to handle the various        */
5123 /* interrupt causes (PHY, TX, RX).                                          */
5124 /*                                                                          */
5125 /* Returns:                                                                 */
5126 /*   Nothing.                                                               */
5127 /****************************************************************************/
5128 static void
5129 bce_intr(struct bce_softc *sc)
5130 {
5131         struct ifnet *ifp = &sc->arpcom.ac_if;
5132         struct status_block *sblk;
5133         uint16_t hw_rx_cons, hw_tx_cons;
5134         uint32_t status_attn_bits;
5135         struct bce_tx_ring *txr = &sc->tx_rings[0];
5136         struct bce_rx_ring *rxr = &sc->rx_rings[0];
5137
5138         ASSERT_SERIALIZED(ifp->if_serializer);
5139
5140         sblk = sc->status_block;
5141
5142         /*
5143          * Save the status block index value for use during
5144          * the next interrupt.
5145          */
5146         sc->last_status_idx = sblk->status_idx;
5147
5148         /* Make sure status index is extracted before rx/tx cons */
5149         cpu_lfence();
5150
5151         /* Check if the hardware has finished any work. */
5152         hw_rx_cons = bce_get_hw_rx_cons(sc);
5153         hw_tx_cons = bce_get_hw_tx_cons(sc);
5154
5155         status_attn_bits = sblk->status_attn_bits;
5156
5157         /* Was it a link change interrupt? */
5158         if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5159             (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5160                 bce_phy_intr(sc);
5161
5162                 /*
5163                  * Clear any transient status updates during link state
5164                  * change.
5165                  */
5166                 REG_WR(sc, BCE_HC_COMMAND,
5167                     sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5168                 REG_RD(sc, BCE_HC_COMMAND);
5169         }
5170
5171         /*
5172          * If any other attention is asserted then
5173          * the chip is toast.
5174          */
5175         if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5176             (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5177                 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5178                           sblk->status_attn_bits);
5179                 bce_init(sc);
5180                 return;
5181         }
5182
5183         /* Check for any completed RX frames. */
5184         if (hw_rx_cons != rxr->rx_cons)
5185                 bce_rx_intr(rxr, -1, hw_rx_cons);
5186
5187         /* Check for any completed TX frames. */
5188         if (hw_tx_cons != txr->tx_cons)
5189                 bce_tx_intr(txr, hw_tx_cons);
5190
5191         /* Re-enable interrupts. */
5192         bce_reenable_intr(sc);
5193
5194         if (sc->bce_coalchg_mask)
5195                 bce_coal_change(sc);
5196
5197         /* Handle any frames that arrived while handling the interrupt. */
5198         if (!ifq_is_empty(&ifp->if_snd))
5199                 if_devstart(ifp);
5200 }
5201
5202 static void
5203 bce_intr_legacy(void *xsc)
5204 {
5205         struct bce_softc *sc = xsc;
5206         struct status_block *sblk;
5207
5208         sblk = sc->status_block;
5209
5210         /*
5211          * If the hardware status block index matches the last value
5212          * read by the driver and we haven't asserted our interrupt
5213          * then there's nothing to do.
5214          */
5215         if (sblk->status_idx == sc->last_status_idx &&
5216             (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
5217              BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5218                 return;
5219
5220         /* Ack the interrupt and stop others from occurring. */
5221         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5222                BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5223                BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5224
5225         /*
5226          * Read back to deassert IRQ immediately to avoid too
5227          * many spurious interrupts.
5228          */
5229         REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
5230
5231         bce_intr(sc);
5232 }
5233
5234 static void
5235 bce_intr_msi(void *xsc)
5236 {
5237         struct bce_softc *sc = xsc;
5238
5239         /* Ack the interrupt and stop others from occurring. */
5240         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5241                BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5242                BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5243
5244         bce_intr(sc);
5245 }
5246
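/*
 * One-shot MSI handler.  Unlike the legacy and plain MSI handlers above,
 * no PCICFG ack/mask write is issued here; in one-shot mode the host
 * coalescing block is expected to hold off further interrupts until
 * bce_reenable_intr() re-arms it at the end of bce_intr().
 */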
5247 static void
5248 bce_intr_msi_oneshot(void *xsc)
5249 {
5250         bce_intr(xsc);
5251 }
5252
5253
5254 /****************************************************************************/
5255 /* Programs the various packet receive modes (broadcast and multicast).     */
5256 /*                                                                          */
5257 /* Returns:                                                                 */
5258 /*   Nothing.                                                               */
5259 /****************************************************************************/
5260 static void
5261 bce_set_rx_mode(struct bce_softc *sc)
5262 {
5263         struct ifnet *ifp = &sc->arpcom.ac_if;
5264         struct ifmultiaddr *ifma;
5265         uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5266         uint32_t rx_mode, sort_mode;
5267         int h, i;
5268
5269         ASSERT_SERIALIZED(ifp->if_serializer);
5270
5271         /* Initialize receive mode default settings. */
5272         rx_mode = sc->rx_mode &
5273                   ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5274                     BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5275         sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5276
5277         /*
5278          * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5279          * be enabled.
5280          */
5281         if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5282             !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5283                 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5284
5285         /*
5286          * Check for promiscuous, all multicast, or selected
5287          * multicast address filtering.
5288          */
5289         if (ifp->if_flags & IFF_PROMISC) {
5290                 /* Enable promiscuous mode. */
5291                 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5292                 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5293         } else if (ifp->if_flags & IFF_ALLMULTI) {
5294                 /* Enable all multicast addresses. */
5295                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5296                         REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5297                                0xffffffff);
5298                 }
5299                 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5300         } else {
5301                 /* Accept one or more multicast(s). */
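                /*
                 * Each address is hashed into one bit of the 256-bit
                 * multicast filter: the low byte of the little-endian
                 * CRC32 of the MAC selects a hash register (bits 7-5)
                 * and a bit within it (bits 4-0), e.g. h = 0x6b sets
                 * bit 11 of hash register 3.
                 */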
5302                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5303                         if (ifma->ifma_addr->sa_family != AF_LINK)
5304                                 continue;
5305                         h = ether_crc32_le(
5306                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5307                             ETHER_ADDR_LEN) & 0xFF;
5308                         hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5309                 }
5310
5311                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5312                         REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5313                                hashes[i]);
5314                 }
5315                 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5316         }
5317
5318         /* Only make changes if the receive mode has actually changed. */
5319         if (rx_mode != sc->rx_mode) {
5320                 sc->rx_mode = rx_mode;
5321                 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5322         }
5323
5324         /* Disable and clear the existing sort before enabling a new sort. */
5325         REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5326         REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5327         REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5328 }
5329
5330
5331 /****************************************************************************/
5332 /* Called periodically to update statistics from the controller's           */
5333 /* statistics block.                                                        */
5334 /*                                                                          */
5335 /* Returns:                                                                 */
5336 /*   Nothing.                                                               */
5337 /****************************************************************************/
5338 static void
5339 bce_stats_update(struct bce_softc *sc)
5340 {
5341         struct ifnet *ifp = &sc->arpcom.ac_if;
5342         struct statistics_block *stats = sc->stats_block;
5343
5344         ASSERT_SERIALIZED(ifp->if_serializer);
5345
5346         /* 
5347          * Certain controllers don't report carrier sense errors correctly.
5348          * See errata E11_5708CA0_1165.
5349          */
5350         if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5351             !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5352                 IFNET_STAT_INC(ifp, oerrors,
5353                         (u_long)stats->stat_Dot3StatsCarrierSenseErrors);
5354         }
5355
5356         /*
5357          * Update the sysctl statistics from the hardware statistics.
5358          */
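        /*
         * The 64-bit "IfHC" counters are exported by the controller as
         * separate high/low 32-bit words, so each is reassembled here as
         * (hi << 32) + lo before being published through sysctl.
         */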
5359         sc->stat_IfHCInOctets =
5360                 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5361                  (uint64_t)stats->stat_IfHCInOctets_lo;
5362
5363         sc->stat_IfHCInBadOctets =
5364                 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5365                  (uint64_t)stats->stat_IfHCInBadOctets_lo;
5366
5367         sc->stat_IfHCOutOctets =
5368                 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5369                  (uint64_t)stats->stat_IfHCOutOctets_lo;
5370
5371         sc->stat_IfHCOutBadOctets =
5372                 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5373                  (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5374
5375         sc->stat_IfHCInUcastPkts =
5376                 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5377                  (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5378
5379         sc->stat_IfHCInMulticastPkts =
5380                 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5381                  (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5382
5383         sc->stat_IfHCInBroadcastPkts =
5384                 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5385                  (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5386
5387         sc->stat_IfHCOutUcastPkts =
5388                 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5389                  (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5390
5391         sc->stat_IfHCOutMulticastPkts =
5392                 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5393                  (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5394
5395         sc->stat_IfHCOutBroadcastPkts =
5396                 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5397                  (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5398
5399         sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5400                 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5401
5402         sc->stat_Dot3StatsCarrierSenseErrors =
5403                 stats->stat_Dot3StatsCarrierSenseErrors;
5404
5405         sc->stat_Dot3StatsFCSErrors =
5406                 stats->stat_Dot3StatsFCSErrors;
5407
5408         sc->stat_Dot3StatsAlignmentErrors =
5409                 stats->stat_Dot3StatsAlignmentErrors;
5410
5411         sc->stat_Dot3StatsSingleCollisionFrames =
5412                 stats->stat_Dot3StatsSingleCollisionFrames;
5413
5414         sc->stat_Dot3StatsMultipleCollisionFrames =
5415                 stats->stat_Dot3StatsMultipleCollisionFrames;
5416
5417         sc->stat_Dot3StatsDeferredTransmissions =
5418                 stats->stat_Dot3StatsDeferredTransmissions;
5419
5420         sc->stat_Dot3StatsExcessiveCollisions =
5421                 stats->stat_Dot3StatsExcessiveCollisions;
5422
5423         sc->stat_Dot3StatsLateCollisions =
5424                 stats->stat_Dot3StatsLateCollisions;
5425
5426         sc->stat_EtherStatsCollisions =
5427                 stats->stat_EtherStatsCollisions;
5428
5429         sc->stat_EtherStatsFragments =
5430                 stats->stat_EtherStatsFragments;
5431
5432         sc->stat_EtherStatsJabbers =
5433                 stats->stat_EtherStatsJabbers;
5434
5435         sc->stat_EtherStatsUndersizePkts =
5436                 stats->stat_EtherStatsUndersizePkts;
5437
5438         sc->stat_EtherStatsOverrsizePkts =
5439                 stats->stat_EtherStatsOverrsizePkts;
5440
5441         sc->stat_EtherStatsPktsRx64Octets =
5442                 stats->stat_EtherStatsPktsRx64Octets;
5443
5444         sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5445                 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5446
5447         sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5448                 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5449
5450         sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5451                 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5452
5453         sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5454                 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5455
5456         sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5457                 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5458
5459         sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5460                 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5461
5462         sc->stat_EtherStatsPktsTx64Octets =
5463                 stats->stat_EtherStatsPktsTx64Octets;
5464
5465         sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5466                 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5467
5468         sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5469                 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5470
5471         sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5472                 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5473
5474         sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5475                 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5476
5477         sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5478                 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5479
5480         sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5481                 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5482
5483         sc->stat_XonPauseFramesReceived =
5484                 stats->stat_XonPauseFramesReceived;
5485
5486         sc->stat_XoffPauseFramesReceived =
5487                 stats->stat_XoffPauseFramesReceived;
5488
5489         sc->stat_OutXonSent =
5490                 stats->stat_OutXonSent;
5491
5492         sc->stat_OutXoffSent =
5493                 stats->stat_OutXoffSent;
5494
5495         sc->stat_FlowControlDone =
5496                 stats->stat_FlowControlDone;
5497
5498         sc->stat_MacControlFramesReceived =
5499                 stats->stat_MacControlFramesReceived;
5500
5501         sc->stat_XoffStateEntered =
5502                 stats->stat_XoffStateEntered;
5503
5504         sc->stat_IfInFramesL2FilterDiscards =
5505                 stats->stat_IfInFramesL2FilterDiscards;
5506
5507         sc->stat_IfInRuleCheckerDiscards =
5508                 stats->stat_IfInRuleCheckerDiscards;
5509
5510         sc->stat_IfInFTQDiscards =
5511                 stats->stat_IfInFTQDiscards;
5512
5513         sc->stat_IfInMBUFDiscards =
5514                 stats->stat_IfInMBUFDiscards;
5515
5516         sc->stat_IfInRuleCheckerP4Hit =
5517                 stats->stat_IfInRuleCheckerP4Hit;
5518
5519         sc->stat_CatchupInRuleCheckerDiscards =
5520                 stats->stat_CatchupInRuleCheckerDiscards;
5521
5522         sc->stat_CatchupInFTQDiscards =
5523                 stats->stat_CatchupInFTQDiscards;
5524
5525         sc->stat_CatchupInMBUFDiscards =
5526                 stats->stat_CatchupInMBUFDiscards;
5527
5528         sc->stat_CatchupInRuleCheckerP4Hit =
5529                 stats->stat_CatchupInRuleCheckerP4Hit;
5530
5531         sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5532
5533         /*
5534          * Update the interface statistics from the
5535          * hardware statistics.
5536          */
5537         IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions);
5538
5539         IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts +
5540             (u_long)sc->stat_EtherStatsOverrsizePkts +
5541             (u_long)sc->stat_IfInMBUFDiscards +
5542             (u_long)sc->stat_Dot3StatsAlignmentErrors +
5543             (u_long)sc->stat_Dot3StatsFCSErrors +
5544             (u_long)sc->stat_IfInRuleCheckerDiscards +
5545             (u_long)sc->stat_IfInFTQDiscards +
5546             (u_long)sc->com_no_buffers);
5547
5548         IFNET_STAT_SET(ifp, oerrors,
5549             (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5550             (u_long)sc->stat_Dot3StatsExcessiveCollisions +
5551             (u_long)sc->stat_Dot3StatsLateCollisions);
5552 }
5553
5554
5555 /****************************************************************************/
5556 /* Periodic function to notify the bootcode that the driver is still        */
5557 /* present.                                                                 */
5558 /*                                                                          */
5559 /* Returns:                                                                 */
5560 /*   Nothing.                                                               */
5561 /****************************************************************************/
5562 static void
5563 bce_pulse(void *xsc)
5564 {
5565         struct bce_softc *sc = xsc;
5566         struct ifnet *ifp = &sc->arpcom.ac_if;
5567         uint32_t msg;
5568
5569         lwkt_serialize_enter(ifp->if_serializer);
5570
5571         /* Tell the firmware that the driver is still running. */
5572         msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5573         bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
5574
5575         /* Update the bootcode condition. */
5576         sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
5577
5578         /* Report whether the bootcode still knows the driver is running. */
5579         if (!sc->bce_drv_cardiac_arrest) {
5580                 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
5581                         sc->bce_drv_cardiac_arrest = 1;
5582                         if_printf(ifp, "Bootcode lost the driver pulse! "
5583                             "(bc_state = 0x%08X)\n", sc->bc_state);
5584                 }
5585         } else {
5586                 /*
5587                  * Recovery without a reset is only supported by
5588                  * bootcode v5.0.11+ and v5.2.1+; older bootcode
5589                  * requires the driver to reset the controller to
5590                  * clear this condition.
5591                  */
5592                 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
5593                         sc->bce_drv_cardiac_arrest = 0;
5594                         if_printf(ifp, "Bootcode found the driver pulse! "
5595                             "(bc_state = 0x%08X)\n", sc->bc_state);
5596                 }
5597         }
5598
5599         /* Schedule the next pulse. */
5600         callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
5601             sc->bce_intr_cpuid);
5602
5603         lwkt_serialize_exit(ifp->if_serializer);
5604 }
5605
5606
5607 /****************************************************************************/
5608 /* Periodic function to check whether an MSI has been lost.                 */
5609 /*                                                                          */
5610 /* Returns:                                                                 */
5611 /*   Nothing.                                                               */
5612 /****************************************************************************/
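/*
 * Lost-MSI workaround: if the status block indicates pending RX/TX/link
 * work but the driver's consumer indices and status index have not moved
 * for two consecutive checks, the MSI is assumed lost and is recovered by
 * briefly toggling MSI enable in PCICFG and calling the handler directly.
 */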
5613 static void
5614 bce_check_msi(void *xsc)
5615 {
5616         struct bce_softc *sc = xsc;
5617         struct ifnet *ifp = &sc->arpcom.ac_if;
5618         struct status_block *sblk = sc->status_block;
5619         struct bce_tx_ring *txr = &sc->tx_rings[0];
5620         struct bce_rx_ring *rxr = &sc->rx_rings[0];
5621
5622         lwkt_serialize_enter(ifp->if_serializer);
5623
5624         KKASSERT(mycpuid == sc->bce_intr_cpuid);
5625
5626         if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
5627                 lwkt_serialize_exit(ifp->if_serializer);
5628                 return;
5629         }
5630
5631         if (bce_get_hw_rx_cons(sc) != rxr->rx_cons ||
5632             bce_get_hw_tx_cons(sc) != txr->tx_cons ||
5633             (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5634             (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5635                 if (sc->bce_check_rx_cons == rxr->rx_cons &&
5636                     sc->bce_check_tx_cons == txr->tx_cons &&
5637                     sc->bce_check_status_idx == sc->last_status_idx) {
5638                         uint32_t msi_ctrl;
5639
5640                         if (!sc->bce_msi_maylose) {
5641                                 sc->bce_msi_maylose = TRUE;
5642                                 goto done;
5643                         }
5644
5645                         msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
5646                         if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) {
5647                                 if (bootverbose)
5648                                         if_printf(ifp, "lost MSI\n");
5649
5650                                 REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
5651                                     msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
5652                                 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);
5653
5654                                 bce_intr_msi(sc);
5655                         } else if (bootverbose) {
5656                                 if_printf(ifp, "MSI may be lost\n");
5657                         }
5658                 }
5659         }
5660         sc->bce_msi_maylose = FALSE;
5661         sc->bce_check_rx_cons = rxr->rx_cons;
5662         sc->bce_check_tx_cons = txr->tx_cons;
5663         sc->bce_check_status_idx = sc->last_status_idx;
5664
5665 done:
5666         callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
5667             bce_check_msi, sc);
5668         lwkt_serialize_exit(ifp->if_serializer);
5669 }
5670
5671
5672 /****************************************************************************/
5673 /* Periodic function to perform maintenance tasks.                          */
5674 /*                                                                          */
5675 /* Returns:                                                                 */
5676 /*   Nothing.                                                               */
5677 /****************************************************************************/
5678 static void
5679 bce_tick_serialized(struct bce_softc *sc)
5680 {
5681         struct ifnet *ifp = &sc->arpcom.ac_if;
5682         struct mii_data *mii;
5683
5684         ASSERT_SERIALIZED(ifp->if_serializer);
5685
5686         /* Update the statistics from the hardware statistics block. */
5687         bce_stats_update(sc);
5688
5689         /* Schedule the next tick. */
5690         callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
5691             sc->bce_intr_cpuid);
5692
5693         /* If the link is already up then we're done. */
5694         if (sc->bce_link)
5695                 return;
5696
5697         mii = device_get_softc(sc->bce_miibus);
5698         mii_tick(mii);
5699
5700         /* Check if the link has come up. */
5701         if ((mii->mii_media_status & IFM_ACTIVE) &&
5702             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5703                 sc->bce_link++;
5704                 /* Now that link is up, handle any outstanding TX traffic. */
5705                 if (!ifq_is_empty(&ifp->if_snd))
5706                         if_devstart(ifp);
5707         }
5708 }
5709
5710
5711 static void
5712 bce_tick(void *xsc)
5713 {
5714         struct bce_softc *sc = xsc;
5715         struct ifnet *ifp = &sc->arpcom.ac_if;
5716
5717         lwkt_serialize_enter(ifp->if_serializer);
5718         bce_tick_serialized(sc);
5719         lwkt_serialize_exit(ifp->if_serializer);
5720 }
5721
5722
5723 /****************************************************************************/
5724 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5725 /*                                                                          */
5726 /* Returns:                                                                 */
5727 /*   Nothing.                                                               */
5728 /****************************************************************************/
5729 static void
5730 bce_add_sysctls(struct bce_softc *sc)
5731 {
5732         struct sysctl_ctx_list *ctx;
5733         struct sysctl_oid_list *children;
5734
5735         sysctl_ctx_init(&sc->bce_sysctl_ctx);
5736         sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
5737                                               SYSCTL_STATIC_CHILDREN(_hw),
5738                                               OID_AUTO,
5739                                               device_get_nameunit(sc->bce_dev),
5740                                               CTLFLAG_RD, 0, "");
5741         if (sc->bce_sysctl_tree == NULL) {
5742                 device_printf(sc->bce_dev, "can't add sysctl node\n");
5743                 return;
5744         }
5745
5746         ctx = &sc->bce_sysctl_ctx;
5747         children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);
5748
5749         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
5750                         CTLTYPE_INT | CTLFLAG_RW,
5751                         sc, 0, bce_sysctl_tx_bds_int, "I",
5752                         "Send max coalesced BD count during interrupt");
5753         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
5754                         CTLTYPE_INT | CTLFLAG_RW,
5755                         sc, 0, bce_sysctl_tx_bds, "I",
5756                         "Send max coalesced BD count");
5757         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
5758                         CTLTYPE_INT | CTLFLAG_RW,
5759                         sc, 0, bce_sysctl_tx_ticks_int, "I",
5760                         "Send coalescing ticks during interrupt");
5761         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
5762                         CTLTYPE_INT | CTLFLAG_RW,
5763                         sc, 0, bce_sysctl_tx_ticks, "I",
5764                         "Send coalescing ticks");
5765
5766         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
5767                         CTLTYPE_INT | CTLFLAG_RW,
5768                         sc, 0, bce_sysctl_rx_bds_int, "I",
5769                         "Receive max coalesced BD count during interrupt");
5770         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
5771                         CTLTYPE_INT | CTLFLAG_RW,
5772                         sc, 0, bce_sysctl_rx_bds, "I",
5773                         "Receive max coalesced BD count");
5774         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
5775                         CTLTYPE_INT | CTLFLAG_RW,
5776                         sc, 0, bce_sysctl_rx_ticks_int, "I",
5777                         "Receive coalescing ticks during interrupt");
5778         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
5779                         CTLTYPE_INT | CTLFLAG_RW,
5780                         sc, 0, bce_sysctl_rx_ticks, "I",
5781                         "Receive coalescing ticks");
5782
5783         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
5784                 CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages");
5785         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
5786                 CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages");
5787
5788         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
5789                 CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0,
5790                 "# of segments before writing to hardware registers");
5791
5792         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5793                 "stat_IfHCInOctets",
5794                 CTLFLAG_RD, &sc->stat_IfHCInOctets,
5795                 "Bytes received");
5796
5797         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5798                 "stat_IfHCInBadOctets",
5799                 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5800                 "Bad bytes received");
5801
5802         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5803                 "stat_IfHCOutOctets",
5804                 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5805                 "Bytes sent");
5806
5807         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5808                 "stat_IfHCOutBadOctets",
5809                 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5810                 "Bad bytes sent");
5811
5812         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5813                 "stat_IfHCInUcastPkts",
5814                 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5815                 "Unicast packets received");
5816
5817         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5818                 "stat_IfHCInMulticastPkts",
5819                 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5820                 "Multicast packets received");
5821
5822         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5823                 "stat_IfHCInBroadcastPkts",
5824                 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5825                 "Broadcast packets received");
5826
5827         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5828                 "stat_IfHCOutUcastPkts",
5829                 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5830                 "Unicast packets sent");
5831
5832         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5833                 "stat_IfHCOutMulticastPkts",
5834                 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5835                 "Multicast packets sent");
5836
5837         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5838                 "stat_IfHCOutBroadcastPkts",
5839                 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5840                 "Broadcast packets sent");
5841
5842         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5843                 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5844                 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5845                 0, "Internal MAC transmit errors");
5846
5847         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5848                 "stat_Dot3StatsCarrierSenseErrors",
5849                 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5850                 0, "Carrier sense errors");
5851
5852         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5853                 "stat_Dot3StatsFCSErrors",
5854                 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5855                 0, "Frame check sequence errors");
5856
5857         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5858                 "stat_Dot3StatsAlignmentErrors",
5859                 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5860                 0, "Alignment errors");
5861
5862         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5863                 "stat_Dot3StatsSingleCollisionFrames",
5864                 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5865                 0, "Single Collision Frames");
5866
5867         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5868                 "stat_Dot3StatsMultipleCollisionFrames",
5869                 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5870                 0, "Multiple Collision Frames");
5871
5872         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5873                 "stat_Dot3StatsDeferredTransmissions",
5874                 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5875                 0, "Deferred Transmissions");
5876
5877         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5878                 "stat_Dot3StatsExcessiveCollisions",
5879                 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5880                 0, "Excessive Collisions");
5881
5882         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5883                 "stat_Dot3StatsLateCollisions",
5884                 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5885                 0, "Late Collisions");
5886
5887         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5888                 "stat_EtherStatsCollisions",
5889                 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5890                 0, "Collisions");
5891
5892         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5893                 "stat_EtherStatsFragments",
5894                 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5895                 0, "Fragments");
5896
5897         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5898                 "stat_EtherStatsJabbers",
5899                 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5900                 0, "Jabbers");
5901
5902         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5903                 "stat_EtherStatsUndersizePkts",
5904                 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5905                 0, "Undersize packets");
5906
5907         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5908                 "stat_EtherStatsOverrsizePkts",
5909                 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5910                 0, "Oversize packets");
5911
5912         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5913                 "stat_EtherStatsPktsRx64Octets",
5914                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5915                 0, "64 byte packets received");
5916
5917         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5918                 "stat_EtherStatsPktsRx65Octetsto127Octets",
5919                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
5920                 0, "65 to 127 byte packets received");
5921
5922         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5923                 "stat_EtherStatsPktsRx128Octetsto255Octets",
5924                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
5925                 0, "128 to 255 byte packets received");
5926
5927         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5928                 "stat_EtherStatsPktsRx256Octetsto511Octets",
5929                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
5930                 0, "256 to 511 byte packets received");
5931
5932         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5933                 "stat_EtherStatsPktsRx512Octetsto1023Octets",
5934                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
5935                 0, "512 to 1023 byte packets received");
5936
5937         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5938                 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
5939                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
5940                 0, "1024 to 1522 byte packets received");
5941
5942         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5943                 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
5944                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
5945                 0, "1523 to 9022 byte packets received");
5946
5947         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5948                 "stat_EtherStatsPktsTx64Octets",
5949                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
5950                 0, "64 byte packets sent");
5951
5952         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5953                 "stat_EtherStatsPktsTx65Octetsto127Octets",
5954                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
5955                 0, "65 to 127 byte packets sent");
5956
5957         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5958                 "stat_EtherStatsPktsTx128Octetsto255Octets",
5959                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
5960                 0, "128 to 255 byte packets sent");
5961
5962         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5963                 "stat_EtherStatsPktsTx256Octetsto511Octets",
5964                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
5965                 0, "256 to 511 byte packets sent");
5966
5967         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5968                 "stat_EtherStatsPktsTx512Octetsto1023Octets",
5969                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
5970                 0, "512 to 1023 byte packets sent");
5971
5972         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5973                 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
5974                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
5975                 0, "1024 to 1522 byte packets sent");
5976
5977         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5978                 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
5979                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
5980                 0, "1523 to 9022 byte packets sent");
5981
5982         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5983                 "stat_XonPauseFramesReceived",
5984                 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
5985                 0, "XON pause frames received");
5986
5987         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5988                 "stat_XoffPauseFramesReceived",
5989                 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
5990                 0, "XOFF pause frames received");
5991
5992         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5993                 "stat_OutXonSent",
5994                 CTLFLAG_RD, &sc->stat_OutXonSent,
5995                 0, "XON pause frames sent");
5996
5997         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5998                 "stat_OutXoffSent",
5999                 CTLFLAG_RD, &sc->stat_OutXoffSent,
6000                 0, "XOFF pause frames sent");
6001
6002         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6003                 "stat_FlowControlDone",
6004                 CTLFLAG_RD, &sc->stat_FlowControlDone,
6005                 0, "Flow control done");
6006
6007         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6008                 "stat_MacControlFramesReceived",
6009                 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6010                 0, "MAC control frames received");
6011
6012         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6013                 "stat_XoffStateEntered",
6014                 CTLFLAG_RD, &sc->stat_XoffStateEntered,
6015                 0, "XOFF state entered");
6016
6017         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6018                 "stat_IfInFramesL2FilterDiscards",
6019                 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6020                 0, "Received L2 packets discarded");
6021
6022         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6023                 "stat_IfInRuleCheckerDiscards",
6024                 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6025                 0, "Received packets discarded by rule");
6026
6027         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6028                 "stat_IfInFTQDiscards",
6029                 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6030                 0, "Received packet FTQ discards");
6031
6032         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6033                 "stat_IfInMBUFDiscards",
6034                 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6035                 0, "Received packets discarded due to lack of controller buffer memory");
6036
6037         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6038                 "stat_IfInRuleCheckerP4Hit",
6039                 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6040                 0, "Received packets rule checker hits");
6041
6042         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6043                 "stat_CatchupInRuleCheckerDiscards",
6044                 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6045                 0, "Received packets discarded in Catchup path");
6046
6047         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6048                 "stat_CatchupInFTQDiscards",
6049                 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6050                 0, "Received packets discarded in FTQ in Catchup path");
6051
6052         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6053                 "stat_CatchupInMBUFDiscards",
6054                 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6055                 0, "Received packets discarded in controller buffer memory in Catchup path");
6056
6057         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6058                 "stat_CatchupInRuleCheckerP4Hit",
6059                 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6060                 0, "Received packets rule checker hits in Catchup path");
6061
6062         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6063                 "com_no_buffers",
6064                 CTLFLAG_RD, &sc->com_no_buffers,
6065                 0, "Valid packets received but no RX buffers available");
6066 }
6067
6068 static int
6069 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
6070 {
6071         struct bce_softc *sc = arg1;
6072
6073         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6074                         &sc->bce_tx_quick_cons_trip_int,
6075                         BCE_COALMASK_TX_BDS_INT);
6076 }
6077
6078 static int
6079 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
6080 {
6081         struct bce_softc *sc = arg1;
6082
6083         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6084                         &sc->bce_tx_quick_cons_trip,
6085                         BCE_COALMASK_TX_BDS);
6086 }
6087
6088 static int
6089 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
6090 {
6091         struct bce_softc *sc = arg1;
6092
6093         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6094                         &sc->bce_tx_ticks_int,
6095                         BCE_COALMASK_TX_TICKS_INT);
6096 }
6097
6098 static int
6099 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
6100 {
6101         struct bce_softc *sc = arg1;
6102
6103         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6104                         &sc->bce_tx_ticks,
6105                         BCE_COALMASK_TX_TICKS);
6106 }
6107
6108 static int
6109 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
6110 {
6111         struct bce_softc *sc = arg1;
6112
6113         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6114                         &sc->bce_rx_quick_cons_trip_int,
6115                         BCE_COALMASK_RX_BDS_INT);
6116 }
6117
6118 static int
6119 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
6120 {
6121         struct bce_softc *sc = arg1;
6122
6123         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6124                         &sc->bce_rx_quick_cons_trip,
6125                         BCE_COALMASK_RX_BDS);
6126 }
6127
6128 static int
6129 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
6130 {
6131         struct bce_softc *sc = arg1;
6132
6133         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6134                         &sc->bce_rx_ticks_int,
6135                         BCE_COALMASK_RX_TICKS_INT);
6136 }
6137
6138 static int
6139 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
6140 {
6141         struct bce_softc *sc = arg1;
6142
6143         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6144                         &sc->bce_rx_ticks,
6145                         BCE_COALMASK_RX_TICKS);
6146 }
6147
6148 static int
6149 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
6150                        uint32_t coalchg_mask)
6151 {
6152         struct bce_softc *sc = arg1;
6153         struct ifnet *ifp = &sc->arpcom.ac_if;
6154         int error = 0, v;
6155
6156         lwkt_serialize_enter(ifp->if_serializer);
6157
6158         v = *coal;
6159         error = sysctl_handle_int(oidp, &v, 0, req);
6160         if (!error && req->newptr != NULL) {
6161                 if (v < 0) {
6162                         error = EINVAL;
6163                 } else {
6164                         *coal = v;
6165                         sc->bce_coalchg_mask |= coalchg_mask;
6166                 }
6167         }
6168
6169         lwkt_serialize_exit(ifp->if_serializer);
6170         return error;
6171 }
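/*
 * Coalescing changes are applied lazily: the sysctl handlers above only
 * record the new value and set a bit in bce_coalchg_mask; the hardware
 * registers are rewritten by bce_coal_change() below, which runs from the
 * interrupt/polling path while the interface is up.
 */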
6172
6173 static void
6174 bce_coal_change(struct bce_softc *sc)
6175 {
6176         struct ifnet *ifp = &sc->arpcom.ac_if;
6177
6178         ASSERT_SERIALIZED(ifp->if_serializer);
6179
6180         if ((ifp->if_flags & IFF_RUNNING) == 0) {
6181                 sc->bce_coalchg_mask = 0;
6182                 return;
6183         }
6184
6185         if (sc->bce_coalchg_mask &
6186             (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
6187                 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
6188                        (sc->bce_tx_quick_cons_trip_int << 16) |
6189                        sc->bce_tx_quick_cons_trip);
6190                 if (bootverbose) {
6191                         if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
6192                                   sc->bce_tx_quick_cons_trip,
6193                                   sc->bce_tx_quick_cons_trip_int);
6194                 }
6195         }
6196
6197         if (sc->bce_coalchg_mask &
6198             (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
6199                 REG_WR(sc, BCE_HC_TX_TICKS,
6200                        (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6201                 if (bootverbose) {
6202                         if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
6203                                   sc->bce_tx_ticks, sc->bce_tx_ticks_int);
6204                 }
6205         }
6206
6207         if (sc->bce_coalchg_mask &
6208             (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
6209                 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
6210                        (sc->bce_rx_quick_cons_trip_int << 16) |
6211                        sc->bce_rx_quick_cons_trip);
6212                 if (bootverbose) {
6213                         if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
6214                                   sc->bce_rx_quick_cons_trip,
6215                                   sc->bce_rx_quick_cons_trip_int);
6216                 }
6217         }
6218
6219         if (sc->bce_coalchg_mask &
6220             (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
6221                 REG_WR(sc, BCE_HC_RX_TICKS,
6222                        (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6223                 if (bootverbose) {
6224                         if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
6225                                   sc->bce_rx_ticks, sc->bce_rx_ticks_int);
6226                 }
6227         }
6228
6229         sc->bce_coalchg_mask = 0;
6230 }
6231
6232 static int
6233 bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
6234     uint16_t *flags0, uint16_t *mss0)
6235 {
6236         struct mbuf *m;
6237         uint16_t flags;
6238         int thoff, iphlen, hoff;
6239
6240         m = *mp;
6241         KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
6242
6243         hoff = m->m_pkthdr.csum_lhlen;
6244         iphlen = m->m_pkthdr.csum_iphlen;
6245         thoff = m->m_pkthdr.csum_thlen;
6246
6247         KASSERT(hoff >= sizeof(struct ether_header),
6248             ("invalid ether header len %d", hoff));
6249         KASSERT(iphlen >= sizeof(struct ip),
6250             ("invalid ip header len %d", iphlen));
6251         KASSERT(thoff >= sizeof(struct tcphdr),
6252             ("invalid tcp header len %d", thoff));
6253
6254         if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
6255                 m = m_pullup(m, hoff + iphlen + thoff);
6256                 if (m == NULL) {
6257                         *mp = NULL;
6258                         return ENOBUFS;
6259                 }
6260                 *mp = m;
6261         }
6262
6263         /* Set the LSO flag in the TX BD */
6264         flags = TX_BD_FLAGS_SW_LSO;
6265
6266         /* Set the length of IP + TCP options (in 32 bit words) */
6267         flags |= (((iphlen + thoff -
6268             sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);
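        /*
         * Only the option bytes beyond the fixed 20-byte IP and TCP
         * headers are encoded above, expressed in 32-bit words and
         * placed starting at bit 8 of the TX BD flags; e.g. 12 bytes
         * of TCP options yield (12 >> 2) << 8 = 0x300.
         */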
6269
6270         *mss0 = htole16(m->m_pkthdr.tso_segsz);
6271         *flags0 = flags;
6272
6273         return 0;
6274 }