kernel: Use DEVMETHOD_END in the drivers.
[dragonfly.git] / sys / dev / netif / bce / if_bce.c
1 /*-
2  * Copyright (c) 2006-2007 Broadcom Corporation
3  *      David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
31  */
32
33 /*
34  * The following controllers are supported by this driver:
35  *   BCM5706C A2, A3
36  *   BCM5706S A2, A3
37  *   BCM5708C B1, B2
38  *   BCM5708S B1, B2
39  *   BCM5709C A1, B2, C0
40  *   BCM5716  C0
41  *
42  * The following controllers are not supported by this driver:
43  *   BCM5706C A0, A1
44  *   BCM5706S A0, A1
45  *   BCM5708C A0, B0
46  *   BCM5708S A0, B0
47  *   BCM5709C A0, B0, B1
48  *   BCM5709S A0, A1, B0, B1, B2, C0
49  */
50
51 #include "opt_bce.h"
52 #include "opt_ifpoll.h"
53
54 #include <sys/param.h>
55 #include <sys/bus.h>
56 #include <sys/endian.h>
57 #include <sys/kernel.h>
58 #include <sys/interrupt.h>
59 #include <sys/mbuf.h>
60 #include <sys/malloc.h>
61 #include <sys/queue.h>
62 #include <sys/rman.h>
63 #include <sys/serialize.h>
64 #include <sys/socket.h>
65 #include <sys/sockio.h>
66 #include <sys/sysctl.h>
67
68 #include <netinet/ip.h>
69 #include <netinet/tcp.h>
70
71 #include <net/bpf.h>
72 #include <net/ethernet.h>
73 #include <net/if.h>
74 #include <net/if_arp.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/if_poll.h>
78 #include <net/if_types.h>
79 #include <net/ifq_var.h>
80 #include <net/vlan/if_vlan_var.h>
81 #include <net/vlan/if_vlan_ether.h>
82
83 #include <dev/netif/mii_layer/mii.h>
84 #include <dev/netif/mii_layer/miivar.h>
85 #include <dev/netif/mii_layer/brgphyreg.h>
86
87 #include <bus/pci/pcireg.h>
88 #include <bus/pci/pcivar.h>
89
90 #include "miibus_if.h"
91
92 #include <dev/netif/bce/if_bcereg.h>
93 #include <dev/netif/bce/if_bcefw.h>
94
95 #define BCE_MSI_CKINTVL         ((10 * hz) / 1000)      /* 10ms */
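/* (Editor's note: hz is the kernel tick rate, so the expression above converts 10ms into callout ticks.) */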
96
97 /****************************************************************************/
98 /* PCI Device ID Table                                                      */
99 /*                                                                          */
100 /* Used by bce_probe() to identify the devices supported by this driver.    */
101 /****************************************************************************/
102 #define BCE_DEVDESC_MAX         64
103
104 static struct bce_type bce_devs[] = {
105         /* BCM5706C Controllers and OEM boards. */
106         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
107                 "HP NC370T Multifunction Gigabit Server Adapter" },
108         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
109                 "HP NC370i Multifunction Gigabit Server Adapter" },
110         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3070,
111                 "HP NC380T PCIe DP Multifunc Gig Server Adapter" },
112         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x1709,
113                 "HP NC371i Multifunction Gigabit Server Adapter" },
114         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
115                 "Broadcom NetXtreme II BCM5706 1000Base-T" },
116
117         /* BCM5706S controllers and OEM boards. */
118         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
119                 "HP NC370F Multifunction Gigabit Server Adapter" },
120         { BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
121                 "Broadcom NetXtreme II BCM5706 1000Base-SX" },
122
123         /* BCM5708C controllers and OEM boards. */
124         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7037,
125                 "HP NC373T PCIe Multifunction Gig Server Adapter" },
126         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7038,
127                 "HP NC373i Multifunction Gigabit Server Adapter" },
128         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7045,
129                 "HP NC374m PCIe Multifunction Adapter" },
130         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
131                 "Broadcom NetXtreme II BCM5708 1000Base-T" },
132
133         /* BCM5708S controllers and OEM boards. */
134         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x1706,
135                 "HP NC373m Multifunction Gigabit Server Adapter" },
136         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703b,
137                 "HP NC373i Multifunction Gigabit Server Adapter" },
138         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703d,
139                 "HP NC373F PCIe Multifunc Giga Server Adapter" },
140         { BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
141                 "Broadcom NetXtreme II BCM5708S 1000Base-SX" },
142
143         /* BCM5709C controllers and OEM boards. */
144         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7055,
145                 "HP NC382i DP Multifunction Gigabit Server Adapter" },
146         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7059,
147                 "HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
148         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  PCI_ANY_ID,  PCI_ANY_ID,
149                 "Broadcom NetXtreme II BCM5709 1000Base-T" },
150
151         /* BCM5709S controllers and OEM boards. */
152         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x171d,
153                 "HP NC382m DP 1GbE Multifunction BL-c Adapter" },
154         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x7056,
155                 "HP NC382i DP Multifunction Gigabit Server Adapter" },
156         { BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  PCI_ANY_ID,  PCI_ANY_ID,
157                 "Broadcom NetXtreme II BCM5709 1000Base-SX" },
158
159         /* BCM5716 controllers and OEM boards. */
160         { BRCM_VENDORID, BRCM_DEVICEID_BCM5716,   PCI_ANY_ID,  PCI_ANY_ID,
161                 "Broadcom NetXtreme II BCM5716 1000Base-T" },
162
163         { 0, 0, 0, 0, NULL }
164 };
165
166
167 /****************************************************************************/
168 /* Supported Flash NVRAM device data.                                       */
169 /****************************************************************************/
170 static const struct flash_spec flash_table[] =
171 {
172 #define BUFFERED_FLAGS          (BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
173 #define NONBUFFERED_FLAGS       (BCE_NV_WREN)
174
175         /* Slow EEPROM */
176         {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
177          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
178          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
179          "EEPROM - slow"},
180         /* Expansion entry 0001 */
181         {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
182          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
183          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
184          "Entry 0001"},
185         /* Saifun SA25F010 (non-buffered flash) */
186         /* strap, cfg1, & write1 need updates */
187         {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
188          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
190          "Non-buffered flash (128kB)"},
191         /* Saifun SA25F020 (non-buffered flash) */
192         /* strap, cfg1, & write1 need updates */
193         {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
194          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
196          "Non-buffered flash (256kB)"},
197         /* Expansion entry 0100 */
198         {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
199          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201          "Entry 0100"},
202         /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
203         {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
204          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
205          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
206          "Entry 0101: ST M45PE10 (128kB non-buffered)"},
207         /* Entry 0110: ST M45PE20 (non-buffered flash)*/
208         {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
209          NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
210          ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
211          "Entry 0110: ST M45PE20 (256kB non-buffered)"},
212         /* Saifun SA25F005 (non-buffered flash) */
213         /* strap, cfg1, & write1 need updates */
214         {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
215          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
216          SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
217          "Non-buffered flash (64kB)"},
218         /* Fast EEPROM */
219         {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
220          BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
221          SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
222          "EEPROM - fast"},
223         /* Expansion entry 1001 */
224         {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
225          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
226          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
227          "Entry 1001"},
228         /* Expansion entry 1010 */
229         {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
230          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
231          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
232          "Entry 1010"},
233         /* ATMEL AT45DB011B (buffered flash) */
234         {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
235          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
236          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
237          "Buffered flash (128kB)"},
238         /* Expansion entry 1100 */
239         {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
240          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
241          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
242          "Entry 1100"},
243         /* Expansion entry 1101 */
244         {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
245          NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
246          SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
247          "Entry 1101"},
248         /* Atmel Expansion entry 1110 */
249         {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
250          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
251          BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
252          "Entry 1110 (Atmel)"},
253         /* ATMEL AT45DB021B (buffered flash) */
254         {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
255          BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
256          BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
257          "Buffered flash (256kB)"},
258 };
259
260 /*
261  * The BCM5709 controllers transparently handle the
262  * differences between Atmel 264 byte pages and all
263  * flash devices which use 256 byte pages, so no
264  * logical-to-physical mapping is required in the
265  * driver.
266  */
267 static struct flash_spec flash_5709 = {
268         .flags          = BCE_NV_BUFFERED,
269         .page_bits      = BCM5709_FLASH_PAGE_BITS,
270         .page_size      = BCM5709_FLASH_PAGE_SIZE,
271         .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
272         .total_size     = BUFFERED_FLASH_TOTAL_SIZE * 2,
273         .name           = "5709/5716 buffered flash (256kB)",
274 };
275
276
277 /****************************************************************************/
278 /* DragonFly device entry points.                                           */
279 /****************************************************************************/
280 static int      bce_probe(device_t);
281 static int      bce_attach(device_t);
282 static int      bce_detach(device_t);
283 static void     bce_shutdown(device_t);
284 static int      bce_miibus_read_reg(device_t, int, int);
285 static int      bce_miibus_write_reg(device_t, int, int, int);
286 static void     bce_miibus_statchg(device_t);
287
288 /****************************************************************************/
289 /* BCE Register/Memory Access Routines                                      */
290 /****************************************************************************/
291 static uint32_t bce_reg_rd_ind(struct bce_softc *, uint32_t);
292 static void     bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
293 static void     bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
294 static uint32_t bce_shmem_rd(struct bce_softc *, uint32_t);
295 static void     bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
296
297 /****************************************************************************/
298 /* BCE NVRAM Access Routines                                                */
299 /****************************************************************************/
300 static int      bce_acquire_nvram_lock(struct bce_softc *);
301 static int      bce_release_nvram_lock(struct bce_softc *);
302 static void     bce_enable_nvram_access(struct bce_softc *);
303 static void     bce_disable_nvram_access(struct bce_softc *);
304 static int      bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
305                     uint32_t);
306 static int      bce_init_nvram(struct bce_softc *);
307 static int      bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
308 static int      bce_nvram_test(struct bce_softc *);
309
310 /****************************************************************************/
311 /* BCE DMA Allocate/Free Routines                                           */
312 /****************************************************************************/
313 static int      bce_dma_alloc(struct bce_softc *);
314 static void     bce_dma_free(struct bce_softc *);
315 static void     bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
316
317 /****************************************************************************/
318 /* BCE Firmware Synchronization and Load                                    */
319 /****************************************************************************/
320 static int      bce_fw_sync(struct bce_softc *, uint32_t);
321 static void     bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
322                     uint32_t, uint32_t);
323 static void     bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
324                     struct fw_info *);
325 static void     bce_start_cpu(struct bce_softc *, struct cpu_reg *);
326 static void     bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
327 static void     bce_start_rxp_cpu(struct bce_softc *);
328 static void     bce_init_rxp_cpu(struct bce_softc *);
329 static void     bce_init_txp_cpu(struct bce_softc *);
330 static void     bce_init_tpat_cpu(struct bce_softc *);
331 static void     bce_init_cp_cpu(struct bce_softc *);
332 static void     bce_init_com_cpu(struct bce_softc *);
333 static void     bce_init_cpus(struct bce_softc *);
334
335 static void     bce_stop(struct bce_softc *);
336 static int      bce_reset(struct bce_softc *, uint32_t);
337 static int      bce_chipinit(struct bce_softc *);
338 static int      bce_blockinit(struct bce_softc *);
339 static void     bce_probe_pci_caps(struct bce_softc *);
340 static void     bce_print_adapter_info(struct bce_softc *);
341 static void     bce_get_media(struct bce_softc *);
342 static void     bce_mgmt_init(struct bce_softc *);
343 static int      bce_init_ctx(struct bce_softc *);
344 static void     bce_get_mac_addr(struct bce_softc *);
345 static void     bce_set_mac_addr(struct bce_softc *);
346 static void     bce_set_rx_mode(struct bce_softc *);
347 static void     bce_coal_change(struct bce_softc *);
348 static void     bce_setup_serialize(struct bce_softc *);
349 static void     bce_serialize_skipmain(struct bce_softc *);
350 static void     bce_deserialize_skipmain(struct bce_softc *);
351
352 static int      bce_create_tx_ring(struct bce_tx_ring *);
353 static void     bce_destroy_tx_ring(struct bce_tx_ring *);
354 static void     bce_init_tx_context(struct bce_tx_ring *);
355 static int      bce_init_tx_chain(struct bce_tx_ring *);
356 static void     bce_free_tx_chain(struct bce_tx_ring *);
357 static void     bce_xmit(struct bce_tx_ring *);
358 static int      bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
359 static int      bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
360                     uint16_t *, uint16_t *);
361
362 static int      bce_create_rx_ring(struct bce_rx_ring *);
363 static void     bce_destroy_rx_ring(struct bce_rx_ring *);
364 static void     bce_init_rx_context(struct bce_rx_ring *);
365 static int      bce_init_rx_chain(struct bce_rx_ring *);
366 static void     bce_free_rx_chain(struct bce_rx_ring *);
367 static int      bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t *,
368                     uint32_t *, int);
369 static void     bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
370                     uint32_t *);
371
372 static void     bce_start(struct ifnet *, struct ifaltq_subque *);
373 static int      bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
374 static void     bce_watchdog(struct ifnet *);
375 static int      bce_ifmedia_upd(struct ifnet *);
376 static void     bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
377 static void     bce_init(void *);
378 #ifdef IFPOLL_ENABLE
379 static void     bce_npoll(struct ifnet *, struct ifpoll_info *);
380 static void     bce_npoll_rx(struct ifnet *, void *, int);
381 static void     bce_npoll_tx(struct ifnet *, void *, int);
382 static void     bce_npoll_status(struct ifnet *);
383 #endif
384 static void     bce_serialize(struct ifnet *, enum ifnet_serialize);
385 static void     bce_deserialize(struct ifnet *, enum ifnet_serialize);
386 static int      bce_tryserialize(struct ifnet *, enum ifnet_serialize);
387 #ifdef INVARIANTS
388 static void     bce_serialize_assert(struct ifnet *, enum ifnet_serialize,
389                     boolean_t);
390 #endif
391
392 static void     bce_intr(struct bce_softc *);
393 static void     bce_intr_legacy(void *);
394 static void     bce_intr_msi(void *);
395 static void     bce_intr_msi_oneshot(void *);
396 static void     bce_tx_intr(struct bce_tx_ring *, uint16_t);
397 static void     bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
398 static void     bce_phy_intr(struct bce_softc *);
399 static void     bce_disable_intr(struct bce_softc *);
400 static void     bce_enable_intr(struct bce_softc *);
401 static void     bce_reenable_intr(struct bce_softc *);
402 static void     bce_check_msi(void *);
403
404 static void     bce_stats_update(struct bce_softc *);
405 static void     bce_tick(void *);
406 static void     bce_tick_serialized(struct bce_softc *);
407 static void     bce_pulse(void *);
408
409 static void     bce_add_sysctls(struct bce_softc *);
410 static int      bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
411 static int      bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
412 static int      bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
413 static int      bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
414 static int      bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
415 static int      bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
416 static int      bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
417 static int      bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
418 #ifdef IFPOLL_ENABLE
419 static int      bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
420 #endif
421 static int      bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
422                     uint32_t *, uint32_t);
423
424 /*
425  * NOTE:
426  * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
427  * takes 1023 as the TX ticks limit.  However, using 1023 will
428  * cause 5708(B2) to generate extra interrupts (~2000/s) even when
429  * there is _no_ network activity on the NIC.
430  */
431 static uint32_t bce_tx_bds_int = 255;           /* bcm: 20 */
432 static uint32_t bce_tx_bds = 255;               /* bcm: 20 */
433 static uint32_t bce_tx_ticks_int = 1022;        /* bcm: 80 */
434 static uint32_t bce_tx_ticks = 1022;            /* bcm: 80 */
435 static uint32_t bce_rx_bds_int = 128;           /* bcm: 6 */
436 static uint32_t bce_rx_bds = 0;                 /* bcm: 6 */
437 static uint32_t bce_rx_ticks_int = 150;         /* bcm: 18 */
438 static uint32_t bce_rx_ticks = 150;             /* bcm: 18 */
439
440 static int      bce_tx_wreg = 8;
441
442 static int      bce_msi_enable = 1;
443
444 static int      bce_rx_pages = RX_PAGES_DEFAULT;
445 static int      bce_tx_pages = TX_PAGES_DEFAULT;
446
447 TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
448 TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
449 TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
450 TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
451 TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
452 TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
453 TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
454 TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
455 TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
456 TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
457 TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
458 TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
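/*
 * Editor's note (illustrative, not part of the original source): the
 * TUNABLE_INT() entries above are read from the kernel environment at
 * boot, so the defaults can be overridden from /boot/loader.conf, e.g.:
 *
 *   hw.bce.msi.enable="0"   (force legacy INTx instead of MSI)
 *   hw.bce.rx_ticks="100"   (example value for the RX coalescing ticks)
 */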
459
460 /****************************************************************************/
461 /* DragonFly device dispatch table.                                         */
462 /****************************************************************************/
463 static device_method_t bce_methods[] = {
464         /* Device interface */
465         DEVMETHOD(device_probe,         bce_probe),
466         DEVMETHOD(device_attach,        bce_attach),
467         DEVMETHOD(device_detach,        bce_detach),
468         DEVMETHOD(device_shutdown,      bce_shutdown),
469
470         /* bus interface */
471         DEVMETHOD(bus_print_child,      bus_generic_print_child),
472         DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
473
474         /* MII interface */
475         DEVMETHOD(miibus_readreg,       bce_miibus_read_reg),
476         DEVMETHOD(miibus_writereg,      bce_miibus_write_reg),
477         DEVMETHOD(miibus_statchg,       bce_miibus_statchg),
478
479         DEVMETHOD_END
480 };
481
482 static driver_t bce_driver = {
483         "bce",
484         bce_methods,
485         sizeof(struct bce_softc)
486 };
487
488 static devclass_t bce_devclass;
489
490
491 DECLARE_DUMMY_MODULE(if_bce);
492 MODULE_DEPEND(bce, miibus, 1, 1, 1);
493 DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
494 DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
495
496
497 /****************************************************************************/
498 /* Device probe function.                                                   */
499 /*                                                                          */
500 /* Compares the device to the driver's list of supported devices and        */
501 /* reports back to the OS whether this is the right driver for the device.  */
502 /*                                                                          */
503 /* Returns:                                                                 */
504 /*   0 on success, positive value on failure.                               */
505 /****************************************************************************/
506 static int
507 bce_probe(device_t dev)
508 {
509         struct bce_type *t;
510         uint16_t vid, did, svid, sdid;
511
512         /* Get the data for the device to be probed. */
513         vid  = pci_get_vendor(dev);
514         did  = pci_get_device(dev);
515         svid = pci_get_subvendor(dev);
516         sdid = pci_get_subdevice(dev);
517
518         /* Look through the list of known devices for a match. */
519         for (t = bce_devs; t->bce_name != NULL; ++t) {
520                 if (vid == t->bce_vid && did == t->bce_did && 
521                     (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
522                     (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
523                         uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
524                         char *descbuf;
525
526                         descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
527
528                         /* Print out the device identity. */
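                        /*
                         * Editor's note: the low PCI revision byte encodes
                         * the chip stepping; e.g. revid 0x20 is reported
                         * as "(C0)" by the format below.
                         */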
529                         ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
530                                   t->bce_name,
531                                   ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
532
533                         device_set_desc_copy(dev, descbuf);
534                         kfree(descbuf, M_TEMP);
535                         return 0;
536                 }
537         }
538         return ENXIO;
539 }
540
541
542 /****************************************************************************/
543 /* Adapter Information Print Function.                                      */
544 /*                                                                          */
545 /* Prints the ASIC revision, bus type and speed, bootcode/firmware version, */
546 /* and device feature flags for the controller.                             */
547 /*                                                                          */
548 /* Returns:                                                                 */
549 /*   None.                                                                  */
550 /****************************************************************************/
551 static void
552 bce_print_adapter_info(struct bce_softc *sc)
553 {
554         device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);
555
556         kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
557                 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
558
559         /* Bus info. */
560         if (sc->bce_flags & BCE_PCIE_FLAG) {
561                 kprintf("Bus (PCIe x%d, ", sc->link_width);
562                 switch (sc->link_speed) {
563                 case 1:
564                         kprintf("2.5Gbps); ");
565                         break;
566                 case 2:
567                         kprintf("5Gbps); ");
568                         break;
569                 default:
570                         kprintf("Unknown link speed); ");
571                         break;
572                 }
573         } else {
574                 kprintf("Bus (PCI%s, %s, %dMHz); ",
575                     ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
576                     ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
577                     sc->bus_speed_mhz);
578         }
579
580         /* Firmware version and device features. */
581         kprintf("B/C (%s)", sc->bce_bc_ver);
582
583         if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
584             (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
585                 kprintf("; Flags(");
586                 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
587                         kprintf("MFW[%s]", sc->bce_mfw_ver);
588                 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
589                         kprintf(" 2.5G");
590                 kprintf(")");
591         }
592         kprintf("\n");
593 }
594
595
596 /****************************************************************************/
597 /* PCI Capabilities Probe Function.                                         */
598 /*                                                                          */
599 /* Walks the PCI capabilities list for the device to find what features are */
600 /* supported.                                                               */
601 /*                                                                          */
602 /* Returns:                                                                 */
603 /*   None.                                                                  */
604 /****************************************************************************/
605 static void
606 bce_probe_pci_caps(struct bce_softc *sc)
607 {
608         device_t dev = sc->bce_dev;
609         uint8_t ptr;
610
611         if (pci_is_pcix(dev))
612                 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
613
614         ptr = pci_get_pciecap_ptr(dev);
615         if (ptr) {
616                 uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);
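                /*
                 * Editor's note: offset 0x12 within the PCIe capability is
                 * the Link Status register; bits 3:0 hold the link speed
                 * code and bits 9:4 the negotiated link width, which is
                 * what the two extractions below pull out.
                 */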
617
618                 sc->link_speed = link_status & 0xf;
619                 sc->link_width = (link_status >> 4) & 0x3f;
620                 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
621                 sc->bce_flags |= BCE_PCIE_FLAG;
622         }
623 }
624
625
626 /****************************************************************************/
627 /* Device attach function.                                                  */
628 /*                                                                          */
629 /* Allocates device resources, performs secondary chip identification,      */
630 /* resets and initializes the hardware, and initializes driver instance     */
631 /* variables.                                                               */
632 /*                                                                          */
633 /* Returns:                                                                 */
634 /*   0 on success, positive value on failure.                               */
635 /****************************************************************************/
636 static int
637 bce_attach(device_t dev)
638 {
639         struct bce_softc *sc = device_get_softc(dev);
640         struct ifnet *ifp = &sc->arpcom.ac_if;
641         uint32_t val;
642         u_int irq_flags;
643         void (*irq_handle)(void *);
644         int rid, rc = 0;
645         int i, j;
646         struct mii_probe_args mii_args;
647         uintptr_t mii_priv = 0;
648 #ifdef IFPOLL_ENABLE
649         int offset, offset_def;
650 #endif
651
652         sc->bce_dev = dev;
653         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
654
655         lwkt_serialize_init(&sc->main_serialize);
656
657         pci_enable_busmaster(dev);
658
659         bce_probe_pci_caps(sc);
660
661         /* Allocate PCI memory resources. */
662         rid = PCIR_BAR(0);
663         sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
664                                                  RF_ACTIVE | PCI_RF_DENSE);
665         if (sc->bce_res_mem == NULL) {
666                 device_printf(dev, "PCI memory allocation failed\n");
667                 return ENXIO;
668         }
669         sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
670         sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);
671
672         /*
673          * Configure byte swap and enable indirect register access.
674          * Rely on CPU to do target byte swapping on big endian systems.
675  * Access to registers outside of PCI configuration space is not
676          * valid until this is done.
677          */
678         pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
679                          BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
680                          BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);
681
682         /* Save ASIC revision info. */
683         sc->bce_chipid = REG_RD(sc, BCE_MISC_ID);
684
685         /* Weed out any non-production controller revisions. */
686         switch (BCE_CHIP_ID(sc)) {
687         case BCE_CHIP_ID_5706_A0:
688         case BCE_CHIP_ID_5706_A1:
689         case BCE_CHIP_ID_5708_A0:
690         case BCE_CHIP_ID_5708_B0:
691         case BCE_CHIP_ID_5709_A0:
692         case BCE_CHIP_ID_5709_B0:
693         case BCE_CHIP_ID_5709_B1:
694 #ifdef foo
695         /* 5709C B2 seems to work fine */
696         case BCE_CHIP_ID_5709_B2:
697 #endif
698                 device_printf(dev, "Unsupported chip id 0x%08x!\n",
699                               BCE_CHIP_ID(sc));
700                 rc = ENODEV;
701                 goto fail;
702         }
703
704         mii_priv |= BRGPHY_FLAG_WIRESPEED;
705         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
706                 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
707                     BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
708                         mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
709         } else {
710                 mii_priv |= BRGPHY_FLAG_BER_BUG;
711         }
712
713         /*
714          * Find the base address for shared memory access.
715          * Newer versions of bootcode use a signature and offset
716          * while older versions use a fixed address.
717          */
718         val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
719         if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
720             BCE_SHM_HDR_SIGNATURE_SIG) {
721                 /* Multi-port devices use different offsets in shared memory. */
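                /*
                 * (Editor's note: each PCI function fetches its own 32-bit
                 * base pointer from BCE_SHM_HDR_ADDR_0 + 4 * function,
                 * hence the "<< 2" below.)
                 */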
722                 sc->bce_shmem_base = REG_RD_IND(sc,
723                     BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
724         } else {
725                 sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
726         }
727
728         /* Fetch the bootcode revision. */
729         val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
730         for (i = 0, j = 0; i < 3; i++) {
731                 uint8_t num;
732                 int k, skip0;
733
734                 num = (uint8_t)(val >> (24 - (i * 8)));
735                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
736                         if (num >= k || !skip0 || k == 1) {
737                                 sc->bce_bc_ver[j++] = (num / k) + '0';
738                                 skip0 = 0;
739                         }
740                 }
741                 if (i != 2)
742                         sc->bce_bc_ver[j++] = '.';
743         }
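        /*
         * Editor's note: the loop above unpacks the 32-bit bootcode
         * revision (one decimal component per byte, most significant
         * byte first) into a dotted string without leading zeroes,
         * e.g. a value of 0x04060011 becomes "4.6.0" (the lowest byte
         * is unused).
         */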
744
745         /* Check if any management firmware is running. */
746         val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
747         if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
748                 sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
749
750                 /* Allow time for firmware to enter the running state. */
751                 for (i = 0; i < 30; i++) {
752                         val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
753                         if (val & BCE_CONDITION_MFW_RUN_MASK)
754                                 break;
755                         DELAY(10000);
756                 }
757         }
758
759         /* Check the current bootcode state. */
760         val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
761             BCE_CONDITION_MFW_RUN_MASK;
762         if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
763             val != BCE_CONDITION_MFW_RUN_NONE) {
764                 uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);
765
766                 for (i = 0, j = 0; j < 3; j++) {
767                         val = bce_reg_rd_ind(sc, addr + j * 4);
768                         val = bswap32(val);
769                         memcpy(&sc->bce_mfw_ver[i], &val, 4);
770                         i += 4;
771                 }
772         }
773
774         /* Get PCI bus information (speed and type). */
775         val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
776         if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
777                 uint32_t clkreg;
778
779                 sc->bce_flags |= BCE_PCIX_FLAG;
780
781                 clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
782                          BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
783                 switch (clkreg) {
784                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
785                         sc->bus_speed_mhz = 133;
786                         break;
787
788                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
789                         sc->bus_speed_mhz = 100;
790                         break;
791
792                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
793                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
794                         sc->bus_speed_mhz = 66;
795                         break;
796
797                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
798                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
799                         sc->bus_speed_mhz = 50;
800                         break;
801
802                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
803                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
804                 case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
805                         sc->bus_speed_mhz = 33;
806                         break;
807                 }
808         } else {
809                 if (val & BCE_PCICFG_MISC_STATUS_M66EN)
810                         sc->bus_speed_mhz = 66;
811                 else
812                         sc->bus_speed_mhz = 33;
813         }
814
815         if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
816                 sc->bce_flags |= BCE_PCI_32BIT_FLAG;
817
818         /* Reset the controller. */
819         rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
820         if (rc != 0)
821                 goto fail;
822
823         /* Initialize the controller. */
824         rc = bce_chipinit(sc);
825         if (rc != 0) {
826                 device_printf(dev, "Controller initialization failed!\n");
827                 goto fail;
828         }
829
830         /* Perform NVRAM test. */
831         rc = bce_nvram_test(sc);
832         if (rc != 0) {
833                 device_printf(dev, "NVRAM test failed!\n");
834                 goto fail;
835         }
836
837         /* Fetch the permanent Ethernet MAC address. */
838         bce_get_mac_addr(sc);
839
840         /*
841          * Trip points control how many BDs
842          * should be ready before generating an
843          * interrupt, while ticks control how long
844          * a BD can sit in the chain before
845          * generating an interrupt.  Set the default 
846          * values for the RX and TX rings.
847          */
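        /*
         * Illustrative example (editor's addition): with the non-debug
         * defaults assigned below, bce_rx_bds_int = 128 and
         * bce_rx_ticks_int = 150 mean an RX interrupt fires once 128
         * receive BDs have completed or a completed BD has waited 150
         * coalescing ticks (roughly microseconds), whichever comes first.
         */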
848
849 #ifdef BCE_DEBUG
850         /* Force more frequent interrupts. */
851         sc->bce_tx_quick_cons_trip_int = 1;
852         sc->bce_tx_quick_cons_trip     = 1;
853         sc->bce_tx_ticks_int           = 0;
854         sc->bce_tx_ticks               = 0;
855
856         sc->bce_rx_quick_cons_trip_int = 1;
857         sc->bce_rx_quick_cons_trip     = 1;
858         sc->bce_rx_ticks_int           = 0;
859         sc->bce_rx_ticks               = 0;
860 #else
861         sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
862         sc->bce_tx_quick_cons_trip     = bce_tx_bds;
863         sc->bce_tx_ticks_int           = bce_tx_ticks_int;
864         sc->bce_tx_ticks               = bce_tx_ticks;
865
866         sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
867         sc->bce_rx_quick_cons_trip     = bce_rx_bds;
868         sc->bce_rx_ticks_int           = bce_rx_ticks_int;
869         sc->bce_rx_ticks               = bce_rx_ticks;
870 #endif
871
872         /* Update statistics once every second. */
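        /*
         * (Editor's assumption: the low 8 bits are masked off because the
         * statistics ticks field of the coalescing register only uses
         * bits 8-23; the result is still ~1,000,000us, i.e. one second.)
         */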
873         sc->bce_stats_ticks = 1000000 & 0xffff00;
874
875         /* Find the media type for the adapter. */
876         bce_get_media(sc);
877
878         /* Find out RX/TX ring count */
879         sc->ring_cnt = 1; /* XXX */
880
881         /* Allocate DMA memory resources. */
882         rc = bce_dma_alloc(sc);
883         if (rc != 0) {
884                 device_printf(dev, "DMA resource allocation failed!\n");
885                 goto fail;
886         }
887
888 #ifdef IFPOLL_ENABLE
889         /*
890          * NPOLLING RX/TX CPU offset
891          */
892         if (sc->ring_cnt == ncpus2) {
893                 offset = 0;
894         } else {
895                 offset_def = (sc->ring_cnt * device_get_unit(dev)) % ncpus2;
896                 offset = device_getenv_int(dev, "npoll.offset", offset_def);
897                 if (offset >= ncpus2 ||
898                     offset % sc->ring_cnt != 0) {
899                         device_printf(dev, "invalid npoll.offset %d, use %d\n",
900                             offset, offset_def);
901                         offset = offset_def;
902                 }
903         }
904         sc->npoll_ofs = offset;
905 #endif
906
907         /* Allocate PCI IRQ resources. */
908         sc->bce_irq_type = pci_alloc_1intr(dev, bce_msi_enable,
909             &sc->bce_irq_rid, &irq_flags);
910
911         sc->bce_res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
912             &sc->bce_irq_rid, irq_flags);
913         if (sc->bce_res_irq == NULL) {
914                 device_printf(dev, "PCI map interrupt failed\n");
915                 rc = ENXIO;
916                 goto fail;
917         }
918
919         if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
920                 irq_handle = bce_intr_legacy;
921         } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
922                 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
923                         irq_handle = bce_intr_msi_oneshot;
924                         sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
925                 } else {
926                         irq_handle = bce_intr_msi;
927                         sc->bce_flags |= BCE_CHECK_MSI_FLAG;
928                 }
929         } else {
930                 panic("%s: unsupported intr type %d",
931                     device_get_nameunit(dev), sc->bce_irq_type);
932         }
933
934         /* Setup serializer */
935         bce_setup_serialize(sc);
936
937         /* Initialize the ifnet interface. */
938         ifp->if_softc = sc;
939         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
940         ifp->if_ioctl = bce_ioctl;
941         ifp->if_start = bce_start;
942         ifp->if_init = bce_init;
943         ifp->if_watchdog = bce_watchdog;
944         ifp->if_serialize = bce_serialize;
945         ifp->if_deserialize = bce_deserialize;
946         ifp->if_tryserialize = bce_tryserialize;
947 #ifdef INVARIANTS
948         ifp->if_serialize_assert = bce_serialize_assert;
949 #endif
950 #ifdef IFPOLL_ENABLE
951         ifp->if_npoll = bce_npoll;
952 #endif
953
954         ifp->if_mtu = ETHERMTU;
955         ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
956         ifp->if_capabilities = BCE_IF_CAPABILITIES;
957         ifp->if_capenable = ifp->if_capabilities;
958
959         if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
960                 ifp->if_baudrate = IF_Gbps(2.5);
961         else
962                 ifp->if_baudrate = IF_Gbps(1);
963
964         ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
965         ifq_set_ready(&ifp->if_snd);
966
967         /*
968          * Look for our PHY.
969          */
970         mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
971         mii_args.mii_probemask = 1 << sc->bce_phy_addr;
972         mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
973         mii_args.mii_priv = mii_priv;
974
975         rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
976         if (rc != 0) {
977                 device_printf(dev, "PHY probe failed!\n");
978                 goto fail;
979         }
980
981         /* Attach to the Ethernet interface list. */
982         ether_ifattach(ifp, sc->eaddr, NULL);
983
984         callout_init_mp(&sc->bce_tick_callout);
985         callout_init_mp(&sc->bce_pulse_callout);
986         callout_init_mp(&sc->bce_ckmsi_callout);
987
988         /* Hookup IRQ last. */
989         rc = bus_setup_intr(dev, sc->bce_res_irq, INTR_MPSAFE, irq_handle, sc,
990             &sc->bce_intrhand, &sc->main_serialize);
991         if (rc != 0) {
992                 device_printf(dev, "Failed to setup IRQ!\n");
993                 ether_ifdetach(ifp);
994                 goto fail;
995         }
996
997         sc->bce_intr_cpuid = rman_get_cpuid(sc->bce_res_irq);
998         ifq_set_cpuid(&ifp->if_snd, sc->bce_intr_cpuid);
999
1000         /* Add the supported sysctls to the kernel. */
1001         bce_add_sysctls(sc);
1002
1003         /*
1004          * The chip reset earlier notified the bootcode that
1005          * a driver is present.  We now need to start our pulse
1006          * routine so that the bootcode is reminded that we're
1007          * still running.
1008          */
1009         bce_pulse(sc);
1010
1011         /* Get the firmware running so IPMI still works */
1012         bce_mgmt_init(sc);
1013
1014         if (bootverbose)
1015                 bce_print_adapter_info(sc);
1016
1017         return 0;
1018 fail:
1019         bce_detach(dev);
1020         return(rc);
1021 }
1022
1023
1024 /****************************************************************************/
1025 /* Device detach function.                                                  */
1026 /*                                                                          */
1027 /* Stops the controller, resets the controller, and releases resources.     */
1028 /*                                                                          */
1029 /* Returns:                                                                 */
1030 /*   0 on success, positive value on failure.                               */
1031 /****************************************************************************/
1032 static int
1033 bce_detach(device_t dev)
1034 {
1035         struct bce_softc *sc = device_get_softc(dev);
1036
1037         if (device_is_attached(dev)) {
1038                 struct ifnet *ifp = &sc->arpcom.ac_if;
1039                 uint32_t msg;
1040
1041                 ifnet_serialize_all(ifp);
1042
1043                 /* Stop and reset the controller. */
1044                 callout_stop(&sc->bce_pulse_callout);
1045                 bce_stop(sc);
1046                 if (sc->bce_flags & BCE_NO_WOL_FLAG)
1047                         msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1048                 else
1049                         msg = BCE_DRV_MSG_CODE_UNLOAD;
1050                 bce_reset(sc, msg);
1051                 bus_teardown_intr(dev, sc->bce_res_irq, sc->bce_intrhand);
1052
1053                 ifnet_deserialize_all(ifp);
1054
1055                 ether_ifdetach(ifp);
1056         }
1057
1058         /* If we have a child device on the MII bus remove it too. */
1059         if (sc->bce_miibus)
1060                 device_delete_child(dev, sc->bce_miibus);
1061         bus_generic_detach(dev);
1062
1063         if (sc->bce_res_irq != NULL) {
1064                 bus_release_resource(dev, SYS_RES_IRQ, sc->bce_irq_rid,
1065                     sc->bce_res_irq);
1066         }
1067
1068         if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
1069                 pci_release_msi(dev);
1070
1071         if (sc->bce_res_mem != NULL) {
1072                 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
1073                                      sc->bce_res_mem);
1074         }
1075
1076         bce_dma_free(sc);
1077
1078         if (sc->bce_sysctl_tree != NULL)
1079                 sysctl_ctx_free(&sc->bce_sysctl_ctx);
1080
1081         return 0;
1082 }
1083
1084
1085 /****************************************************************************/
1086 /* Device shutdown function.                                                */
1087 /*                                                                          */
1088 /* Stops and resets the controller.                                         */
1089 /*                                                                          */
1090 /* Returns:                                                                 */
1091 /*   Nothing                                                                */
1092 /****************************************************************************/
1093 static void
1094 bce_shutdown(device_t dev)
1095 {
1096         struct bce_softc *sc = device_get_softc(dev);
1097         struct ifnet *ifp = &sc->arpcom.ac_if;
1098         uint32_t msg;
1099
1100         ifnet_serialize_all(ifp);
1101
1102         bce_stop(sc);
1103         if (sc->bce_flags & BCE_NO_WOL_FLAG)
1104                 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1105         else
1106                 msg = BCE_DRV_MSG_CODE_UNLOAD;
1107         bce_reset(sc, msg);
1108
1109         ifnet_deserialize_all(ifp);
1110 }
1111
1112
1113 /****************************************************************************/
1114 /* Indirect register read.                                                  */
1115 /*                                                                          */
1116 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
1117 /* configuration space.  Using this mechanism avoids issues with posted     */
1118 /* reads but is much slower than memory-mapped I/O.                         */
1119 /*                                                                          */
1120 /* Returns:                                                                 */
1121 /*   The value of the register.                                             */
1122 /****************************************************************************/
1123 static uint32_t
1124 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
1125 {
1126         device_t dev = sc->bce_dev;
1127
1128         pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1129         return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1130 }
1131
1132
1133 /****************************************************************************/
1134 /* Indirect register write.                                                 */
1135 /*                                                                          */
1136 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1137 /* configuration space.  Using this mechanism avoids issues with posted     */
1138 /* writes but is much slower than memory-mapped I/O.                        */
1139 /*                                                                          */
1140 /* Returns:                                                                 */
1141 /*   Nothing.                                                               */
1142 /****************************************************************************/
1143 static void
1144 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
1145 {
1146         device_t dev = sc->bce_dev;
1147
1148         pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1149         pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1150 }
1151
1152
1153 /****************************************************************************/
1154 /* Shared memory write.                                                     */
1155 /*                                                                          */
1156 /* Writes NetXtreme II shared memory region.                                */
1157 /*                                                                          */
1158 /* Returns:                                                                 */
1159 /*   Nothing.                                                               */
1160 /****************************************************************************/
1161 static void
1162 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
1163 {
1164         bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1165 }
1166
1167
1168 /****************************************************************************/
1169 /* Shared memory read.                                                      */
1170 /*                                                                          */
1171 /* Reads NetXtreme II shared memory region.                                 */
1172 /*                                                                          */
1173 /* Returns:                                                                 */
1174 /*   The 32 bit value read.                                                 */
1175 /****************************************************************************/
1176 static uint32_t
1177 bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
1178 {
1179         return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
1180 }
1181
1182
1183 /****************************************************************************/
1184 /* Context memory write.                                                    */
1185 /*                                                                          */
1186 /* The NetXtreme II controller uses context memory to track connection      */
1187 /* information for L2 and higher network protocols.                         */
1188 /*                                                                          */
1189 /* Returns:                                                                 */
1190 /*   Nothing.                                                               */
1191 /****************************************************************************/
1192 static void
1193 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
1194     uint32_t ctx_val)
1195 {
1196         uint32_t idx, offset = ctx_offset + cid_addr;
1197         uint32_t val, retry_cnt = 5;
1198
1199         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1200             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1201                 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1202                 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1203
1204                 for (idx = 0; idx < retry_cnt; idx++) {
1205                         val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1206                         if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1207                                 break;
1208                         DELAY(5);
1209                 }
1210
1211                 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
1212                         device_printf(sc->bce_dev,
1213                             "Unable to write CTX memory: "
1214                             "cid_addr = 0x%08X, offset = 0x%08X!\n",
1215                             cid_addr, ctx_offset);
1216                 }
1217         } else {
1218                 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1219                 REG_WR(sc, BCE_CTX_DATA, ctx_val);
1220         }
1221 }
1222
1223
1224 /****************************************************************************/
1225 /* PHY register read.                                                       */
1226 /*                                                                          */
1227 /* Implements register reads on the MII bus.                                */
1228 /*                                                                          */
1229 /* Returns:                                                                 */
1230 /*   The value of the register.                                             */
1231 /****************************************************************************/
1232 static int
1233 bce_miibus_read_reg(device_t dev, int phy, int reg)
1234 {
1235         struct bce_softc *sc = device_get_softc(dev);
1236         uint32_t val;
1237         int i;
1238
1239         /* Make sure we are accessing the correct PHY address. */
1240         KASSERT(phy == sc->bce_phy_addr,
1241             ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1242
1243         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1244                 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1245                 val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1246
1247                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1248                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1249
1250                 DELAY(40);
1251         }
1252
1253         val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
1254               BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
1255               BCE_EMAC_MDIO_COMM_START_BUSY;
1256         REG_WR(sc, BCE_EMAC_MDIO_COMM, val);
1257
1258         for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1259                 DELAY(10);
1260
1261                 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1262                 if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1263                         DELAY(5);
1264
1265                         val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1266                         val &= BCE_EMAC_MDIO_COMM_DATA;
1267                         break;
1268                 }
1269         }
1270
1271         if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
1272                 if_printf(&sc->arpcom.ac_if,
1273                           "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
1274                           phy, reg);
1275                 val = 0x0;
1276         } else {
1277                 val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1278         }
1279
1280         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1281                 val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1282                 val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1283
1284                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
1285                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1286
1287                 DELAY(40);
1288         }
1289         return (val & 0xffff);
1290 }
1291
1292
1293 /****************************************************************************/
1294 /* PHY register write.                                                      */
1295 /*                                                                          */
1296 /* Implements register writes on the MII bus.                               */
1297 /*                                                                          */
1298 /* Returns:                                                                 */
1299 /*   0 on success.                                                          */
1300 /****************************************************************************/
1301 static int
1302 bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
1303 {
1304         struct bce_softc *sc = device_get_softc(dev);
1305         uint32_t val1;
1306         int i;
1307
1308         /* Make sure we are accessing the correct PHY address. */
1309         KASSERT(phy == sc->bce_phy_addr,
1310             ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));
1311
1312         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1313                 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1314                 val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;
1315
1316                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1317                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1318
1319                 DELAY(40);
1320         }
1321
1322         val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
1323                 BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
1324                 BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
1325         REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);
1326
1327         for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
1328                 DELAY(10);
1329
1330                 val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
1331                 if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
1332                         DELAY(5);
1333                         break;
1334                 }
1335         }
1336
1337         if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
1338                 if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");
1339
1340         if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
1341                 val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
1342                 val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;
1343
1344                 REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
1345                 REG_RD(sc, BCE_EMAC_MDIO_MODE);
1346
1347                 DELAY(40);
1348         }
1349         return 0;
1350 }
1351
1352
1353 /****************************************************************************/
1354 /* MII bus status change.                                                   */
1355 /*                                                                          */
1356 /* Called by the MII bus driver when the PHY establishes link to set the    */
1357 /* MAC interface registers.                                                 */
1358 /*                                                                          */
1359 /* Returns:                                                                 */
1360 /*   Nothing.                                                               */
1361 /****************************************************************************/
1362 static void
1363 bce_miibus_statchg(device_t dev)
1364 {
1365         struct bce_softc *sc = device_get_softc(dev);
1366         struct mii_data *mii = device_get_softc(sc->bce_miibus);
1367
1368         BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1369
1370         /*
1371          * Set MII or GMII interface based on the speed negotiated
1372          * by the PHY.
1373          */
1374         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 
1375             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1376                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1377         } else {
1378                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1379         }
1380
1381         /*
1382          * Set half or full duplex based on the duplex mode negotiated
1383          * by the PHY.
1384          */
1385         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1386                 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1387         } else {
1388                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1389         }
1390 }
1391
1392
1393 /****************************************************************************/
1394 /* Acquire NVRAM lock.                                                      */
1395 /*                                                                          */
1396 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
1397 /* Lock 1 is used by the firmware and lock 2 is used by the driver; the     */
1398 /* remaining arbitration locks are not touched by this driver.              */
1399 /*                                                                          */
1400 /* Returns:                                                                 */
1401 /*   0 on success, positive value on failure.                               */
1402 /****************************************************************************/
1403 static int
1404 bce_acquire_nvram_lock(struct bce_softc *sc)
1405 {
1406         uint32_t val;
1407         int j;
1408
1409         /* Request access to the flash interface. */
1410         REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1411         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1412                 val = REG_RD(sc, BCE_NVM_SW_ARB);
1413                 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1414                         break;
1415
1416                 DELAY(5);
1417         }
1418
1419         if (j >= NVRAM_TIMEOUT_COUNT) {
1420                 return EBUSY;
1421         }
1422         return 0;
1423 }
1424
1425
1426 /****************************************************************************/
1427 /* Release NVRAM lock.                                                      */
1428 /*                                                                          */
1429 /* When the caller is finished accessing NVRAM the lock must be released.   */
1430 /* Lock 1 is used by the firmware and lock 2 is used by the driver; the     */
1431 /* remaining arbitration locks are not touched by this driver.              */
1432 /*                                                                          */
1433 /* Returns:                                                                 */
1434 /*   0 on success, positive value on failure.                               */
1435 /****************************************************************************/
1436 static int
1437 bce_release_nvram_lock(struct bce_softc *sc)
1438 {
1439         int j;
1440         uint32_t val;
1441
1442         /*
1443          * Relinquish nvram interface.
1444          */
1445         REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1446
1447         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1448                 val = REG_RD(sc, BCE_NVM_SW_ARB);
1449                 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1450                         break;
1451
1452                 DELAY(5);
1453         }
1454
1455         if (j >= NVRAM_TIMEOUT_COUNT) {
1456                 return EBUSY;
1457         }
1458         return 0;
1459 }
1460
1461
1462 /****************************************************************************/
1463 /* Enable NVRAM access.                                                     */
1464 /*                                                                          */
1465 /* Before accessing NVRAM for read or write operations the caller must      */
1466 /* enable NVRAM access.                                                      */
1467 /*                                                                          */
1468 /* Returns:                                                                 */
1469 /*   Nothing.                                                               */
1470 /****************************************************************************/
1471 static void
1472 bce_enable_nvram_access(struct bce_softc *sc)
1473 {
1474         uint32_t val;
1475
1476         val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1477         /* Enable both bits, even on read. */
1478         REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1479                val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1480 }
1481
1482
1483 /****************************************************************************/
1484 /* Disable NVRAM access.                                                    */
1485 /*                                                                          */
1486 /* When the caller is finished accessing NVRAM access must be disabled.     */
1487 /*                                                                          */
1488 /* Returns:                                                                 */
1489 /*   Nothing.                                                               */
1490 /****************************************************************************/
1491 static void
1492 bce_disable_nvram_access(struct bce_softc *sc)
1493 {
1494         uint32_t val;
1495
1496         val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1497
1498         /* Disable both bits, even after read. */
1499         REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1500                val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1501 }
1502
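/*
 * Illustrative sketch only (not compiled): the canonical NVRAM access
 * sequence, as used by bce_nvram_read() below, brackets the actual dword
 * reads with the lock and access-enable helpers:
 *
 *	if ((rc = bce_acquire_nvram_lock(sc)) != 0)
 *		return rc;
 *	bce_enable_nvram_access(sc);
 *	rc = bce_nvram_read_dword(sc, offset, buf,
 *	    BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST);
 *	bce_disable_nvram_access(sc);
 *	bce_release_nvram_lock(sc);
 */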
1503
1504 /****************************************************************************/
1505 /* Read a dword (32 bits) from NVRAM.                                       */
1506 /*                                                                          */
1507 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1508 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1509 /*                                                                          */
1510 /* Returns:                                                                 */
1511 /*   0 on success and the 32 bit value read, positive value on failure.     */
1512 /****************************************************************************/
1513 static int
1514 bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
1515                      uint32_t cmd_flags)
1516 {
1517         uint32_t cmd;
1518         int i, rc = 0;
1519
1520         /* Build the command word. */
1521         cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;
1522
1523         /* Calculate the offset for buffered flash. */
1524         if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
1525                 offset = ((offset / sc->bce_flash_info->page_size) <<
1526                           sc->bce_flash_info->page_bits) +
1527                          (offset % sc->bce_flash_info->page_size);
1528         }
1529
1530         /*
1531          * Clear the DONE bit separately, set the address to read,
1532          * and issue the read.
1533          */
1534         REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
1535         REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
1536         REG_WR(sc, BCE_NVM_COMMAND, cmd);
1537
1538         /* Wait for completion. */
1539         for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
1540                 uint32_t val;
1541
1542                 DELAY(5);
1543
1544                 val = REG_RD(sc, BCE_NVM_COMMAND);
1545                 if (val & BCE_NVM_COMMAND_DONE) {
1546                         val = REG_RD(sc, BCE_NVM_READ);
1547
1548                         val = be32toh(val);
1549                         memcpy(ret_val, &val, 4);
1550                         break;
1551                 }
1552         }
1553
1554         /* Check for errors. */
1555         if (i >= NVRAM_TIMEOUT_COUNT) {
1556                 if_printf(&sc->arpcom.ac_if,
1557                           "Timeout error reading NVRAM at offset 0x%08X!\n",
1558                           offset);
1559                 rc = EBUSY;
1560         }
1561         return rc;
1562 }
1563
1564
1565 /****************************************************************************/
1566 /* Initialize NVRAM access.                                                 */
1567 /*                                                                          */
1568 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1569 /* access that device.                                                      */
1570 /*                                                                          */
1571 /* Returns:                                                                 */
1572 /*   0 on success, positive value on failure.                               */
1573 /****************************************************************************/
1574 static int
1575 bce_init_nvram(struct bce_softc *sc)
1576 {
1577         uint32_t val;
1578         int j, entry_count, rc = 0;
1579         const struct flash_spec *flash;
1580
1581         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1582             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1583                 sc->bce_flash_info = &flash_5709;
1584                 goto bce_init_nvram_get_flash_size;
1585         }
1586
1587         /* Determine the selected interface. */
1588         val = REG_RD(sc, BCE_NVM_CFG1);
1589
1590         entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
1591
1592         /*
1593          * Flash reconfiguration is required to support additional
1594          * NVRAM devices not directly supported in hardware.
1595          * Check if the flash interface was reconfigured
1596          * by the bootcode.
1597          */
1598
1599         if (val & 0x40000000) {
1600                 /* Flash interface reconfigured by bootcode. */
1601                 for (j = 0, flash = flash_table; j < entry_count;
1602                      j++, flash++) {
1603                         if ((val & FLASH_BACKUP_STRAP_MASK) ==
1604                             (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
1605                                 sc->bce_flash_info = flash;
1606                                 break;
1607                         }
1608                 }
1609         } else {
1610                 /* Flash interface not yet reconfigured. */
1611                 uint32_t mask;
1612
1613                 if (val & (1 << 23))
1614                         mask = FLASH_BACKUP_STRAP_MASK;
1615                 else
1616                         mask = FLASH_STRAP_MASK;
1617
1618                 /* Look for the matching NVRAM device configuration data. */
1619                 for (j = 0, flash = flash_table; j < entry_count;
1620                      j++, flash++) {
1621                         /* Check if the device matches any of the known devices. */
1622                         if ((val & mask) == (flash->strapping & mask)) {
1623                                 /* Found a device match. */
1624                                 sc->bce_flash_info = flash;
1625
1626                                 /* Request access to the flash interface. */
1627                                 rc = bce_acquire_nvram_lock(sc);
1628                                 if (rc != 0)
1629                                         return rc;
1630
1631                                 /* Reconfigure the flash interface. */
1632                                 bce_enable_nvram_access(sc);
1633                                 REG_WR(sc, BCE_NVM_CFG1, flash->config1);
1634                                 REG_WR(sc, BCE_NVM_CFG2, flash->config2);
1635                                 REG_WR(sc, BCE_NVM_CFG3, flash->config3);
1636                                 REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
1637                                 bce_disable_nvram_access(sc);
1638                                 bce_release_nvram_lock(sc);
1639                                 break;
1640                         }
1641                 }
1642         }
1643
1644         /* Check if a matching device was found. */
1645         if (j == entry_count) {
1646                 sc->bce_flash_info = NULL;
1647                 if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
1648                 return ENODEV;
1649         }
1650
1651 bce_init_nvram_get_flash_size:
1652         /* Write the flash config data to the shared memory interface. */
1653         val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
1654             BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
1655         if (val)
1656                 sc->bce_flash_size = val;
1657         else
1658                 sc->bce_flash_size = sc->bce_flash_info->total_size;
1659
1660         return rc;
1661 }
1662
1663
1664 /****************************************************************************/
1665 /* Read an arbitrary range of data from NVRAM.                              */
1666 /*                                                                          */
1667 /* Prepares the NVRAM interface for access and reads the requested data     */
1668 /* into the supplied buffer.                                                */
1669 /*                                                                          */
1670 /* Returns:                                                                 */
1671 /*   0 on success and the data read, positive value on failure.             */
1672 /****************************************************************************/
1673 static int
1674 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1675                int buf_size)
1676 {
1677         uint32_t cmd_flags, offset32, len32, extra;
1678         int rc = 0;
1679
1680         if (buf_size == 0)
1681                 return 0;
1682
1683         /* Request access to the flash interface. */
1684         rc = bce_acquire_nvram_lock(sc);
1685         if (rc != 0)
1686                 return rc;
1687
1688         /* Enable access to flash interface */
1689         bce_enable_nvram_access(sc);
1690
1691         len32 = buf_size;
1692         offset32 = offset;
1693         extra = 0;
1694
1695         cmd_flags = 0;
1696
1697         /* Error paths jump to the exit label to release the NVRAM lock. */
1698         if (offset32 & 3) {
1699                 uint8_t buf[4];
1700                 uint32_t pre_len;
1701
1702                 offset32 &= ~3;
1703                 pre_len = 4 - (offset & 3);
1704
1705                 if (pre_len >= len32) {
1706                         pre_len = len32;
1707                         cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1708                 } else {
1709                         cmd_flags = BCE_NVM_COMMAND_FIRST;
1710                 }
1711
1712                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1713                 if (rc)
1714                         goto bce_nvram_read_locked_exit;
1715
1716                 memcpy(ret_buf, buf + (offset & 3), pre_len);
1717
1718                 offset32 += 4;
1719                 ret_buf += pre_len;
1720                 len32 -= pre_len;
1721         }
1722
1723         if (len32 & 3) {
1724                 extra = 4 - (len32 & 3);
1725                 len32 = (len32 + 4) & ~3;
1726         }
1727
1728         if (len32 == 4) {
1729                 uint8_t buf[4];
1730
1731                 if (cmd_flags)
1732                         cmd_flags = BCE_NVM_COMMAND_LAST;
1733                 else
1734                         cmd_flags = BCE_NVM_COMMAND_FIRST |
1735                                     BCE_NVM_COMMAND_LAST;
1736
1737                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1738
1739                 memcpy(ret_buf, buf, 4 - extra);
1740         } else if (len32 > 0) {
1741                 uint8_t buf[4];
1742
1743                 /* Read the first word. */
1744                 if (cmd_flags)
1745                         cmd_flags = 0;
1746                 else
1747                         cmd_flags = BCE_NVM_COMMAND_FIRST;
1748
1749                 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1750
1751                 /* Advance to the next dword. */
1752                 offset32 += 4;
1753                 ret_buf += 4;
1754                 len32 -= 4;
1755
1756                 while (len32 > 4 && rc == 0) {
1757                         rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1758
1759                         /* Advance to the next dword. */
1760                         offset32 += 4;
1761                         ret_buf += 4;
1762                         len32 -= 4;
1763                 }
1764
1765                 if (rc)
1766                         goto bce_nvram_read_locked_exit;
1767
1768                 cmd_flags = BCE_NVM_COMMAND_LAST;
1769                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1770
1771                 memcpy(ret_buf, buf, 4 - extra);
1772         }
1773
1774 bce_nvram_read_locked_exit:
1775         /* Disable access to flash interface and release the lock. */
1776         bce_disable_nvram_access(sc);
1777         bce_release_nvram_lock(sc);
1778
1779         return rc;
1780 }
1781
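/*
 * Worked example (illustrative only): a 5 byte read starting at offset
 * 0x102 is decomposed by bce_nvram_read() into two dword accesses.  The
 * dword at 0x100 (BCE_NVM_COMMAND_FIRST) supplies bytes 2-3, the dword at
 * 0x104 (BCE_NVM_COMMAND_LAST) supplies the remaining three bytes, and the
 * rounded-up fourth byte of that final dword is discarded via the "extra"
 * count.
 */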
1782
1783 /****************************************************************************/
1784 /* Verifies that NVRAM is accessible and contains valid data.               */
1785 /*                                                                          */
1786 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1787 /* correct.                                                                 */
1788 /*                                                                          */
1789 /* Returns:                                                                 */
1790 /*   0 on success, positive value on failure.                               */
1791 /****************************************************************************/
1792 static int
1793 bce_nvram_test(struct bce_softc *sc)
1794 {
1795         uint32_t buf[BCE_NVRAM_SIZE / 4];
1796         uint32_t magic, csum;
1797         uint8_t *data = (uint8_t *)buf;
1798         int rc = 0;
1799
1800         /*
1801          * Check that the device NVRAM is valid by reading
1802          * the magic value at offset 0.
1803          */
1804         rc = bce_nvram_read(sc, 0, data, 4);
1805         if (rc != 0)
1806                 return rc;
1807
1808         magic = be32toh(buf[0]);
1809         if (magic != BCE_NVRAM_MAGIC) {
1810                 if_printf(&sc->arpcom.ac_if,
1811                           "Invalid NVRAM magic value! Expected: 0x%08X, "
1812                           "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1813                 return ENODEV;
1814         }
1815
1816         /*
1817          * Verify that the device NVRAM includes valid
1818          * configuration data.
1819          */
1820         rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
1821         if (rc != 0)
1822                 return rc;
1823
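        /*
         * Each 0x100 byte configuration block is expected to carry its own
         * CRC32 in its trailing bytes, so running the CRC over the entire
         * block should yield the fixed residual value checked below.
         */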
1824         csum = ether_crc32_le(data, 0x100);
1825         if (csum != BCE_CRC32_RESIDUAL) {
1826                 if_printf(&sc->arpcom.ac_if,
1827                           "Invalid Manufacturing Information NVRAM CRC! "
1828                           "Expected: 0x%08X, Found: 0x%08X\n",
1829                           BCE_CRC32_RESIDUAL, csum);
1830                 return ENODEV;
1831         }
1832
1833         csum = ether_crc32_le(data + 0x100, 0x100);
1834         if (csum != BCE_CRC32_RESIDUAL) {
1835                 if_printf(&sc->arpcom.ac_if,
1836                           "Invalid Feature Configuration Information "
1837                           "NVRAM CRC! Expected: 0x%08X, Found: 0x%08X\n",
1838                           BCE_CRC32_RESIDUAL, csum);
1839                 rc = ENODEV;
1840         }
1841         return rc;
1842 }
1843
1844
1845 /****************************************************************************/
1846 /* Identifies the current media type of the controller and sets the PHY     */
1847 /* address.                                                                 */
1848 /*                                                                          */
1849 /* Returns:                                                                 */
1850 /*   Nothing.                                                               */
1851 /****************************************************************************/
1852 static void
1853 bce_get_media(struct bce_softc *sc)
1854 {
1855         uint32_t val;
1856
1857         sc->bce_phy_addr = 1;
1858
1859         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1860             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1861                 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
1862                 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
1863                 uint32_t strap;
1864
1865                 /*
1866                  * The BCM5709S is software configurable
1867                  * for Copper or SerDes operation.
1868                  */
1869                 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
1870                         return;
1871                 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
1872                         sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1873                         return;
1874                 }
1875
1876                 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
1877                         strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
1878                 } else {
1879                         strap =
1880                         (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
1881                 }
1882
1883                 if (pci_get_function(sc->bce_dev) == 0) {
1884                         switch (strap) {
1885                         case 0x4:
1886                         case 0x5:
1887                         case 0x6:
1888                                 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1889                                 break;
1890                         }
1891                 } else {
1892                         switch (strap) {
1893                         case 0x1:
1894                         case 0x2:
1895                         case 0x4:
1896                                 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1897                                 break;
1898                         }
1899                 }
1900         } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
1901                 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1902         }
1903
1904         if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
1905                 sc->bce_flags |= BCE_NO_WOL_FLAG;
1906                 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1907                         sc->bce_phy_addr = 2;
1908                         val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1909                         if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
1910                                 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
1911                 }
1912         } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
1913             (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
1914                 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
1915         }
1916 }
1917
1918
1919 static void
1920 bce_destroy_tx_ring(struct bce_tx_ring *txr)
1921 {
1922         int i;
1923
1924         /* Destroy the TX buffer descriptor DMA stuffs. */
1925         if (txr->tx_bd_chain_tag != NULL) {
1926                 for (i = 0; i < txr->tx_pages; i++) {
1927                         if (txr->tx_bd_chain[i] != NULL) {
1928                                 bus_dmamap_unload(txr->tx_bd_chain_tag,
1929                                     txr->tx_bd_chain_map[i]);
1930                                 bus_dmamem_free(txr->tx_bd_chain_tag,
1931                                     txr->tx_bd_chain[i],
1932                                     txr->tx_bd_chain_map[i]);
1933                         }
1934                 }
1935                 bus_dma_tag_destroy(txr->tx_bd_chain_tag);
1936         }
1937
1938         /* Destroy the TX mbuf DMA stuffs. */
1939         if (txr->tx_mbuf_tag != NULL) {
1940                 for (i = 0; i < TOTAL_TX_BD(txr); i++) {
1941                         /* Must have been unloaded in bce_stop() */
1942                         KKASSERT(txr->tx_mbuf_ptr[i] == NULL);
1943                         bus_dmamap_destroy(txr->tx_mbuf_tag,
1944                             txr->tx_mbuf_map[i]);
1945                 }
1946                 bus_dma_tag_destroy(txr->tx_mbuf_tag);
1947         }
1948
1949         if (txr->tx_bd_chain_map != NULL)
1950                 kfree(txr->tx_bd_chain_map, M_DEVBUF);
1951         if (txr->tx_bd_chain != NULL)
1952                 kfree(txr->tx_bd_chain, M_DEVBUF);
1953         if (txr->tx_bd_chain_paddr != NULL)
1954                 kfree(txr->tx_bd_chain_paddr, M_DEVBUF);
1955
1956         if (txr->tx_mbuf_map != NULL)
1957                 kfree(txr->tx_mbuf_map, M_DEVBUF);
1958         if (txr->tx_mbuf_ptr != NULL)
1959                 kfree(txr->tx_mbuf_ptr, M_DEVBUF);
1960 }
1961
1962
1963 static void
1964 bce_destroy_rx_ring(struct bce_rx_ring *rxr)
1965 {
1966         int i;
1967
1968         /* Destroy the RX buffer descriptor DMA stuffs. */
1969         if (rxr->rx_bd_chain_tag != NULL) {
1970                 for (i = 0; i < rxr->rx_pages; i++) {
1971                         if (rxr->rx_bd_chain[i] != NULL) {
1972                                 bus_dmamap_unload(rxr->rx_bd_chain_tag,
1973                                     rxr->rx_bd_chain_map[i]);
1974                                 bus_dmamem_free(rxr->rx_bd_chain_tag,
1975                                     rxr->rx_bd_chain[i],
1976                                     rxr->rx_bd_chain_map[i]);
1977                         }
1978                 }
1979                 bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
1980         }
1981
1982         /* Destroy the RX mbuf DMA stuffs. */
1983         if (rxr->rx_mbuf_tag != NULL) {
1984                 for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
1985                         /* Must have been unloaded in bce_stop() */
1986                         KKASSERT(rxr->rx_mbuf_ptr[i] == NULL);
1987                         bus_dmamap_destroy(rxr->rx_mbuf_tag,
1988                             rxr->rx_mbuf_map[i]);
1989                 }
1990                 bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
1991                 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
1992         }
1993
1994         if (rxr->rx_bd_chain_map != NULL)
1995                 kfree(rxr->rx_bd_chain_map, M_DEVBUF);
1996         if (rxr->rx_bd_chain != NULL)
1997                 kfree(rxr->rx_bd_chain, M_DEVBUF);
1998         if (rxr->rx_bd_chain_paddr != NULL)
1999                 kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);
2000
2001         if (rxr->rx_mbuf_map != NULL)
2002                 kfree(rxr->rx_mbuf_map, M_DEVBUF);
2003         if (rxr->rx_mbuf_ptr != NULL)
2004                 kfree(rxr->rx_mbuf_ptr, M_DEVBUF);
2005         if (rxr->rx_mbuf_paddr != NULL)
2006                 kfree(rxr->rx_mbuf_paddr, M_DEVBUF);
2007 }
2008
2009
2010 /****************************************************************************/
2011 /* Free any DMA memory owned by the driver.                                 */
2012 /*                                                                          */
2013 /* Scans through each data structure that requires DMA memory and frees     */
2014 /* the memory if allocated.                                                 */
2015 /*                                                                          */
2016 /* Returns:                                                                 */
2017 /*   Nothing.                                                               */
2018 /****************************************************************************/
2019 static void
2020 bce_dma_free(struct bce_softc *sc)
2021 {
2022         int i;
2023
2024         /* Destroy the status block. */
2025         if (sc->status_tag != NULL) {
2026                 if (sc->status_block != NULL) {
2027                         bus_dmamap_unload(sc->status_tag, sc->status_map);
2028                         bus_dmamem_free(sc->status_tag, sc->status_block,
2029                                         sc->status_map);
2030                 }
2031                 bus_dma_tag_destroy(sc->status_tag);
2032         }
2033
2034         /* Destroy the statistics block. */
2035         if (sc->stats_tag != NULL) {
2036                 if (sc->stats_block != NULL) {
2037                         bus_dmamap_unload(sc->stats_tag, sc->stats_map);
2038                         bus_dmamem_free(sc->stats_tag, sc->stats_block,
2039                                         sc->stats_map);
2040                 }
2041                 bus_dma_tag_destroy(sc->stats_tag);
2042         }
2043
2044         /* Destroy the CTX DMA stuffs. */
2045         if (sc->ctx_tag != NULL) {
2046                 for (i = 0; i < sc->ctx_pages; i++) {
2047                         if (sc->ctx_block[i] != NULL) {
2048                                 bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
2049                                 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2050                                                 sc->ctx_map[i]);
2051                         }
2052                 }
2053                 bus_dma_tag_destroy(sc->ctx_tag);
2054         }
2055
2056         /* Free TX rings */
2057         if (sc->tx_rings != NULL) {
2058                 for (i = 0; i < sc->ring_cnt; ++i)
2059                         bce_destroy_tx_ring(&sc->tx_rings[i]);
2060                 kfree(sc->tx_rings, M_DEVBUF);
2061         }
2062
2063         /* Free RX rings */
2064         if (sc->rx_rings != NULL) {
2065                 for (i = 0; i < sc->ring_cnt; ++i)
2066                         bce_destroy_rx_ring(&sc->rx_rings[i]);
2067                 kfree(sc->rx_rings, M_DEVBUF);
2068         }
2069
2070         /* Destroy the parent tag */
2071         if (sc->parent_tag != NULL)
2072                 bus_dma_tag_destroy(sc->parent_tag);
2073 }
2074
2075
2076 /****************************************************************************/
2077 /* Get DMA memory from the OS.                                              */
2078 /*                                                                          */
2079 /* Validates that the OS has provided DMA buffers in response to a          */
2080 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2081 /* If the load fails the callback returns without storing an address, and   */
2082 /* the error is reported to the caller through bus_dmamap_load()'s own      */
2083 /* return value.                                                             */
2084 /*                                                                          */
2085 /* Returns:                                                                 */
2086 /*   Nothing.                                                               */
2087 /****************************************************************************/
2088 static void
2089 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2090 {
2091         bus_addr_t *busaddr = arg;
2092
2093         /* Check for an error and signal the caller that an error occurred. */
2094         if (error)
2095                 return;
2096
2097         KASSERT(nseg == 1, ("only one segment is allowed"));
2098         *busaddr = segs->ds_addr;
2099 }
2100
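/*
 * Illustrative sketch only (not compiled): bce_dma_map_addr() is handed to
 * bus_dmamap_load() whenever a single-segment coherent allocation needs its
 * bus address recorded, mirroring the ring and context setup code below.
 * Here tag, map, vaddr and size stand in for the ring-specific variables:
 *
 *	bus_addr_t busaddr;
 *
 *	rc = bus_dmamem_alloc(tag, &vaddr,
 *	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &map);
 *	if (rc == 0)
 *		rc = bus_dmamap_load(tag, map, vaddr, size,
 *		    bce_dma_map_addr, &busaddr, BUS_DMA_WAITOK);
 *
 * On success busaddr holds the physical address of the block.
 */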
2101
2102 static int
2103 bce_create_tx_ring(struct bce_tx_ring *txr)
2104 {
2105         int pages, rc, i;
2106
2107         lwkt_serialize_init(&txr->tx_serialize);
2108         txr->tx_wreg = bce_tx_wreg;
2109
2110         pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
2111         if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
2112                 device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
2113                 pages = TX_PAGES_DEFAULT;
2114         }
2115         txr->tx_pages = pages;
2116
2117         txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
2118             M_DEVBUF, M_WAITOK | M_ZERO);
2119         txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
2120             M_DEVBUF, M_WAITOK | M_ZERO);
2121         txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
2122             M_DEVBUF, M_WAITOK | M_ZERO);
2123
2124         txr->tx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_TX_BD(txr),
2125             M_DEVBUF, M_WAITOK | M_ZERO);
2126         txr->tx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_TX_BD(txr),
2127             M_DEVBUF, M_WAITOK | M_ZERO);
2128
2129         /*
2130          * Create a DMA tag for the TX buffer descriptor chain,
2131          * allocate and clear the memory, and fetch the
2132          * physical address of the block.
2133          */
2134         rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2135             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2136             BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2137             0, &txr->tx_bd_chain_tag);
2138         if (rc != 0) {
2139                 device_printf(txr->sc->bce_dev, "Could not allocate "
2140                     "TX descriptor chain DMA tag!\n");
2141                 return rc;
2142         }
2143
2144         for (i = 0; i < txr->tx_pages; i++) {
2145                 bus_addr_t busaddr;
2146
2147                 rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
2148                     (void **)&txr->tx_bd_chain[i],
2149                     BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2150                     &txr->tx_bd_chain_map[i]);
2151                 if (rc != 0) {
2152                         device_printf(txr->sc->bce_dev,
2153                             "Could not allocate %dth TX descriptor "
2154                             "chain DMA memory!\n", i);
2155                         return rc;
2156                 }
2157
2158                 rc = bus_dmamap_load(txr->tx_bd_chain_tag,
2159                     txr->tx_bd_chain_map[i],
2160                     txr->tx_bd_chain[i],
2161                     BCE_TX_CHAIN_PAGE_SZ,
2162                     bce_dma_map_addr, &busaddr,
2163                     BUS_DMA_WAITOK);
2164                 if (rc != 0) {
2165                         if (rc == EINPROGRESS) {
2166                                 panic("%s coherent memory loading "
2167                                     "is still in progress!",
2168                                     txr->sc->arpcom.ac_if.if_xname);
2169                         }
2170                         device_printf(txr->sc->bce_dev, "Could not map %dth "
2171                             "TX descriptor chain DMA memory!\n", i);
2172                         bus_dmamem_free(txr->tx_bd_chain_tag,
2173                             txr->tx_bd_chain[i],
2174                             txr->tx_bd_chain_map[i]);
2175                         txr->tx_bd_chain[i] = NULL;
2176                         return rc;
2177                 }
2178
2179                 txr->tx_bd_chain_paddr[i] = busaddr;
2180         }
2181
2182         /* Create a DMA tag for TX mbufs. */
2183         rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
2184             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2185             IP_MAXPACKET + sizeof(struct ether_vlan_header),
2186             BCE_MAX_SEGMENTS, PAGE_SIZE,
2187             BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2188             &txr->tx_mbuf_tag);
2189         if (rc != 0) {
2190                 device_printf(txr->sc->bce_dev,
2191                     "Could not allocate TX mbuf DMA tag!\n");
2192                 return rc;
2193         }
2194
2195         /* Create DMA maps for the TX mbufs clusters. */
2196         for (i = 0; i < TOTAL_TX_BD(txr); i++) {
2197                 rc = bus_dmamap_create(txr->tx_mbuf_tag,
2198                     BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2199                     &txr->tx_mbuf_map[i]);
2200                 if (rc != 0) {
2201                         int j;
2202
2203                         for (j = 0; j < i; ++j) {
2204                                 bus_dmamap_destroy(txr->tx_mbuf_tag,
2205                                     txr->tx_mbuf_map[j]);
2206                         }
2207                         bus_dma_tag_destroy(txr->tx_mbuf_tag);
2208                         txr->tx_mbuf_tag = NULL;
2209
2210                         device_printf(txr->sc->bce_dev, "Unable to create "
2211                             "%dth TX mbuf DMA map!\n", i);
2212                         return rc;
2213                 }
2214         }
2215         return 0;
2216 }
2217
2218
2219 static int
2220 bce_create_rx_ring(struct bce_rx_ring *rxr)
2221 {
2222         int pages, rc, i;
2223
2224         lwkt_serialize_init(&rxr->rx_serialize);
2225
2226         pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
2227         if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
2228                 device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
2229                 pages = RX_PAGES_DEFAULT;
2230         }
2231         rxr->rx_pages = pages;
2232
2233         rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
2234             M_DEVBUF, M_WAITOK | M_ZERO);
2235         rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
2236             M_DEVBUF, M_WAITOK | M_ZERO);
2237         rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
2238             M_DEVBUF, M_WAITOK | M_ZERO);
2239
2240         rxr->rx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_RX_BD(rxr),
2241             M_DEVBUF, M_WAITOK | M_ZERO);
2242         rxr->rx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_RX_BD(rxr),
2243             M_DEVBUF, M_WAITOK | M_ZERO);
2244         rxr->rx_mbuf_paddr = kmalloc(sizeof(bus_addr_t) * TOTAL_RX_BD(rxr),
2245             M_DEVBUF, M_WAITOK | M_ZERO);
2246
2247         /*
2248          * Create a DMA tag for the RX buffer descriptor chain,
2249          * allocate and clear the memory, and fetch the physical
2250          * address of the blocks.
2251          */
2252         rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2253             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2254             BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
2255             0, &rxr->rx_bd_chain_tag);
2256         if (rc != 0) {
2257                 device_printf(rxr->sc->bce_dev, "Could not allocate "
2258                     "RX descriptor chain DMA tag!\n");
2259                 return rc;
2260         }
2261
2262         for (i = 0; i < rxr->rx_pages; i++) {
2263                 bus_addr_t busaddr;
2264
2265                 rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
2266                     (void **)&rxr->rx_bd_chain[i],
2267                     BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2268                     &rxr->rx_bd_chain_map[i]);
2269                 if (rc != 0) {
2270                         device_printf(rxr->sc->bce_dev,
2271                             "Could not allocate %dth RX descriptor "
2272                             "chain DMA memory!\n", i);
2273                         return rc;
2274                 }
2275
2276                 rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
2277                     rxr->rx_bd_chain_map[i],
2278                     rxr->rx_bd_chain[i],
2279                     BCE_RX_CHAIN_PAGE_SZ,
2280                     bce_dma_map_addr, &busaddr,
2281                     BUS_DMA_WAITOK);
2282                 if (rc != 0) {
2283                         if (rc == EINPROGRESS) {
2284                                 panic("%s coherent memory loading "
2285                                     "is still in progress!",
2286                                     rxr->sc->arpcom.ac_if.if_xname);
2287                         }
2288                         device_printf(rxr->sc->bce_dev,
2289                             "Could not map %dth RX descriptor "
2290                             "chain DMA memory!\n", i);
2291                         bus_dmamem_free(rxr->rx_bd_chain_tag,
2292                             rxr->rx_bd_chain[i],
2293                             rxr->rx_bd_chain_map[i]);
2294                         rxr->rx_bd_chain[i] = NULL;
2295                         return rc;
2296                 }
2297
2298                 rxr->rx_bd_chain_paddr[i] = busaddr;
2299         }
2300
2301         /* Create a DMA tag for RX mbufs. */
2302         rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
2303             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2304             MCLBYTES, 1, MCLBYTES,
2305             BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
2306             &rxr->rx_mbuf_tag);
2307         if (rc != 0) {
2308                 device_printf(rxr->sc->bce_dev,
2309                     "Could not allocate RX mbuf DMA tag!\n");
2310                 return rc;
2311         }
2312
2313         /* Create tmp DMA map for RX mbuf clusters. */
2314         rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2315             &rxr->rx_mbuf_tmpmap);
2316         if (rc != 0) {
2317                 bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2318                 rxr->rx_mbuf_tag = NULL;
2319
2320                 device_printf(rxr->sc->bce_dev,
2321                     "Could not create RX mbuf tmp DMA map!\n");
2322                 return rc;
2323         }
2324
2325         /* Create DMA maps for the RX mbuf clusters. */
2326         for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
2327                 rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
2328                     &rxr->rx_mbuf_map[i]);
2329                 if (rc != 0) {
2330                         int j;
2331
2332                         for (j = 0; j < i; ++j) {
2333                                 bus_dmamap_destroy(rxr->rx_mbuf_tag,
2334                                     rxr->rx_mbuf_map[j]);
2335                         }
2336                         bus_dma_tag_destroy(rxr->rx_mbuf_tag);
2337                         rxr->rx_mbuf_tag = NULL;
2338
2339                         device_printf(rxr->sc->bce_dev, "Unable to create "
2340                             "%dth RX mbuf DMA map!\n", i);
2341                         return rc;
2342                 }
2343         }
2344         return 0;
2345 }
2346
2347
2348 /****************************************************************************/
2349 /* Allocate any DMA memory needed by the driver.                            */
2350 /*                                                                          */
2351 /* Allocates DMA memory needed for the various global structures needed by  */
2352 /* hardware.                                                                */
2353 /*                                                                          */
2354 /* Memory alignment requirements:                                           */
2355 /* -----------------+----------+----------+----------+----------+           */
2356 /*  Data Structure  |   5706   |   5708   |   5709   |   5716   |           */
2357 /* -----------------+----------+----------+----------+----------+           */
2358 /* Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2359 /* Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2360 /* RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |           */
2361 /* PG Buffers       |   none   |   none   |   none   |   none   |           */
2362 /* TX Buffers       |   none   |   none   |   none   |   none   |           */
2363 /* Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |           */
2364 /* Context Pages(1) |   N/A    |   N/A    |   4KiB   |   4KiB   |           */
2365 /* -----------------+----------+----------+----------+----------+           */
2366 /*                                                                          */
2367 /* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
2368 /*                                                                          */
2369 /* Returns:                                                                 */
2370 /*   0 for success, positive value for failure.                             */
2371 /****************************************************************************/
2372 static int
2373 bce_dma_alloc(struct bce_softc *sc)
2374 {
2375         struct ifnet *ifp = &sc->arpcom.ac_if;
2376         int i, rc = 0;
2377         bus_addr_t busaddr, max_busaddr;
2378         bus_size_t status_align, stats_align;
2379
2380         /*
2381          * The embedded PCIe to PCI-X bridge (EPB) 
2382          * in the 5708 cannot address memory above 
2383          * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043). 
2384          */
2385         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
2386                 max_busaddr = BCE_BUS_SPACE_MAXADDR;
2387         else
2388                 max_busaddr = BUS_SPACE_MAXADDR;
2389
2390         /*
2391          * The BCM5709 and BCM5716 use host memory as a cache for context memory.
2392          */
2393         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2394             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2395                 sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
2396                 if (sc->ctx_pages == 0)
2397                         sc->ctx_pages = 1;
2398                 if (sc->ctx_pages > BCE_CTX_PAGES) {
2399                         device_printf(sc->bce_dev, "excessive ctx pages %d\n",
2400                             sc->ctx_pages);
2401                         return ENOMEM;
2402                 }
2403                 status_align = 16;
2404                 stats_align = 16;
2405         } else {
2406                 status_align = 8;
2407                 stats_align = 8;
2408         }
2409
2410         /*
2411          * Allocate the parent bus DMA tag appropriate for PCI.
2412          */
2413         rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
2414                                 max_busaddr, BUS_SPACE_MAXADDR,
2415                                 NULL, NULL,
2416                                 BUS_SPACE_MAXSIZE_32BIT, 0,
2417                                 BUS_SPACE_MAXSIZE_32BIT,
2418                                 0, &sc->parent_tag);
2419         if (rc != 0) {
2420                 if_printf(ifp, "Could not allocate parent DMA tag!\n");
2421                 return rc;
2422         }
2423
2424         /*
2425          * Allocate status block.
2426          */
2427         sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
2428                                 status_align, BCE_STATUS_BLK_SZ,
2429                                 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2430                                 &sc->status_tag, &sc->status_map,
2431                                 &sc->status_block_paddr);
2432         if (sc->status_block == NULL) {
2433                 if_printf(ifp, "Could not allocate status block!\n");
2434                 return ENOMEM;
2435         }
2436
2437         /*
2438          * Allocate statistics block.
2439          */
2440         sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
2441                                 stats_align, BCE_STATS_BLK_SZ,
2442                                 BUS_DMA_WAITOK | BUS_DMA_ZERO,
2443                                 &sc->stats_tag, &sc->stats_map,
2444                                 &sc->stats_block_paddr);
2445         if (sc->stats_block == NULL) {
2446                 if_printf(ifp, "Could not allocate statistics block!\n");
2447                 return ENOMEM;
2448         }
2449
2450         /*
2451          * Allocate context block, if needed
2452          */
2453         if (sc->ctx_pages != 0) {
2454                 rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
2455                                         BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2456                                         NULL, NULL,
2457                                         BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
2458                                         0, &sc->ctx_tag);
2459                 if (rc != 0) {
2460                         if_printf(ifp, "Could not allocate "
2461                                   "context block DMA tag!\n");
2462                         return rc;
2463                 }
2464
2465                 for (i = 0; i < sc->ctx_pages; i++) {
2466                         rc = bus_dmamem_alloc(sc->ctx_tag,
2467                                               (void **)&sc->ctx_block[i],
2468                                               BUS_DMA_WAITOK | BUS_DMA_ZERO |
2469                                               BUS_DMA_COHERENT,
2470                                               &sc->ctx_map[i]);
2471                         if (rc != 0) {
2472                                 if_printf(ifp, "Could not allocate %dth context "
2473                                           "DMA memory!\n", i);
2474                                 return rc;
2475                         }
2476
2477                         rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
2478                                              sc->ctx_block[i], BCM_PAGE_SIZE,
2479                                              bce_dma_map_addr, &busaddr,
2480                                              BUS_DMA_WAITOK);
2481                         if (rc != 0) {
2482                                 if (rc == EINPROGRESS) {
2483                                         panic("%s coherent memory loading "
2484                                               "is still in progress!", ifp->if_xname);
2485                                 }
2486                                 if_printf(ifp, "Could not map context "
2487                                           "DMA memory %d!\n", i);
2488                                 bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
2489                                                 sc->ctx_map[i]);
2490                                 sc->ctx_block[i] = NULL;
2491                                 return rc;
2492                         }
2493                         sc->ctx_paddr[i] = busaddr;
2494                 }
2495         }
2496
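             /*
              * Allocate and create the TX rings.
              */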
2497         sc->tx_rings = kmalloc_cachealign(
2498             sizeof(struct bce_tx_ring) * sc->ring_cnt, M_DEVBUF,
2499             M_WAITOK | M_ZERO);
2500         for (i = 0; i < sc->ring_cnt; ++i) {
2501                 sc->tx_rings[i].sc = sc;
2502
2503                 rc = bce_create_tx_ring(&sc->tx_rings[i]);
2504                 if (rc != 0) {
2505                         device_printf(sc->bce_dev,
2506                             "can't create tx ring %d\n", i);
2507                         return rc;
2508                 }
2509         }
2510
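             /*
              * Allocate and create the RX rings.
              */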
2511         sc->rx_rings = kmalloc_cachealign(
2512             sizeof(struct bce_rx_ring) * sc->ring_cnt, M_DEVBUF,
2513             M_WAITOK | M_ZERO);
2514         for (i = 0; i < sc->ring_cnt; ++i) {
2515                 sc->rx_rings[i].sc = sc;
2516
2517                 rc = bce_create_rx_ring(&sc->rx_rings[i]);
2518                 if (rc != 0) {
2519                         device_printf(sc->bce_dev,
2520                             "can't create rx ring %d\n", i);
2521                         return rc;
2522                 }
2523         }
2524
2525         return 0;
2526 }
2527
2528
2529 /****************************************************************************/
2530 /* Firmware synchronization.                                                */
2531 /*                                                                          */
2532 /* Before performing certain operations, such as a chip reset, synchronize  */
2533 /* with the firmware first.                                                 */
2534 /*                                                                          */
2535 /* Returns:                                                                 */
2536 /*   0 for success, positive value for failure.                             */
2537 /****************************************************************************/
2538 static int
2539 bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
2540 {
2541         int i, rc = 0;
2542         uint32_t val;
2543
2544         /* Don't waste any time if we've timed out before. */
2545         if (sc->bce_fw_timed_out)
2546                 return EBUSY;
2547
2548         /* Increment the message sequence number. */
2549         sc->bce_fw_wr_seq++;
2550         msg_data |= sc->bce_fw_wr_seq;
2551
2552         /* Send the message to the bootcode driver mailbox. */
2553         bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2554
2555         /* Wait for the bootcode to acknowledge the message. */
2556         for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
2557                 /* Check for a response in the bootcode firmware mailbox. */
2558                 val = bce_shmem_rd(sc, BCE_FW_MB);
2559                 if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
2560                         break;
2561                 DELAY(1000);
2562         }
2563
2564         /* If we've timed out, tell the bootcode that we've stopped waiting. */
2565         if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
2566             (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
2567                 if_printf(&sc->arpcom.ac_if,
2568                           "Firmware synchronization timeout! "
2569                           "msg_data = 0x%08X\n", msg_data);
2570
2571                 msg_data &= ~BCE_DRV_MSG_CODE;
2572                 msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;
2573
2574                 bce_shmem_wr(sc, BCE_DRV_MB, msg_data);
2575
2576                 sc->bce_fw_timed_out = 1;
2577                 rc = EBUSY;
2578         }
2579         return rc;
2580 }
2581
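     /*
      * A typical caller pairs a WAIT0 message before a chip reset with a
      * WAIT1 message afterwards, for example:
      *
      *      bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
      *      ... perform the chip reset ...
      *      bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
      *
      * as is done in bce_reset() below.
      */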
2582
2583 /****************************************************************************/
2584 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2585 /*                                                                          */
2586 /* Returns:                                                                 */
2587 /*   Nothing.                                                               */
2588 /****************************************************************************/
2589 static void
2590 bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
2591                  uint32_t rv2p_code_len, uint32_t rv2p_proc)
2592 {
2593         int i;
2594         uint32_t val;
2595
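             /*
              * Each RV2P instruction is 64 bits wide: write the high and low
              * instruction words, then latch them into instruction slot
              * (i / 8) of the selected processor.
              */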
2596         for (i = 0; i < rv2p_code_len; i += 8) {
2597                 REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
2598                 rv2p_code++;
2599                 REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
2600                 rv2p_code++;
2601
2602                 if (rv2p_proc == RV2P_PROC1) {
2603                         val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
2604                         REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
2605                 } else {
2606                         val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
2607                         REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
2608                 }
2609         }
2610
2611         /* Reset the processor; the un-stall is done later. */
2612         if (rv2p_proc == RV2P_PROC1)
2613                 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
2614         else
2615                 REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
2616 }
2617
2618
2619 /****************************************************************************/
2620 /* Load RISC processor firmware.                                            */
2621 /*                                                                          */
2622 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2623 /* associated with a particular processor.                                  */
2624 /*                                                                          */
2625 /* Returns:                                                                 */
2626 /*   Nothing.                                                               */
2627 /****************************************************************************/
2628 static void
2629 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2630                 struct fw_info *fw)
2631 {
2632         uint32_t offset;
2633         int j;
2634
2635         bce_halt_cpu(sc, cpu_reg);
2636
2637         /* Load the Text area. */
2638         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2639         if (fw->text) {
2640                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2641                         REG_WR_IND(sc, offset, fw->text[j]);
2642         }
2643
2644         /* Load the Data area. */
2645         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2646         if (fw->data) {
2647                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2648                         REG_WR_IND(sc, offset, fw->data[j]);
2649         }
2650
2651         /* Load the SBSS area. */
2652         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2653         if (fw->sbss) {
2654                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2655                         REG_WR_IND(sc, offset, fw->sbss[j]);
2656         }
2657
2658         /* Load the BSS area. */
2659         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2660         if (fw->bss) {
2661                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2662                         REG_WR_IND(sc, offset, fw->bss[j]);
2663         }
2664
2665         /* Load the Read-Only area. */
2666         offset = cpu_reg->spad_base +
2667                 (fw->rodata_addr - cpu_reg->mips_view_base);
2668         if (fw->rodata) {
2669                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2670                         REG_WR_IND(sc, offset, fw->rodata[j]);
2671         }
2672
2673         /* Clear the pre-fetch instruction and set the FW start address. */
2674         REG_WR_IND(sc, cpu_reg->inst, 0);
2675         REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2676 }
2677
2678
2679 /****************************************************************************/
2680 /* Starts the RISC processor.                                               */
2681 /*                                                                          */
2682 /* Assumes the CPU starting address has already been set.                   */
2683 /*                                                                          */
2684 /* Returns:                                                                 */
2685 /*   Nothing.                                                               */
2686 /****************************************************************************/
2687 static void
2688 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2689 {
2690         uint32_t val;
2691
2692         /* Start the CPU. */
2693         val = REG_RD_IND(sc, cpu_reg->mode);
2694         val &= ~cpu_reg->mode_value_halt;
2695         REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2696         REG_WR_IND(sc, cpu_reg->mode, val);
2697 }
2698
2699
2700 /****************************************************************************/
2701 /* Halts the RISC processor.                                                */
2702 /*                                                                          */
2703 /* Returns:                                                                 */
2704 /*   Nothing.                                                               */
2705 /****************************************************************************/
2706 static void
2707 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2708 {
2709         uint32_t val;
2710
2711         /* Halt the CPU. */
2712         val = REG_RD_IND(sc, cpu_reg->mode);
2713         val |= cpu_reg->mode_value_halt;
2714         REG_WR_IND(sc, cpu_reg->mode, val);
2715         REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2716 }
2717
2718
2719 /****************************************************************************/
2720 /* Start the RX CPU.                                                        */
2721 /*                                                                          */
2722 /* Returns:                                                                 */
2723 /*   Nothing.                                                               */
2724 /****************************************************************************/
2725 static void
2726 bce_start_rxp_cpu(struct bce_softc *sc)
2727 {
2728         struct cpu_reg cpu_reg;
2729
2730         cpu_reg.mode = BCE_RXP_CPU_MODE;
2731         cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2732         cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2733         cpu_reg.state = BCE_RXP_CPU_STATE;
2734         cpu_reg.state_value_clear = 0xffffff;
2735         cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2736         cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2737         cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2738         cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2739         cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2740         cpu_reg.spad_base = BCE_RXP_SCRATCH;
2741         cpu_reg.mips_view_base = 0x8000000;
2742
2743         bce_start_cpu(sc, &cpu_reg);
2744 }
2745
2746
2747 /****************************************************************************/
2748 /* Initialize the RX CPU.                                                   */
2749 /*                                                                          */
2750 /* Returns:                                                                 */
2751 /*   Nothing.                                                               */
2752 /****************************************************************************/
2753 static void
2754 bce_init_rxp_cpu(struct bce_softc *sc)
2755 {
2756         struct cpu_reg cpu_reg;
2757         struct fw_info fw;
2758
2759         cpu_reg.mode = BCE_RXP_CPU_MODE;
2760         cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2761         cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2762         cpu_reg.state = BCE_RXP_CPU_STATE;
2763         cpu_reg.state_value_clear = 0xffffff;
2764         cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2765         cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2766         cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2767         cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2768         cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2769         cpu_reg.spad_base = BCE_RXP_SCRATCH;
2770         cpu_reg.mips_view_base = 0x8000000;
2771
2772         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2773             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2774                 fw.ver_major = bce_RXP_b09FwReleaseMajor;
2775                 fw.ver_minor = bce_RXP_b09FwReleaseMinor;
2776                 fw.ver_fix = bce_RXP_b09FwReleaseFix;
2777                 fw.start_addr = bce_RXP_b09FwStartAddr;
2778
2779                 fw.text_addr = bce_RXP_b09FwTextAddr;
2780                 fw.text_len = bce_RXP_b09FwTextLen;
2781                 fw.text_index = 0;
2782                 fw.text = bce_RXP_b09FwText;
2783
2784                 fw.data_addr = bce_RXP_b09FwDataAddr;
2785                 fw.data_len = bce_RXP_b09FwDataLen;
2786                 fw.data_index = 0;
2787                 fw.data = bce_RXP_b09FwData;
2788
2789                 fw.sbss_addr = bce_RXP_b09FwSbssAddr;
2790                 fw.sbss_len = bce_RXP_b09FwSbssLen;
2791                 fw.sbss_index = 0;
2792                 fw.sbss = bce_RXP_b09FwSbss;
2793
2794                 fw.bss_addr = bce_RXP_b09FwBssAddr;
2795                 fw.bss_len = bce_RXP_b09FwBssLen;
2796                 fw.bss_index = 0;
2797                 fw.bss = bce_RXP_b09FwBss;
2798
2799                 fw.rodata_addr = bce_RXP_b09FwRodataAddr;
2800                 fw.rodata_len = bce_RXP_b09FwRodataLen;
2801                 fw.rodata_index = 0;
2802                 fw.rodata = bce_RXP_b09FwRodata;
2803         } else {
2804                 fw.ver_major = bce_RXP_b06FwReleaseMajor;
2805                 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2806                 fw.ver_fix = bce_RXP_b06FwReleaseFix;
2807                 fw.start_addr = bce_RXP_b06FwStartAddr;
2808
2809                 fw.text_addr = bce_RXP_b06FwTextAddr;
2810                 fw.text_len = bce_RXP_b06FwTextLen;
2811                 fw.text_index = 0;
2812                 fw.text = bce_RXP_b06FwText;
2813
2814                 fw.data_addr = bce_RXP_b06FwDataAddr;
2815                 fw.data_len = bce_RXP_b06FwDataLen;
2816                 fw.data_index = 0;
2817                 fw.data = bce_RXP_b06FwData;
2818
2819                 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2820                 fw.sbss_len = bce_RXP_b06FwSbssLen;
2821                 fw.sbss_index = 0;
2822                 fw.sbss = bce_RXP_b06FwSbss;
2823
2824                 fw.bss_addr = bce_RXP_b06FwBssAddr;
2825                 fw.bss_len = bce_RXP_b06FwBssLen;
2826                 fw.bss_index = 0;
2827                 fw.bss = bce_RXP_b06FwBss;
2828
2829                 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2830                 fw.rodata_len = bce_RXP_b06FwRodataLen;
2831                 fw.rodata_index = 0;
2832                 fw.rodata = bce_RXP_b06FwRodata;
2833         }
2834
2835         bce_load_cpu_fw(sc, &cpu_reg, &fw);
2836         /* Delay RXP start until initialization is complete. */
2837 }
2838
2839
2840 /****************************************************************************/
2841 /* Initialize the TX CPU.                                                   */
2842 /*                                                                          */
2843 /* Returns:                                                                 */
2844 /*   Nothing.                                                               */
2845 /****************************************************************************/
2846 static void
2847 bce_init_txp_cpu(struct bce_softc *sc)
2848 {
2849         struct cpu_reg cpu_reg;
2850         struct fw_info fw;
2851
2852         cpu_reg.mode = BCE_TXP_CPU_MODE;
2853         cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2854         cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2855         cpu_reg.state = BCE_TXP_CPU_STATE;
2856         cpu_reg.state_value_clear = 0xffffff;
2857         cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2858         cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2859         cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2860         cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2861         cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2862         cpu_reg.spad_base = BCE_TXP_SCRATCH;
2863         cpu_reg.mips_view_base = 0x8000000;
2864
2865         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2866             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2867                 fw.ver_major = bce_TXP_b09FwReleaseMajor;
2868                 fw.ver_minor = bce_TXP_b09FwReleaseMinor;
2869                 fw.ver_fix = bce_TXP_b09FwReleaseFix;
2870                 fw.start_addr = bce_TXP_b09FwStartAddr;
2871
2872                 fw.text_addr = bce_TXP_b09FwTextAddr;
2873                 fw.text_len = bce_TXP_b09FwTextLen;
2874                 fw.text_index = 0;
2875                 fw.text = bce_TXP_b09FwText;
2876
2877                 fw.data_addr = bce_TXP_b09FwDataAddr;
2878                 fw.data_len = bce_TXP_b09FwDataLen;
2879                 fw.data_index = 0;
2880                 fw.data = bce_TXP_b09FwData;
2881
2882                 fw.sbss_addr = bce_TXP_b09FwSbssAddr;
2883                 fw.sbss_len = bce_TXP_b09FwSbssLen;
2884                 fw.sbss_index = 0;
2885                 fw.sbss = bce_TXP_b09FwSbss;
2886
2887                 fw.bss_addr = bce_TXP_b09FwBssAddr;
2888                 fw.bss_len = bce_TXP_b09FwBssLen;
2889                 fw.bss_index = 0;
2890                 fw.bss = bce_TXP_b09FwBss;
2891
2892                 fw.rodata_addr = bce_TXP_b09FwRodataAddr;
2893                 fw.rodata_len = bce_TXP_b09FwRodataLen;
2894                 fw.rodata_index = 0;
2895                 fw.rodata = bce_TXP_b09FwRodata;
2896         } else {
2897                 fw.ver_major = bce_TXP_b06FwReleaseMajor;
2898                 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2899                 fw.ver_fix = bce_TXP_b06FwReleaseFix;
2900                 fw.start_addr = bce_TXP_b06FwStartAddr;
2901
2902                 fw.text_addr = bce_TXP_b06FwTextAddr;
2903                 fw.text_len = bce_TXP_b06FwTextLen;
2904                 fw.text_index = 0;
2905                 fw.text = bce_TXP_b06FwText;
2906
2907                 fw.data_addr = bce_TXP_b06FwDataAddr;
2908                 fw.data_len = bce_TXP_b06FwDataLen;
2909                 fw.data_index = 0;
2910                 fw.data = bce_TXP_b06FwData;
2911
2912                 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2913                 fw.sbss_len = bce_TXP_b06FwSbssLen;
2914                 fw.sbss_index = 0;
2915                 fw.sbss = bce_TXP_b06FwSbss;
2916
2917                 fw.bss_addr = bce_TXP_b06FwBssAddr;
2918                 fw.bss_len = bce_TXP_b06FwBssLen;
2919                 fw.bss_index = 0;
2920                 fw.bss = bce_TXP_b06FwBss;
2921
2922                 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2923                 fw.rodata_len = bce_TXP_b06FwRodataLen;
2924                 fw.rodata_index = 0;
2925                 fw.rodata = bce_TXP_b06FwRodata;
2926         }
2927
2928         bce_load_cpu_fw(sc, &cpu_reg, &fw);
2929         bce_start_cpu(sc, &cpu_reg);
2930 }
2931
2932
2933 /****************************************************************************/
2934 /* Initialize the TPAT CPU.                                                 */
2935 /*                                                                          */
2936 /* Returns:                                                                 */
2937 /*   Nothing.                                                               */
2938 /****************************************************************************/
2939 static void
2940 bce_init_tpat_cpu(struct bce_softc *sc)
2941 {
2942         struct cpu_reg cpu_reg;
2943         struct fw_info fw;
2944
2945         cpu_reg.mode = BCE_TPAT_CPU_MODE;
2946         cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
2947         cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
2948         cpu_reg.state = BCE_TPAT_CPU_STATE;
2949         cpu_reg.state_value_clear = 0xffffff;
2950         cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
2951         cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
2952         cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
2953         cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
2954         cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
2955         cpu_reg.spad_base = BCE_TPAT_SCRATCH;
2956         cpu_reg.mips_view_base = 0x8000000;
2957
2958         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2959             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2960                 fw.ver_major = bce_TPAT_b09FwReleaseMajor;
2961                 fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
2962                 fw.ver_fix = bce_TPAT_b09FwReleaseFix;
2963                 fw.start_addr = bce_TPAT_b09FwStartAddr;
2964
2965                 fw.text_addr = bce_TPAT_b09FwTextAddr;
2966                 fw.text_len = bce_TPAT_b09FwTextLen;
2967                 fw.text_index = 0;
2968                 fw.text = bce_TPAT_b09FwText;
2969
2970                 fw.data_addr = bce_TPAT_b09FwDataAddr;
2971                 fw.data_len = bce_TPAT_b09FwDataLen;
2972                 fw.data_index = 0;
2973                 fw.data = bce_TPAT_b09FwData;
2974
2975                 fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
2976                 fw.sbss_len = bce_TPAT_b09FwSbssLen;
2977                 fw.sbss_index = 0;
2978                 fw.sbss = bce_TPAT_b09FwSbss;
2979
2980                 fw.bss_addr = bce_TPAT_b09FwBssAddr;
2981                 fw.bss_len = bce_TPAT_b09FwBssLen;
2982                 fw.bss_index = 0;
2983                 fw.bss = bce_TPAT_b09FwBss;
2984
2985                 fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
2986                 fw.rodata_len = bce_TPAT_b09FwRodataLen;
2987                 fw.rodata_index = 0;
2988                 fw.rodata = bce_TPAT_b09FwRodata;
2989         } else {
2990                 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
2991                 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
2992                 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
2993                 fw.start_addr = bce_TPAT_b06FwStartAddr;
2994
2995                 fw.text_addr = bce_TPAT_b06FwTextAddr;
2996                 fw.text_len = bce_TPAT_b06FwTextLen;
2997                 fw.text_index = 0;
2998                 fw.text = bce_TPAT_b06FwText;
2999
3000                 fw.data_addr = bce_TPAT_b06FwDataAddr;
3001                 fw.data_len = bce_TPAT_b06FwDataLen;
3002                 fw.data_index = 0;
3003                 fw.data = bce_TPAT_b06FwData;
3004
3005                 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3006                 fw.sbss_len = bce_TPAT_b06FwSbssLen;
3007                 fw.sbss_index = 0;
3008                 fw.sbss = bce_TPAT_b06FwSbss;
3009
3010                 fw.bss_addr = bce_TPAT_b06FwBssAddr;
3011                 fw.bss_len = bce_TPAT_b06FwBssLen;
3012                 fw.bss_index = 0;
3013                 fw.bss = bce_TPAT_b06FwBss;
3014
3015                 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3016                 fw.rodata_len = bce_TPAT_b06FwRodataLen;
3017                 fw.rodata_index = 0;
3018                 fw.rodata = bce_TPAT_b06FwRodata;
3019         }
3020
3021         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3022         bce_start_cpu(sc, &cpu_reg);
3023 }
3024
3025
3026 /****************************************************************************/
3027 /* Initialize the CP CPU.                                                   */
3028 /*                                                                          */
3029 /* Returns:                                                                 */
3030 /*   Nothing.                                                               */
3031 /****************************************************************************/
3032 static void
3033 bce_init_cp_cpu(struct bce_softc *sc)
3034 {
3035         struct cpu_reg cpu_reg;
3036         struct fw_info fw;
3037
3038         cpu_reg.mode = BCE_CP_CPU_MODE;
3039         cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3040         cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3041         cpu_reg.state = BCE_CP_CPU_STATE;
3042         cpu_reg.state_value_clear = 0xffffff;
3043         cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3044         cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3045         cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3046         cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3047         cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3048         cpu_reg.spad_base = BCE_CP_SCRATCH;
3049         cpu_reg.mips_view_base = 0x8000000;
3050
3051         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3052             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3053                 fw.ver_major = bce_CP_b09FwReleaseMajor;
3054                 fw.ver_minor = bce_CP_b09FwReleaseMinor;
3055                 fw.ver_fix = bce_CP_b09FwReleaseFix;
3056                 fw.start_addr = bce_CP_b09FwStartAddr;
3057
3058                 fw.text_addr = bce_CP_b09FwTextAddr;
3059                 fw.text_len = bce_CP_b09FwTextLen;
3060                 fw.text_index = 0;
3061                 fw.text = bce_CP_b09FwText;
3062
3063                 fw.data_addr = bce_CP_b09FwDataAddr;
3064                 fw.data_len = bce_CP_b09FwDataLen;
3065                 fw.data_index = 0;
3066                 fw.data = bce_CP_b09FwData;
3067
3068                 fw.sbss_addr = bce_CP_b09FwSbssAddr;
3069                 fw.sbss_len = bce_CP_b09FwSbssLen;
3070                 fw.sbss_index = 0;
3071                 fw.sbss = bce_CP_b09FwSbss;
3072
3073                 fw.bss_addr = bce_CP_b09FwBssAddr;
3074                 fw.bss_len = bce_CP_b09FwBssLen;
3075                 fw.bss_index = 0;
3076                 fw.bss = bce_CP_b09FwBss;
3077
3078                 fw.rodata_addr = bce_CP_b09FwRodataAddr;
3079                 fw.rodata_len = bce_CP_b09FwRodataLen;
3080                 fw.rodata_index = 0;
3081                 fw.rodata = bce_CP_b09FwRodata;
3082         } else {
3083                 fw.ver_major = bce_CP_b06FwReleaseMajor;
3084                 fw.ver_minor = bce_CP_b06FwReleaseMinor;
3085                 fw.ver_fix = bce_CP_b06FwReleaseFix;
3086                 fw.start_addr = bce_CP_b06FwStartAddr;
3087
3088                 fw.text_addr = bce_CP_b06FwTextAddr;
3089                 fw.text_len = bce_CP_b06FwTextLen;
3090                 fw.text_index = 0;
3091                 fw.text = bce_CP_b06FwText;
3092
3093                 fw.data_addr = bce_CP_b06FwDataAddr;
3094                 fw.data_len = bce_CP_b06FwDataLen;
3095                 fw.data_index = 0;
3096                 fw.data = bce_CP_b06FwData;
3097
3098                 fw.sbss_addr = bce_CP_b06FwSbssAddr;
3099                 fw.sbss_len = bce_CP_b06FwSbssLen;
3100                 fw.sbss_index = 0;
3101                 fw.sbss = bce_CP_b06FwSbss;
3102
3103                 fw.bss_addr = bce_CP_b06FwBssAddr;
3104                 fw.bss_len = bce_CP_b06FwBssLen;
3105                 fw.bss_index = 0;
3106                 fw.bss = bce_CP_b06FwBss;
3107
3108                 fw.rodata_addr = bce_CP_b06FwRodataAddr;
3109                 fw.rodata_len = bce_CP_b06FwRodataLen;
3110                 fw.rodata_index = 0;
3111                 fw.rodata = bce_CP_b06FwRodata;
3112         }
3113
3114         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3115         bce_start_cpu(sc, &cpu_reg);
3116 }
3117
3118
3119 /****************************************************************************/
3120 /* Initialize the COM CPU.                                                  */
3121 /*                                                                          */
3122 /* Returns:                                                                 */
3123 /*   Nothing.                                                               */
3124 /****************************************************************************/
3125 static void
3126 bce_init_com_cpu(struct bce_softc *sc)
3127 {
3128         struct cpu_reg cpu_reg;
3129         struct fw_info fw;
3130
3131         cpu_reg.mode = BCE_COM_CPU_MODE;
3132         cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3133         cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3134         cpu_reg.state = BCE_COM_CPU_STATE;
3135         cpu_reg.state_value_clear = 0xffffff;
3136         cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3137         cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3138         cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3139         cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3140         cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3141         cpu_reg.spad_base = BCE_COM_SCRATCH;
3142         cpu_reg.mips_view_base = 0x8000000;
3143
3144         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3145             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3146                 fw.ver_major = bce_COM_b09FwReleaseMajor;
3147                 fw.ver_minor = bce_COM_b09FwReleaseMinor;
3148                 fw.ver_fix = bce_COM_b09FwReleaseFix;
3149                 fw.start_addr = bce_COM_b09FwStartAddr;
3150
3151                 fw.text_addr = bce_COM_b09FwTextAddr;
3152                 fw.text_len = bce_COM_b09FwTextLen;
3153                 fw.text_index = 0;
3154                 fw.text = bce_COM_b09FwText;
3155
3156                 fw.data_addr = bce_COM_b09FwDataAddr;
3157                 fw.data_len = bce_COM_b09FwDataLen;
3158                 fw.data_index = 0;
3159                 fw.data = bce_COM_b09FwData;
3160
3161                 fw.sbss_addr = bce_COM_b09FwSbssAddr;
3162                 fw.sbss_len = bce_COM_b09FwSbssLen;
3163                 fw.sbss_index = 0;
3164                 fw.sbss = bce_COM_b09FwSbss;
3165
3166                 fw.bss_addr = bce_COM_b09FwBssAddr;
3167                 fw.bss_len = bce_COM_b09FwBssLen;
3168                 fw.bss_index = 0;
3169                 fw.bss = bce_COM_b09FwBss;
3170
3171                 fw.rodata_addr = bce_COM_b09FwRodataAddr;
3172                 fw.rodata_len = bce_COM_b09FwRodataLen;
3173                 fw.rodata_index = 0;
3174                 fw.rodata = bce_COM_b09FwRodata;
3175         } else {
3176                 fw.ver_major = bce_COM_b06FwReleaseMajor;
3177                 fw.ver_minor = bce_COM_b06FwReleaseMinor;
3178                 fw.ver_fix = bce_COM_b06FwReleaseFix;
3179                 fw.start_addr = bce_COM_b06FwStartAddr;
3180
3181                 fw.text_addr = bce_COM_b06FwTextAddr;
3182                 fw.text_len = bce_COM_b06FwTextLen;
3183                 fw.text_index = 0;
3184                 fw.text = bce_COM_b06FwText;
3185
3186                 fw.data_addr = bce_COM_b06FwDataAddr;
3187                 fw.data_len = bce_COM_b06FwDataLen;
3188                 fw.data_index = 0;
3189                 fw.data = bce_COM_b06FwData;
3190
3191                 fw.sbss_addr = bce_COM_b06FwSbssAddr;
3192                 fw.sbss_len = bce_COM_b06FwSbssLen;
3193                 fw.sbss_index = 0;
3194                 fw.sbss = bce_COM_b06FwSbss;
3195
3196                 fw.bss_addr = bce_COM_b06FwBssAddr;
3197                 fw.bss_len = bce_COM_b06FwBssLen;
3198                 fw.bss_index = 0;
3199                 fw.bss = bce_COM_b06FwBss;
3200
3201                 fw.rodata_addr = bce_COM_b06FwRodataAddr;
3202                 fw.rodata_len = bce_COM_b06FwRodataLen;
3203                 fw.rodata_index = 0;
3204                 fw.rodata = bce_COM_b06FwRodata;
3205         }
3206
3207         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3208         bce_start_cpu(sc, &cpu_reg);
3209 }
3210
3211
3212 /****************************************************************************/
3213 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
3214 /*                                                                          */
3215 /* Loads the firmware for each CPU and starts the CPU.                      */
3216 /*                                                                          */
3217 /* Returns:                                                                 */
3218 /*   Nothing.                                                               */
3219 /****************************************************************************/
3220 static void
3221 bce_init_cpus(struct bce_softc *sc)
3222 {
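             /*
              * Select the RV2P firmware image that matches the controller:
              * the 5709/5716 use the "xi" images (with a separate image for
              * the Ax revision), while the 5706/5708 use the base images.
              */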
3223         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3224             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3225                 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3226                         bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3227                             sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3228                         bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3229                             sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3230                 } else {
3231                         bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3232                             sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3233                         bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3234                             sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3235                 }
3236         } else {
3237                 bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3238                     sizeof(bce_rv2p_proc1), RV2P_PROC1);
3239                 bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3240                     sizeof(bce_rv2p_proc2), RV2P_PROC2);
3241         }
3242
3243         bce_init_rxp_cpu(sc);
3244         bce_init_txp_cpu(sc);
3245         bce_init_tpat_cpu(sc);
3246         bce_init_com_cpu(sc);
3247         bce_init_cp_cpu(sc);
3248 }
3249
3250
3251 /****************************************************************************/
3252 /* Initialize context memory.                                               */
3253 /*                                                                          */
3254 /* Clears the memory associated with each Context ID (CID).                 */
3255 /*                                                                          */
3256 /* Returns:                                                                 */
3257 /*   0 for success, positive value for failure.                             */
3258 /****************************************************************************/
3259 static int
3260 bce_init_ctx(struct bce_softc *sc)
3261 {
3262         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3263             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3264                 /* DRC: Replace this constant value with a #define. */
3265                 int i, retry_cnt = 10;
3266                 uint32_t val;
3267
3268                 /*
3269                  * BCM5709 context memory may be cached
3270                  * in host memory so prepare the host memory
3271                  * for access.
3272                  */
3273                 val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
3274                     (1 << 12);
3275                 val |= (BCM_PAGE_BITS - 8) << 16;
3276                 REG_WR(sc, BCE_CTX_COMMAND, val);
3277
3278                 /* Wait for mem init command to complete. */
3279                 for (i = 0; i < retry_cnt; i++) {
3280                         val = REG_RD(sc, BCE_CTX_COMMAND);
3281                         if (!(val & BCE_CTX_COMMAND_MEM_INIT))
3282                                 break;
3283                         DELAY(2);
3284                 }
3285                 if (i == retry_cnt) {
3286                         device_printf(sc->bce_dev,
3287                             "Context memory initialization failed!\n");
3288                         return ETIMEDOUT;
3289                 }
3290
3291                 for (i = 0; i < sc->ctx_pages; i++) {
3292                         int j;
3293
3294                         /*
3295                          * Set the physical address of the context
3296                          * memory cache.
3297                          */
3298                         REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
3299                             BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
3300                             BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
3301                         REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
3302                             BCE_ADDR_HI(sc->ctx_paddr[i]));
3303                         REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
3304                             i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3305
3306                         /*
3307                          * Verify that the context memory write was successful.
3308                          */
3309                         for (j = 0; j < retry_cnt; j++) {
3310                                 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
3311                                 if ((val &
3312                                     BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3313                                         break;
3314                                 DELAY(5);
3315                         }
3316                         if (j == retry_cnt) {
3317                                 device_printf(sc->bce_dev,
3318                                     "Failed to initialize context page!\n");
3319                                 return ETIMEDOUT;
3320                         }
3321                 }
3322         } else {
3323                 uint32_t vcid_addr, offset;
3324
3325                 /*
3326                  * For the 5706/5708, context memory is local to
3327                  * the controller, so initialize the controller
3328                  * context memory.
3329                  */
3330
3331                 vcid_addr = GET_CID_ADDR(96);
3332                 while (vcid_addr) {
3333                         vcid_addr -= PHY_CTX_SIZE;
3334
3335                         REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
3336                         REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3337
3338                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3339                                 CTX_WR(sc, 0x00, offset, 0);
3340
3341                         REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3342                         REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3343                 }
3344         }
3345         return 0;
3346 }
3347
3348
3349 /****************************************************************************/
3350 /* Fetch the permanent MAC address of the controller.                       */
3351 /*                                                                          */
3352 /* Returns:                                                                 */
3353 /*   Nothing.                                                               */
3354 /****************************************************************************/
3355 static void
3356 bce_get_mac_addr(struct bce_softc *sc)
3357 {
3358         uint32_t mac_lo = 0, mac_hi = 0;
3359
3360         /*
3361          * The NetXtreme II bootcode populates various NIC
3362          * power-on and runtime configuration items in a
3363          * shared memory area.  The factory configured MAC
3364          * address is available from both NVRAM and the
3365          * shared memory area so we'll read the value from
3366          * shared memory for speed.
3367          */
3368
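             /*
              * The upper configuration word holds MAC bytes 0-1 in its low
              * 16 bits; the lower word holds bytes 2-5, most significant
              * byte first.
              */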
3369         mac_hi = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_UPPER);
3370         mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
3371
3372         if (mac_lo == 0 && mac_hi == 0) {
3373                 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
3374         } else {
3375                 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3376                 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3377                 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3378                 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3379                 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3380                 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3381         }
3382 }
3383
3384
3385 /****************************************************************************/
3386 /* Program the MAC address.                                                 */
3387 /*                                                                          */
3388 /* Returns:                                                                 */
3389 /*   Nothing.                                                               */
3390 /****************************************************************************/
3391 static void
3392 bce_set_mac_addr(struct bce_softc *sc)
3393 {
3394         const uint8_t *mac_addr = sc->eaddr;
3395         uint32_t val;
3396
3397         val = (mac_addr[0] << 8) | mac_addr[1];
3398         REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3399
3400         val = (mac_addr[2] << 24) |
3401               (mac_addr[3] << 16) |
3402               (mac_addr[4] << 8) |
3403               mac_addr[5];
3404         REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3405 }
3406
3407
3408 /****************************************************************************/
3409 /* Stop the controller.                                                     */
3410 /*                                                                          */
3411 /* Returns:                                                                 */
3412 /*   Nothing.                                                               */
3413 /****************************************************************************/
3414 static void
3415 bce_stop(struct bce_softc *sc)
3416 {
3417         struct ifnet *ifp = &sc->arpcom.ac_if;
3418         int i;
3419
3420         ASSERT_IFNET_SERIALIZED_ALL(ifp);
3421
3422         callout_stop(&sc->bce_tick_callout);
3423
3424         /* Disable the transmit/receive blocks. */
3425         REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
3426         REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3427         DELAY(20);
3428
3429         bce_disable_intr(sc);
3430
3431         /* Free the RX lists. */
3432         for (i = 0; i < sc->ring_cnt; ++i)
3433                 bce_free_rx_chain(&sc->rx_rings[i]);
3434
3435         /* Free TX buffers. */
3436         for (i = 0; i < sc->ring_cnt; ++i)
3437                 bce_free_tx_chain(&sc->tx_rings[i]);
3438
3439         sc->bce_link = 0;
3440         sc->bce_coalchg_mask = 0;
3441
3442         ifp->if_flags &= ~IFF_RUNNING;
3443         ifq_clr_oactive(&ifp->if_snd);
3444         ifp->if_timer = 0;
3445 }
3446
3447
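     /****************************************************************************/
     /* Reset the controller.                                                    */
     /*                                                                          */
     /* Synchronizes with the bootcode, issues a chip reset, verifies byte       */
     /* swapping and waits for the bootcode to complete its initialization.      */
     /*                                                                          */
     /* Returns:                                                                 */
     /*   0 for success, positive value for failure.                             */
     /****************************************************************************/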
3448 static int
3449 bce_reset(struct bce_softc *sc, uint32_t reset_code)
3450 {
3451         uint32_t val;
3452         int i, rc = 0;
3453
3454         /* Wait for pending PCI transactions to complete. */
3455         REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
3456                BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3457                BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3458                BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3459                BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3460         val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3461         DELAY(5);
3462
3463         /* Disable DMA */
3464         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3465             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3466                 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3467                 val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3468                 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3469         }
3470
3471         /* Assume bootcode is running. */
3472         sc->bce_fw_timed_out = 0;
3473         sc->bce_drv_cardiac_arrest = 0;
3474
3475         /* Give the firmware a chance to prepare for the reset. */
3476         rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
3477         if (rc) {
3478                 if_printf(&sc->arpcom.ac_if,
3479                           "Firmware is not ready for reset\n");
3480                 return rc;
3481         }
3482
3483         /* Set a firmware reminder that this is a soft reset. */
3484         bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
3485             BCE_DRV_RESET_SIGNATURE_MAGIC);
3486
3487         /* Dummy read to force the chip to complete all current transactions. */
3488         val = REG_RD(sc, BCE_MISC_ID);
3489
3490         /* Chip reset. */
3491         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3492             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3493                 REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
3494                 REG_RD(sc, BCE_MISC_COMMAND);
3495                 DELAY(5);
3496
3497                 val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3498                     BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3499
3500                 pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
3501         } else {
3502                 val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3503                     BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3504                     BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3505                 REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);
3506
3507                 /* Reset takes roughly 30us; allow up to 100us for it to complete. */
3508                 for (i = 0; i < 10; i++) {
3509                         val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
3510                         if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3511                             BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3512                                 break;
3513                         DELAY(10);
3514                 }
3515
3516                 /* Check that reset completed successfully. */
3517                 if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3518                     BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3519                         if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
3520                         return EBUSY;
3521                 }
3522         }
3523
3524         /* Make sure byte swapping is properly configured. */
3525         val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
3526         if (val != 0x01020304) {
3527                 if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
3528                 return ENODEV;
3529         }
3530
3531         /* Just completed a reset; assume that the firmware is running again. */
3532         sc->bce_fw_timed_out = 0;
3533         sc->bce_drv_cardiac_arrest = 0;
3534
3535         /* Wait for the firmware to finish its initialization. */
3536         rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
3537         if (rc) {
3538                 if_printf(&sc->arpcom.ac_if,
3539                           "Firmware did not complete initialization!\n");
3540         }
3541         return rc;
3542 }
3543
3544
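     /****************************************************************************/
     /* Initialize the controller hardware.                                      */
     /*                                                                          */
     /* Sets up DMA and byte swapping, initializes context memory and the        */
     /* on-chip CPUs, prepares NVRAM for access and configures the MQ, RV2P      */
     /* and TBDR blocks.                                                         */
     /*                                                                          */
     /* Returns:                                                                 */
     /*   0 for success, positive value for failure.                             */
     /****************************************************************************/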
3545 static int
3546 bce_chipinit(struct bce_softc *sc)
3547 {
3548         uint32_t val;
3549         int rc = 0;
3550
3551         /* Make sure the interrupt is not active. */
3552         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
3553         REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
3554
3555         /*
3556          * Initialize DMA byte/word swapping, configure the number of DMA
3557          * channels and PCI clock compensation delay.
3558          */
3559         val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
3560               BCE_DMA_CONFIG_DATA_WORD_SWAP |
3561 #if BYTE_ORDER == BIG_ENDIAN
3562               BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
3563 #endif
3564               BCE_DMA_CONFIG_CNTL_WORD_SWAP |
3565               DMA_READ_CHANS << 12 |
3566               DMA_WRITE_CHANS << 16;
3567
3568         val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;
3569
3570         if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
3571                 val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;
3572
3573         /*
3574          * This setting resolves a problem observed on certain Intel PCI
3575          * chipsets that cannot handle multiple outstanding DMA operations.
3576          * See errata E9_5706A1_65.
3577          */
3578         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
3579             BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
3580             !(sc->bce_flags & BCE_PCIX_FLAG))
3581                 val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;
3582
3583         REG_WR(sc, BCE_DMA_CONFIG, val);
3584
3585         /* Enable the RX_V2P and Context state machines before access. */
3586         REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3587                BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3588                BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3589                BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3590
3591         /* Initialize context mapping and zero out the quick contexts. */
3592         rc = bce_init_ctx(sc);
3593         if (rc != 0)
3594                 return rc;
3595
3596         /* Initialize the on-board CPUs. */
3597         bce_init_cpus(sc);
3598
3599         /* Enable management frames (NC-SI) to flow to the MCP. */
3600         if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3601                 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
3602                     BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3603                 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3604         }
3605
3606         /* Prepare NVRAM for access. */
3607         rc = bce_init_nvram(sc);
3608         if (rc != 0)
3609                 return rc;
3610
3611         /* Set the kernel bypass block size */
3612         val = REG_RD(sc, BCE_MQ_CONFIG);
3613         val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3614         val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3615
3616         /* Enable bins used on the 5709/5716. */
3617         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3618             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3619                 val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
3620                 if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
3621                         val |= BCE_MQ_CONFIG_HALT_DIS;
3622         }
3623
3624         REG_WR(sc, BCE_MQ_CONFIG, val);
3625
3626         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3627         REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
3628         REG_WR(sc, BCE_MQ_KNL_WIND_END, val);
3629
3630         /* Set the page size and clear the RV2P processor stall bits. */
3631         val = (BCM_PAGE_BITS - 8) << 24;
3632         REG_WR(sc, BCE_RV2P_CONFIG, val);
3633
3634         /* Configure page size. */
3635         val = REG_RD(sc, BCE_TBDR_CONFIG);
3636         val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
3637         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3638         REG_WR(sc, BCE_TBDR_CONFIG, val);
3639
3640         /* Set the perfect match control register to default. */
3641         REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);
3642
3643         return 0;
3644 }
3645
3646
3647 /****************************************************************************/
3648 /* Initialize the controller in preparation to send/receive traffic.        */
3649 /*                                                                          */
3650 /* Returns:                                                                 */
3651 /*   0 for success, positive value for failure.                             */
3652 /****************************************************************************/
3653 static int
3654 bce_blockinit(struct bce_softc *sc)
3655 {
3656         uint32_t reg, val;
3657
3658         /* Load the hardware default MAC address. */
3659         bce_set_mac_addr(sc);
3660
3661         /* Set the Ethernet backoff seed value */
3662         val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3663               sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3664         REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3665
3666         sc->last_status_idx = 0;
3667         sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3668
3669         /* Set up link change interrupt generation. */
3670         REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3671
3672         /* Program the physical address of the status block. */
3673         REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3674         REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3675
3676         /* Program the physical address of the statistics block. */
3677         REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3678                BCE_ADDR_LO(sc->stats_block_paddr));
3679         REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3680                BCE_ADDR_HI(sc->stats_block_paddr));
3681
3682         /* Program various host coalescing parameters. */
3683         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3684                (sc->bce_tx_quick_cons_trip_int << 16) |
3685                sc->bce_tx_quick_cons_trip);
3686         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3687                (sc->bce_rx_quick_cons_trip_int << 16) |
3688                sc->bce_rx_quick_cons_trip);
3689         REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3690                (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3691         REG_WR(sc, BCE_HC_TX_TICKS,
3692                (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3693         REG_WR(sc, BCE_HC_RX_TICKS,
3694                (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3695         REG_WR(sc, BCE_HC_COM_TICKS,
3696                (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3697         REG_WR(sc, BCE_HC_CMD_TICKS,
3698                (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3699         REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3700         REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);   /* 3ms */
3701
3702         val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
3703         if (sc->bce_flags & BCE_ONESHOT_MSI_FLAG) {
3704                 if (bootverbose)
3705                         if_printf(&sc->arpcom.ac_if, "oneshot MSI\n");
3706                 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
3707         }
3708         REG_WR(sc, BCE_HC_CONFIG, val);
3709
3710         /* Clear the internal statistics counters. */
3711         REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3712
3713         /* Verify that bootcode is running. */
3714         reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
3715
3716         if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3717             BCE_DEV_INFO_SIGNATURE_MAGIC) {
3718                 if_printf(&sc->arpcom.ac_if,
3719                           "Bootcode not running! Found: 0x%08X, "
3720                           "Expected: 0x%08X\n",
3721                           reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3722                           BCE_DEV_INFO_SIGNATURE_MAGIC);
3723                 return ENODEV;
3724         }
3725
3726         /* Enable DMA */
3727         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3728             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3729                 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3730                 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3731                 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3732         }
3733
3734         /* Allow bootcode to apply any additional fixes before enabling MAC. */
3735         bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3736
3737         /* Enable link state change interrupt generation. */
3738         REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3739
3740         /* Enable the RXP. */
3741         bce_start_rxp_cpu(sc);
3742
3743         /* Prevent management frames (NC-SI) from flowing to the MCP. */
3744         if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3745                 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
3746                     ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3747                 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3748         }
3749
3750         /* Enable all remaining blocks in the MAC. */
3751         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3752             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3753                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3754                     BCE_MISC_ENABLE_DEFAULT_XI);
3755         } else {
3756                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
3757         }
3758         REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3759         DELAY(20);
3760
3761         /* Save the current host coalescing block settings. */
3762         sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
3763
3764         return 0;
3765 }
3766
3767
3768 /****************************************************************************/
3769 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3770 /*                                                                          */
3771 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3772 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3773 /* necessary.                                                               */
3774 /*                                                                          */
3775 /* Returns:                                                                 */
3776 /*   0 for success, positive value for failure.                             */
3777 /****************************************************************************/
3778 static int
3779 bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t *chain_prod,
3780     uint32_t *prod_bseq, int init)
3781 {
3782         bus_dmamap_t map;
3783         bus_dma_segment_t seg;
3784         struct mbuf *m_new;
3785         int error, nseg;
3786
3787         /* This is a new mbuf allocation. */
3788         m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3789         if (m_new == NULL)
3790                 return ENOBUFS;
3791
3792         m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3793
3794         /* Map the mbuf cluster into device memory. */
3795         error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
3796             rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
3797         if (error) {
3798                 m_freem(m_new);
3799                 if (init) {
3800                         if_printf(&rxr->sc->arpcom.ac_if,
3801                             "Error mapping mbuf into RX chain!\n");
3802                 }
3803                 return error;
3804         }
3805
3806         if (rxr->rx_mbuf_ptr[*chain_prod] != NULL) {
3807                 bus_dmamap_unload(rxr->rx_mbuf_tag,
3808                     rxr->rx_mbuf_map[*chain_prod]);
3809         }
3810
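             /*
              * The cluster was loaded with the spare map (rx_mbuf_tmpmap);
              * swap it into this ring slot and recycle the slot's old map
              * as the new spare.
              */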
3811         map = rxr->rx_mbuf_map[*chain_prod];
3812         rxr->rx_mbuf_map[*chain_prod] = rxr->rx_mbuf_tmpmap;
3813         rxr->rx_mbuf_tmpmap = map;
3814
3815         /* Save the mbuf and update our counter. */
3816         rxr->rx_mbuf_ptr[*chain_prod] = m_new;
3817         rxr->rx_mbuf_paddr[*chain_prod] = seg.ds_addr;
3818         rxr->free_rx_bd--;
3819
3820         bce_setup_rxdesc_std(rxr, *chain_prod, prod_bseq);
3821
3822         return 0;
3823 }
3824
3825
3826 static void
3827 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
3828     uint32_t *prod_bseq)
3829 {
3830         struct rx_bd *rxbd;
3831         bus_addr_t paddr;
3832         int len;
3833
3834         paddr = rxr->rx_mbuf_paddr[chain_prod];
3835         len = rxr->rx_mbuf_ptr[chain_prod]->m_len;
3836
3837         /* Setup the rx_bd for the first segment. */
3838         rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3839
3840         rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3841         rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3842         rxbd->rx_bd_len = htole32(len);
3843         rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
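             /*
              * prod_bseq is a running byte count of the buffers posted to
              * the RX chain; it is later written to the host bseq mailbox.
              */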
3844         *prod_bseq += len;
3845
3846         rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3847 }
3848
3849
3850 /****************************************************************************/
3851 /* Initialize the TX context memory.                                        */
3852 /*                                                                          */
3853 /* Returns:                                                                 */
3854 /*   Nothing                                                                */
3855 /****************************************************************************/
3856 static void
3857 bce_init_tx_context(struct bce_tx_ring *txr)
3858 {
3859         uint32_t val;
3860
3861         /* Initialize the context ID for an L2 TX chain. */
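             /*
              * The 5709/5716 use the _XI context register offsets; older
              * chips use the original layout.
              */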
3862         if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
3863             BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
3864                 /* Set the CID type to support an L2 connection. */
3865                 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3866                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3867                     BCE_L2CTX_TX_TYPE_XI, val);
3868                 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3869                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3870                     BCE_L2CTX_TX_CMD_TYPE_XI, val);
3871
3872                 /* Point the hardware to the first page in the chain. */
3873                 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3874                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3875                     BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
3876                 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3877                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3878                     BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
3879         } else {
3880                 /* Set the CID type to support an L2 connection. */
3881                 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3882                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID), BCE_L2CTX_TX_TYPE, val);
3883                 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3884                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3885                     BCE_L2CTX_TX_CMD_TYPE, val);
3886
3887                 /* Point the hardware to the first page in the chain. */
3888                 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3889                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3890                     BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
3891                 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3892                 CTX_WR(txr->sc, GET_CID_ADDR(TX_CID),
3893                     BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
3894         }
3895 }
3896
3897
3898 /****************************************************************************/
3899 /* Allocate memory and initialize the TX data structures.                   */
3900 /*                                                                          */
3901 /* Returns:                                                                 */
3902 /*   0 for success, positive value for failure.                             */
3903 /****************************************************************************/
3904 static int
3905 bce_init_tx_chain(struct bce_tx_ring *txr)
3906 {
3907         struct tx_bd *txbd;
3908         int i, rc = 0;
3909
3910         /* Set the initial TX producer/consumer indices. */
3911         txr->tx_prod = 0;
3912         txr->tx_cons = 0;
3913         txr->tx_prod_bseq = 0;
3914         txr->used_tx_bd = 0;
3915         txr->max_tx_bd = USABLE_TX_BD(txr);
3916
3917         /*
3918          * The NetXtreme II supports a linked-list structure called
3919          * a Buffer Descriptor Chain (or BD chain).  A BD chain
3920          * consists of a series of 1 or more chain pages, each of which
3921          * consists of a fixed number of BD entries.
3922          * The last BD entry on each page is a pointer to the next page
3923          * in the chain, and the last pointer in the BD chain
3924          * points back to the beginning of the chain.
3925          */
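             /*
              * For example, the next pointer BD of page i holds the physical
              * address of page (i + 1) % tx_pages, so the final page closes
              * the ring back to page 0.
              */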
3926
3927         /* Set the TX next pointer chain entries. */
3928         for (i = 0; i < txr->tx_pages; i++) {
3929                 int j;
3930
3931                 txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
3932
3933                 /* Check if we've reached the last page. */
3934                 if (i == (txr->tx_pages - 1))
3935                         j = 0;
3936                 else
3937                         j = i + 1;
3938
3939                 txbd->tx_bd_haddr_hi =
3940                     htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
3941                 txbd->tx_bd_haddr_lo =
3942                     htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
3943         }
3944         bce_init_tx_context(txr);
3945
3946         return(rc);
3947 }
3948
3949
3950 /****************************************************************************/
3951 /* Free memory and clear the TX data structures.                            */
3952 /*                                                                          */
3953 /* Returns:                                                                 */
3954 /*   Nothing.                                                               */
3955 /****************************************************************************/
3956 static void
3957 bce_free_tx_chain(struct bce_tx_ring *txr)
3958 {
3959         int i;
3960
3961         /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
3962         for (i = 0; i < TOTAL_TX_BD(txr); i++) {
3963                 if (txr->tx_mbuf_ptr[i] != NULL) {
3964                         bus_dmamap_unload(txr->tx_mbuf_tag,
3965                             txr->tx_mbuf_map[i]);
3966                         m_freem(txr->tx_mbuf_ptr[i]);
3967                         txr->tx_mbuf_ptr[i] = NULL;
3968                 }
3969         }
3970
3971         /* Clear each TX chain page. */
3972         for (i = 0; i < txr->tx_pages; i++)
3973                 bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
3974         txr->used_tx_bd = 0;
3975 }
3976
3977
3978 /****************************************************************************/
3979 /* Initialize the RX context memory.                                        */
3980 /*                                                                          */
3981 /* Returns:                                                                 */
3982 /*   Nothing                                                                */
3983 /****************************************************************************/
3984 static void
3985 bce_init_rx_context(struct bce_rx_ring *rxr)
3986 {
3987         uint32_t val;
3988
3989         /* Initialize the context ID for an L2 RX chain. */
3990         val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
3991             BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
3992
3993         /*
3994          * Set the level for generating pause frames
3995          * when the number of available rx_bd's gets
3996          * too low (the low watermark) and the level
3997          * when pause frames can be stopped (the high
3998          * watermark).
3999          */
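             /*
              * On the 5709/5716 the scaled watermarks are folded into the
              * context type word written below.
              */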
4000         if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4001             BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4002                 uint32_t lo_water, hi_water;
4003
4004                 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
4005                 hi_water = USABLE_RX_BD(rxr) / 4;
4006
4007                 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
4008                 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
4009
4010                 if (hi_water > 0xf)
4011                         hi_water = 0xf;
4012                 else if (hi_water == 0)
4013                         lo_water = 0;
4014                 val |= lo_water |
4015                     (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
4016         }
4017
4018         CTX_WR(rxr->sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_CTX_TYPE, val);
4019
4020         /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
4021         if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4022             BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4023                 val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
4024                 REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
4025         }
4026
4027         /* Point the hardware to the first page in the chain. */
4028         val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
4029         CTX_WR(rxr->sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_HI, val);
4030         val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
4031         CTX_WR(rxr->sc, GET_CID_ADDR(RX_CID), BCE_L2CTX_RX_NX_BDHADDR_LO, val);
4032 }
4033
4034
4035 /****************************************************************************/
4036 /* Allocate memory and initialize the RX data structures.                   */
4037 /*                                                                          */
4038 /* Returns:                                                                 */
4039 /*   0 for success, positive value for failure.                             */
4040 /****************************************************************************/
4041 static int
4042 bce_init_rx_chain(struct bce_rx_ring *rxr)
4043 {
4044         struct rx_bd *rxbd;
4045         int i, rc = 0;
4046         uint16_t prod, chain_prod;
4047         uint32_t prod_bseq;
4048
4049         /* Initialize the RX producer and consumer indices. */
4050         rxr->rx_prod = 0;
4051         rxr->rx_cons = 0;
4052         rxr->rx_prod_bseq = 0;
4053         rxr->free_rx_bd = USABLE_RX_BD(rxr);
4054         rxr->max_rx_bd = USABLE_RX_BD(rxr);
4055
4056         /* Initialize the RX next pointer chain entries. */
4057         for (i = 0; i < rxr->rx_pages; i++) {
4058                 int j;
4059
4060                 rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4061
4062                 /* Check if we've reached the last page. */
4063                 if (i == (rxr->rx_pages - 1))
4064                         j = 0;
4065                 else
4066                         j = i + 1;
4067
4068                 /* Setup the chain page pointers. */
4069                 rxbd->rx_bd_haddr_hi =
4070                     htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
4071                 rxbd->rx_bd_haddr_lo =
4072                     htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
4073         }
4074
4075         /* Allocate mbuf clusters for the rx_bd chain. */
4076         prod = prod_bseq = 0;
4077         while (prod < TOTAL_RX_BD(rxr)) {
4078                 chain_prod = RX_CHAIN_IDX(rxr, prod);
4079                 if (bce_newbuf_std(rxr, &prod, &chain_prod, &prod_bseq, 1)) {
4080                         if_printf(&rxr->sc->arpcom.ac_if,
4081                             "Error filling RX chain: rx_bd[0x%04X]!\n",
4082                             chain_prod);
4083                         rc = ENOBUFS;
4084                         break;
4085                 }
4086                 prod = NEXT_RX_BD(prod);
4087         }
4088
4089         /* Save the RX chain producer index. */
4090         rxr->rx_prod = prod;
4091         rxr->rx_prod_bseq = prod_bseq;
4092
4093         /* Tell the chip about the waiting rx_bd's. */
4094         REG_WR16(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
4095             rxr->rx_prod);
4096         REG_WR(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
4097             rxr->rx_prod_bseq);
4098
4099         bce_init_rx_context(rxr);
4100
4101         return(rc);
4102 }
4103
4104
4105 /****************************************************************************/
4106 /* Free memory and clear the RX data structures.                            */
4107 /*                                                                          */
4108 /* Returns:                                                                 */
4109 /*   Nothing.                                                               */
4110 /****************************************************************************/
4111 static void
4112 bce_free_rx_chain(struct bce_rx_ring *rxr)
4113 {
4114         int i;
4115
4116         /* Free any mbufs still in the RX mbuf chain. */
4117         for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
4118                 if (rxr->rx_mbuf_ptr[i] != NULL) {
4119                         bus_dmamap_unload(rxr->rx_mbuf_tag,
4120                             rxr->rx_mbuf_map[i]);
4121                         m_freem(rxr->rx_mbuf_ptr[i]);
4122                         rxr->rx_mbuf_ptr[i] = NULL;
4123                 }
4124         }
4125
4126         /* Clear each RX chain page. */
4127         for (i = 0; i < rxr->rx_pages; i++)
4128                 bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4129 }
4130
4131
4132 /****************************************************************************/
4133 /* Set media options.                                                       */
4134 /*                                                                          */
4135 /* Returns:                                                                 */
4136 /*   0 for success, positive value for failure.                             */
4137 /****************************************************************************/
4138 static int
4139 bce_ifmedia_upd(struct ifnet *ifp)
4140 {
4141         struct bce_softc *sc = ifp->if_softc;
4142         struct mii_data *mii = device_get_softc(sc->bce_miibus);
4143         int error = 0;
4144
4145         /*
4146          * 'mii' will be NULL when this function is called on the following
4147          * code path: bce_attach() -> bce_mgmt_init().
4148          */
4149         if (mii != NULL) {
4150                 /* Make sure the MII bus has been enumerated. */
4151                 sc->bce_link = 0;
4152                 if (mii->mii_instance) {
4153                         struct mii_softc *miisc;
4154
4155                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4156                                 mii_phy_reset(miisc);
4157                 }
4158                 error = mii_mediachg(mii);
4159         }
4160         return error;
4161 }
4162
4163
4164 /****************************************************************************/
4165 /* Reports current media status.                                            */
4166 /*                                                                          */
4167 /* Returns:                                                                 */
4168 /*   Nothing.                                                               */
4169 /****************************************************************************/
4170 static void
4171 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4172 {
4173         struct bce_softc *sc = ifp->if_softc;
4174         struct mii_data *mii = device_get_softc(sc->bce_miibus);
4175
4176         mii_pollstat(mii);
4177         ifmr->ifm_active = mii->mii_media_active;
4178         ifmr->ifm_status = mii->mii_media_status;
4179 }
4180
4181
4182 /****************************************************************************/
4183 /* Handles PHY generated interrupt events.                                  */
4184 /*                                                                          */
4185 /* Returns:                                                                 */
4186 /*   Nothing.                                                               */
4187 /****************************************************************************/
4188 static void
4189 bce_phy_intr(struct bce_softc *sc)
4190 {
4191         uint32_t new_link_state, old_link_state;
4192         struct ifnet *ifp = &sc->arpcom.ac_if;
4193
4194         ASSERT_SERIALIZED(&sc->main_serialize);
4195
4196         new_link_state = sc->status_block->status_attn_bits &
4197                          STATUS_ATTN_BITS_LINK_STATE;
4198         old_link_state = sc->status_block->status_attn_bits_ack &
4199                          STATUS_ATTN_BITS_LINK_STATE;
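             /*
              * status_attn_bits reflects the current attention state, while
              * status_attn_bits_ack holds the state last acknowledged by the
              * driver; a mismatch means the link state changed.
              */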
4200
4201         /* Handle any changes if the link state has changed. */
4202         if (new_link_state != old_link_state) { /* XXX redundant? */
4203                 /* Update the status_attn_bits_ack field in the status block. */
4204                 if (new_link_state) {
4205                         REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4206                                STATUS_ATTN_BITS_LINK_STATE);
4207                         if (bootverbose)
4208                                 if_printf(ifp, "Link is now UP.\n");
4209                 } else {
4210                         REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4211                                STATUS_ATTN_BITS_LINK_STATE);
4212                         if (bootverbose)
4213                                 if_printf(ifp, "Link is now DOWN.\n");
4214                 }
4215
4216                 /*
4217                  * Assume link is down and allow tick routine to
4218                  * update the state based on the actual media state.
4219                  */
4220                 sc->bce_link = 0;
4221                 callout_stop(&sc->bce_tick_callout);
4222                 bce_tick_serialized(sc);
4223         }
4224
4225         /* Acknowledge the link change interrupt. */
4226         REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4227 }
4228
4229
4230 /****************************************************************************/
4231 /* Reads the receive consumer value from the status block (skipping over    */
4232 /* chain page pointer if necessary).                                        */
4233 /*                                                                          */
4234 /* Returns:                                                                 */
4235 /*   hw_cons                                                                */
4236 /****************************************************************************/
4237 static __inline uint16_t
4238 bce_get_hw_rx_cons(struct bce_softc *sc)
4239 {
4240         uint16_t hw_cons = sc->status_block->status_rx_quick_consumer_index0;
4241
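             /*
              * The last BD of each page is the chain page pointer, not a
              * usable rx_bd, so step past it when the index lands there.
              */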
4242         if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4243                 hw_cons++;
4244         return hw_cons;
4245 }
4246
4247
4248 /****************************************************************************/
4249 /* Handles received frame interrupt events.                                 */
4250 /*                                                                          */
4251 /* Returns:                                                                 */
4252 /*   Nothing.                                                               */
4253 /****************************************************************************/
4254 static void
4255 bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
4256 {
4257         struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
4258         uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
4259         uint32_t sw_prod_bseq;
4260
4261         ASSERT_SERIALIZED(&rxr->rx_serialize);
4262
4263         /* Get working copies of the driver's view of the RX indices. */
4264         sw_cons = rxr->rx_cons;
4265         sw_prod = rxr->rx_prod;
4266         sw_prod_bseq = rxr->rx_prod_bseq;
4267
4268         /* Scan through the receive chain as long as there is work to do. */
4269         while (sw_cons != hw_cons) {
4270                 struct mbuf *m = NULL;
4271                 struct l2_fhdr *l2fhdr = NULL;
4272                 unsigned int len;
4273                 uint32_t status = 0;
4274
4275 #ifdef IFPOLL_ENABLE
4276                 if (count >= 0 && count-- == 0)
4277                         break;
4278 #endif
4279
4280                 /*
4281                  * Convert the producer/consumer indices
4282                  * to an actual rx_bd index.
4283                  */
4284                 sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
4285                 sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);
4286
4287                 rxr->free_rx_bd++;
4288
4289                 /* The mbuf is stored with the last rx_bd entry of a packet. */
4290                 if (rxr->rx_mbuf_ptr[sw_chain_cons] != NULL) {
4291                         if (sw_chain_cons != sw_chain_prod) {
4292                                 if_printf(ifp, "RX cons(%d) != prod(%d), "
4293                                     "drop!\n", sw_chain_cons, sw_chain_prod);
4294                                 IFNET_STAT_INC(ifp, ierrors, 1);
4295
4296                                 bce_setup_rxdesc_std(rxr, sw_chain_cons,
4297                                     &sw_prod_bseq);
4298                                 m = NULL;
4299                                 goto bce_rx_int_next_rx;
4300                         }
4301
4302                         /* Unmap the mbuf from DMA space. */
4303                         bus_dmamap_sync(rxr->rx_mbuf_tag,
4304                             rxr->rx_mbuf_map[sw_chain_cons],
4305                             BUS_DMASYNC_POSTREAD);
4306
4307                         /* Save the mbuf from the driver's chain. */
4308                         m = rxr->rx_mbuf_ptr[sw_chain_cons];
4309
4310                         /*
4311                          * Frames received on the NetXtreme II are prepended
4312                          * with an l2_fhdr structure which provides status
4313                          * information about the received frame (including
4314                          * VLAN tags and checksum info).  The frames are also
4315                          * automatically adjusted to align the IP header
4316                          * (i.e. two null bytes are inserted before the 
4317                          * Ethernet header).  As a result the data DMA'd by
4318                          * the controller into the mbuf is as follows:
4319                          *
4320                          * +---------+-----+---------------------+-----+
4321                          * | l2_fhdr | pad | packet data         | FCS |
4322                          * +---------+-----+---------------------+-----+
4323                          * 
4324                          * The l2_fhdr needs to be checked and skipped and the
4325                          * FCS needs to be stripped before sending the packet
4326                          * up the stack.
4327                          */
4328                         l2fhdr = mtod(m, struct l2_fhdr *);
4329
4330                         len = l2fhdr->l2_fhdr_pkt_len;
4331                         status = l2fhdr->l2_fhdr_status;
4332
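                             /*
                              * The controller DMAs the trailing FCS along with
                              * the frame; remove it from the reported length.
                              */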
4333                         len -= ETHER_CRC_LEN;
4334
4335                         /* Check the received frame for errors. */
4336                         if (status & (L2_FHDR_ERRORS_BAD_CRC |
4337                                       L2_FHDR_ERRORS_PHY_DECODE |
4338                                       L2_FHDR_ERRORS_ALIGNMENT |
4339                                       L2_FHDR_ERRORS_TOO_SHORT |
4340                                       L2_FHDR_ERRORS_GIANT_FRAME)) {
4341                                 IFNET_STAT_INC(ifp, ierrors, 1);
4342
4343                                 /* Reuse the mbuf for a new frame. */
4344                                 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4345                                     &sw_prod_bseq);
4346                                 m = NULL;
4347                                 goto bce_rx_int_next_rx;
4348                         }
4349
4350                         /* 
4351                          * Get a new mbuf for the rx_bd.   If no new
4352                          * mbufs are available then reuse the current mbuf,
4353                          * and count an input error on the interface.
4355                          */
4356                         if (bce_newbuf_std(rxr, &sw_prod, &sw_chain_prod,
4357                             &sw_prod_bseq, 0)) {
4358                                 IFNET_STAT_INC(ifp, ierrors, 1);
4359
4360                                 /* Try to reuse the existing mbuf. */
4361                                 bce_setup_rxdesc_std(rxr, sw_chain_prod,
4362                                     &sw_prod_bseq);
4363                                 m = NULL;
4364                                 goto bce_rx_int_next_rx;
4365                         }
4366
4367                         /*
4368                          * Skip over the l2_fhdr when passing
4369                          * the data up the stack.
4370                          */
4371                         m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);
4372
4373                         m->m_pkthdr.len = m->m_len = len;
4374                         m->m_pkthdr.rcvif = ifp;
4375
4376                         /* Validate the checksum if offload enabled. */
4377                         if (ifp->if_capenable & IFCAP_RXCSUM) {
4378                                 /* Check for an IP datagram. */
4379                                 if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
4380                                         m->m_pkthdr.csum_flags |=
4381                                                 CSUM_IP_CHECKED;
4382
4383                                         /* Check if the IP checksum is valid. */
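                                             /*
                                              * A correct IP header sums to
                                              * 0xffff, so the XOR is zero
                                              * for a valid checksum.
                                              */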
4384                                         if ((l2fhdr->l2_fhdr_ip_xsum ^
4385                                              0xffff) == 0) {
4386                                                 m->m_pkthdr.csum_flags |=
4387                                                         CSUM_IP_VALID;
4388                                         }
4389                                 }
4390
4391                                 /* Check for a valid TCP/UDP frame. */
4392                                 if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
4393                                               L2_FHDR_STATUS_UDP_DATAGRAM)) {
4394
4395                                         /* Check for a good TCP/UDP checksum. */
4396                                         if ((status &
4397                                              (L2_FHDR_ERRORS_TCP_XSUM |
4398                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
4399                                                 m->m_pkthdr.csum_data =
4400                                                 l2fhdr->l2_fhdr_tcp_udp_xsum;
4401                                                 m->m_pkthdr.csum_flags |=
4402                                                         CSUM_DATA_VALID |
4403                                                         CSUM_PSEUDO_HDR;
4404                                         }
4405                                 }
4406                         }
4407
4408                         IFNET_STAT_INC(ifp, ipackets, 1);
4409 bce_rx_int_next_rx:
4410                         sw_prod = NEXT_RX_BD(sw_prod);
4411                 }
4412
4413                 sw_cons = NEXT_RX_BD(sw_cons);
4414
4415                 /* If we have a packet, pass it up the stack */
4416                 if (m) {
4417                         if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
4418                                 m->m_flags |= M_VLANTAG;
4419                                 m->m_pkthdr.ether_vlantag =
4420                                         l2fhdr->l2_fhdr_vlan_tag;
4421                         }
4422                         ifp->if_input(ifp, m);
4423                 }
4424         }
4425
4426         rxr->rx_cons = sw_cons;
4427         rxr->rx_prod = sw_prod;
4428         rxr->rx_prod_bseq = sw_prod_bseq;
4429
4430         REG_WR16(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX,
4431             rxr->rx_prod);
4432         REG_WR(rxr->sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BSEQ,
4433             rxr->rx_prod_bseq);
4434 }
4435
4436
4437 /****************************************************************************/
4438 /* Reads the transmit consumer value from the status block (skipping over   */
4439 /* chain page pointer if necessary).                                        */
4440 /*                                                                          */
4441 /* Returns:                                                                 */
4442 /*   hw_cons                                                                */
4443 /****************************************************************************/
4444 static __inline uint16_t
4445 bce_get_hw_tx_cons(struct bce_softc *sc)
4446 {
4447         uint16_t hw_cons = sc->status_block->status_tx_quick_consumer_index0;
4448
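             /*
              * As on the RX side, skip the chain page pointer BD at the
              * end of each page.
              */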
4449         if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4450                 hw_cons++;
4451         return hw_cons;
4452 }
4453
4454
4455 /****************************************************************************/
4456 /* Handles transmit completion interrupt events.                            */
4457 /*                                                                          */
4458 /* Returns:                                                                 */
4459 /*   Nothing.                                                               */
4460 /****************************************************************************/
4461 static void
4462 bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
4463 {
4464         struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4465         uint16_t sw_tx_cons, sw_tx_chain_cons;
4466
4467         ASSERT_SERIALIZED(&txr->tx_serialize);
4468
4469         /* Get the driver's view of the TX consumer index. */
4470         sw_tx_cons = txr->tx_cons;
4471
4472         /* Cycle through any completed TX chain page entries. */
4473         while (sw_tx_cons != hw_tx_cons) {
4474                 sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
4475
4476                 /*
4477                  * Free the associated mbuf. Remember
4478                  * that only the last tx_bd of a packet
4479                  * has an mbuf pointer and DMA map.
4480                  */
4481                 if (txr->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4482                         /* Unmap the mbuf. */
4483                         bus_dmamap_unload(txr->tx_mbuf_tag,
4484                             txr->tx_mbuf_map[sw_tx_chain_cons]);
4485
4486                         /* Free the mbuf. */
4487                         m_freem(txr->tx_mbuf_ptr[sw_tx_chain_cons]);
4488                         txr->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4489
4490                         IFNET_STAT_INC(ifp, opackets, 1);
4491                 }
4492
4493                 txr->used_tx_bd--;
4494                 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4495         }
4496
4497         if (txr->used_tx_bd == 0) {
4498                 /* Clear the TX timeout timer. */
4499                 ifp->if_timer = 0;
4500         }
4501
4502         /* Clear the tx hardware queue full flag. */
4503         if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
4504                 ifq_clr_oactive(&ifp->if_snd);
4505         txr->tx_cons = sw_tx_cons;
4506 }
4507
4508
4509 /****************************************************************************/
4510 /* Disables interrupt generation.                                           */
4511 /*                                                                          */
4512 /* Returns:                                                                 */
4513 /*   Nothing.                                                               */
4514 /****************************************************************************/
4515 static void
4516 bce_disable_intr(struct bce_softc *sc)
4517 {
4518         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4519         REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4520
4521         callout_stop(&sc->bce_ckmsi_callout);
4522         sc->bce_msi_maylose = FALSE;
4523         sc->bce_check_rx_cons = 0;
4524         sc->bce_check_tx_cons = 0;
4525         sc->bce_check_status_idx = 0xffff;
4526
4527         lwkt_serialize_handler_disable(&sc->main_serialize);
4528 }
4529
4530
4531 /****************************************************************************/
4532 /* Enables interrupt generation.                                            */
4533 /*                                                                          */
4534 /* Returns:                                                                 */
4535 /*   Nothing.                                                               */
4536 /****************************************************************************/
4537 static void
4538 bce_enable_intr(struct bce_softc *sc)
4539 {
4540         lwkt_serialize_handler_enable(&sc->main_serialize);
4541
4542         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4543                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4544                BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4545         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4546                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4547
4548         REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
4549
4550         if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
4551                 sc->bce_msi_maylose = FALSE;
4552                 sc->bce_check_rx_cons = 0;
4553                 sc->bce_check_tx_cons = 0;
4554                 sc->bce_check_status_idx = 0xffff;
4555
4556                 if (bootverbose)
4557                         if_printf(&sc->arpcom.ac_if, "check msi\n");
4558
4559                 callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
4560                     bce_check_msi, sc, sc->bce_intr_cpuid);
4561         }
4562 }
4563
4564
4565 /****************************************************************************/
4566 /* Reenables interrupt generation during interrupt handling.                */
4567 /*                                                                          */
4568 /* Returns:                                                                 */
4569 /*   Nothing.                                                               */
4570 /****************************************************************************/
4571 static void
4572 bce_reenable_intr(struct bce_softc *sc)
4573 {
4574         if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
4575                 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4576                        BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4577                        BCE_PCICFG_INT_ACK_CMD_MASK_INT | sc->last_status_idx);
4578         }
4579         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4580                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | sc->last_status_idx);
4581 }
4582
4583
4584 /****************************************************************************/
4585 /* Handles controller initialization.                                       */
4586 /*                                                                          */
4587 /* Returns:                                                                 */
4588 /*   Nothing.                                                               */
4589 /****************************************************************************/
4590 static void
4591 bce_init(void *xsc)
4592 {
4593         struct bce_softc *sc = xsc;
4594         struct ifnet *ifp = &sc->arpcom.ac_if;
4595         uint32_t ether_mtu;
4596         int error, i;
4597
4598         ASSERT_IFNET_SERIALIZED_ALL(ifp);
4599
4600         /* Bail out if the interface is already running. */
4601         if (ifp->if_flags & IFF_RUNNING)
4602                 return;
4603
4604         bce_stop(sc);
4605
4606         error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4607         if (error) {
4608                 if_printf(ifp, "Controller reset failed!\n");
4609                 goto back;
4610         }
4611
4612         error = bce_chipinit(sc);
4613         if (error) {
4614                 if_printf(ifp, "Controller initialization failed!\n");
4615                 goto back;
4616         }
4617
4618         error = bce_blockinit(sc);
4619         if (error) {
4620                 if_printf(ifp, "Block initialization failed!\n");
4621                 goto back;
4622         }
4623
4624         /* Load our MAC address. */
4625         bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
4626         bce_set_mac_addr(sc);
4627
4628         /* Calculate and program the Ethernet MTU size. */
4629         ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;
4630
4631         /* 
4632          * Program the mtu, enabling jumbo frame 
4633          * support if necessary.  Also set the mbuf
4634          * allocation count for RX frames.
4635          */
4636         if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
4637 #ifdef notyet
4638                 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
4639                        min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
4640                        BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
4641 #else
4642                 panic("jumbo buffer is not supported yet");
4643 #endif
4644         } else {
4645                 REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
4646         }
4647
4648         /* Program appropriate promiscuous/multicast filtering. */
4649         bce_set_rx_mode(sc);
4650
4651         /* Init RX buffer descriptor chain. */
4652         for (i = 0; i < sc->ring_cnt; ++i)
4653                 bce_init_rx_chain(&sc->rx_rings[i]);    /* XXX return value */
4654
4655         /* Init TX buffer descriptor chain. */
4656         for (i = 0; i < sc->ring_cnt; ++i)
4657                 bce_init_tx_chain(&sc->tx_rings[i]);
4658
4659 #ifdef IFPOLL_ENABLE
4660         /* Disable interrupts if we are polling. */
4661         if (ifp->if_flags & IFF_NPOLLING) {
4662                 bce_disable_intr(sc);
4663
4664                 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
4665                        (1 << 16) | sc->bce_rx_quick_cons_trip);
4666                 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
4667                        (1 << 16) | sc->bce_tx_quick_cons_trip);
4668         } else
4669 #endif
4670         /* Enable host interrupts. */
4671         bce_enable_intr(sc);
4672
4673         bce_ifmedia_upd(ifp);
4674
4675         ifp->if_flags |= IFF_RUNNING;
4676         ifq_clr_oactive(&ifp->if_snd);
4677
4678         callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
4679             sc->bce_intr_cpuid);
4680 back:
4681         if (error)
4682                 bce_stop(sc);
4683 }
4684
4685
4686 /****************************************************************************/
4687 /* Initialize the controller just enough so that any management firmware    */
4688 /* running on the device will continue to operate correctly.                */
4689 /*                                                                          */
4690 /* Returns:                                                                 */
4691 /*   Nothing.                                                               */
4692 /****************************************************************************/
4693 static void
4694 bce_mgmt_init(struct bce_softc *sc)
4695 {
4696         struct ifnet *ifp = &sc->arpcom.ac_if;
4697
4698         /* Bail out if management firmware is not running. */
4699         if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4700                 return;
4701
4702         /* Enable all critical blocks in the MAC. */
4703         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
4704             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
4705                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4706                     BCE_MISC_ENABLE_DEFAULT_XI);
4707         } else {
4708                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4709         }
4710         REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4711         DELAY(20);
4712
4713         bce_ifmedia_upd(ifp);
4714 }
4715
4716
4717 /****************************************************************************/
4718 /* Encapsulates an mbuf cluster into the tx_bd chain structure, making the  */
4719 /* memory visible to the controller.                                        */
4720 /*                                                                          */
4721 /* Returns:                                                                 */
4722 /*   0 for success, positive value for failure.                             */
4723 /****************************************************************************/
4724 static int
4725 bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
4726 {
4727         bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4728         bus_dmamap_t map, tmp_map;
4729         struct mbuf *m0 = *m_head;
4730         struct tx_bd *txbd = NULL;
4731         uint16_t vlan_tag = 0, flags = 0, mss = 0;
4732         uint16_t chain_prod, chain_prod_start, prod;
4733         uint32_t prod_bseq;
4734         int i, error, maxsegs, nsegs;
4735
4736         /* Transfer any checksum offload flags to the bd. */
4737         if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
4738                 error = bce_tso_setup(txr, m_head, &flags, &mss);
4739                 if (error)
4740                         return ENOBUFS;
4741                 m0 = *m_head;
4742         } else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
4743                 if (m0->m_pkthdr.csum_flags & CSUM_IP)
4744                         flags |= TX_BD_FLAGS_IP_CKSUM;
4745                 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4746                         flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4747         }
4748
4749         /* Transfer any VLAN tags to the bd. */
4750         if (m0->m_flags & M_VLANTAG) {
4751                 flags |= TX_BD_FLAGS_VLAN_TAG;
4752                 vlan_tag = m0->m_pkthdr.ether_vlantag;
4753         }
4754
4755         prod = txr->tx_prod;
4756         chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);
4757
4758         /* Map the mbuf into DMAable memory. */
4759         map = txr->tx_mbuf_map[chain_prod_start];
4760
4761         maxsegs = txr->max_tx_bd - txr->used_tx_bd;
4762         KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4763                 ("not enough segments %d", maxsegs));
4764         if (maxsegs > BCE_MAX_SEGMENTS)
4765                 maxsegs = BCE_MAX_SEGMENTS;
4766
4767         /* Map the mbuf into our DMA address space. */
4768         error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
4769                         segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
4770         if (error)
4771                 goto back;
4772         bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4773
4774         *nsegs_used += nsegs;
4775
4776         /* Reset m0 */
4777         m0 = *m_head;
4778
4779         /* prod points to an empty tx_bd at this point. */
4780         prod_bseq  = txr->tx_prod_bseq;
4781
4782         /*
4783          * Cycle through each mbuf segment that makes up
4784          * the outgoing frame, gathering the mapping info
4785          * for that segment and creating a tx_bd for
4786          * the mbuf.
4787          */
4788         for (i = 0; i < nsegs; i++) {
4789                 chain_prod = TX_CHAIN_IDX(txr, prod);
4790                 txbd =
4791                 &txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4792
4793                 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4794                 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4795                 txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
4796                     htole16(segs[i].ds_len);
4797                 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4798                 txbd->tx_bd_flags = htole16(flags);
4799
4800                 prod_bseq += segs[i].ds_len;
4801                 if (i == 0)
4802                         txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4803                 prod = NEXT_TX_BD(prod);
4804         }
4805
4806         /* Set the END flag on the last TX buffer descriptor. */
4807         txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4808
4809         /*
4810          * Ensure that the mbuf pointer for this transmission
4811          * is placed at the array index of the last
4812          * descriptor in this chain.  This is done
4813          * because a single map is used for all 
4814          * segments of the mbuf and we don't want to
4815          * unload the map before all of the segments
4816          * have been freed.
4817          */
4818         txr->tx_mbuf_ptr[chain_prod] = m0;
4819
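             /*
              * The DMA map was borrowed from the slot at chain_prod_start;
              * keep it with the last descriptor (next to the mbuf pointer)
              * and park that slot's old map back at the start index.
              */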
4820         tmp_map = txr->tx_mbuf_map[chain_prod];
4821         txr->tx_mbuf_map[chain_prod] = map;
4822         txr->tx_mbuf_map[chain_prod_start] = tmp_map;
4823
4824         txr->used_tx_bd += nsegs;
4825
4826         /* prod points to the next free tx_bd at this point. */
4827         txr->tx_prod = prod;
4828         txr->tx_prod_bseq = prod_bseq;
4829 back:
4830         if (error) {
4831                 m_freem(*m_head);
4832                 *m_head = NULL;
4833         }
4834         return error;
4835 }
4836
4837
4838 static void
4839 bce_xmit(struct bce_tx_ring *txr)
4840 {
4841         /* Start the transmit. */
4842         REG_WR16(txr->sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BIDX,
4843             txr->tx_prod);
4844         REG_WR(txr->sc, MB_GET_CID_ADDR(TX_CID) + BCE_L2CTX_TX_HOST_BSEQ,
4845             txr->tx_prod_bseq);
4846 }
4847
4848
4849 /****************************************************************************/
4850 /* Main transmit routine when called from another routine with a lock.      */
4851 /*                                                                          */
4852 /* Returns:                                                                 */
4853 /*   Nothing.                                                               */
4854 /****************************************************************************/
4855 static void
4856 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4857 {
4858         struct bce_softc *sc = ifp->if_softc;
4859         struct bce_tx_ring *txr = &sc->tx_rings[0];
4860         int count = 0;
4861
4862         ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
4863         ASSERT_SERIALIZED(&txr->tx_serialize);
4864
4865         /* If there's no link, purge any queued frames and return. */
4866         if (!sc->bce_link) {
4867                 ifq_purge(&ifp->if_snd);
4868                 return;
4869         }
4870
4871         if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
4872                 return;
4873
4874         for (;;) {
4875                 struct mbuf *m_head;
4876
4877                 /*
4878                  * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4879                  * unlikely to fail.
4880                  */
4881                 if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
4882                         ifq_set_oactive(&ifp->if_snd);
4883                         break;
4884                 }
4885
4886                 /* Check for any frames to send. */
4887                 m_head = ifq_dequeue(&ifp->if_snd, NULL);
4888                 if (m_head == NULL)
4889                         break;
4890
4891                 /*
4892                  * Pack the data into the transmit ring. If we
4893                  * don't have room, place the mbuf back at the
4894                  * head of the queue and set the OACTIVE flag
4895                  * to wait for the NIC to drain the chain.
4896                  */
4897                 if (bce_encap(txr, &m_head, &count)) {
4898                         IFNET_STAT_INC(ifp, oerrors, 1);
4899                         if (txr->used_tx_bd == 0) {
4900                                 continue;
4901                         } else {
4902                                 ifq_set_oactive(&ifp->if_snd);
4903                                 break;
4904                         }
4905                 }
4906
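                     /*
                      * Batch doorbell updates: only write the TX mailbox
                      * once every tx_wreg descriptors.
                      */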
4907                 if (count >= txr->tx_wreg) {
4908                         bce_xmit(txr);
4909                         count = 0;
4910                 }
4911
4912                 /* Send a copy of the frame to any BPF listeners. */
4913                 ETHER_BPF_MTAP(ifp, m_head);
4914
4915                 /* Set the tx timeout. */
4916                 ifp->if_timer = BCE_TX_TIMEOUT;
4917         }
4918         if (count > 0)
4919                 bce_xmit(txr);
4920 }
4921
4922
4923 /****************************************************************************/
4924 /* Handles any IOCTL calls from the operating system.                       */
4925 /*                                                                          */
4926 /* Returns:                                                                 */
4927 /*   0 for success, positive value for failure.                             */
4928 /****************************************************************************/
4929 static int
4930 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
4931 {
4932         struct bce_softc *sc = ifp->if_softc;
4933         struct ifreq *ifr = (struct ifreq *)data;
4934         struct mii_data *mii;
4935         int mask, error = 0;
4936
4937         ASSERT_IFNET_SERIALIZED_ALL(ifp);
4938
4939         switch(command) {
4940         case SIOCSIFMTU:
4941                 /* Check that the MTU setting is supported. */
4942                 if (ifr->ifr_mtu < BCE_MIN_MTU ||
4943 #ifdef notyet
4944                     ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
4945 #else
4946                     ifr->ifr_mtu > ETHERMTU
4947 #endif
4948                    ) {
4949                         error = EINVAL;
4950                         break;
4951                 }
4952
4953                 ifp->if_mtu = ifr->ifr_mtu;
4954                 ifp->if_flags &= ~IFF_RUNNING;  /* Force reinitialize */
4955                 bce_init(sc);
4956                 break;
4957
4958         case SIOCSIFFLAGS:
4959                 if (ifp->if_flags & IFF_UP) {
4960                         if (ifp->if_flags & IFF_RUNNING) {
4961                                 mask = ifp->if_flags ^ sc->bce_if_flags;
4962
4963                                 if (mask & (IFF_PROMISC | IFF_ALLMULTI))
4964                                         bce_set_rx_mode(sc);
4965                         } else {
4966                                 bce_init(sc);
4967                         }
4968                 } else if (ifp->if_flags & IFF_RUNNING) {
4969                         bce_stop(sc);
4970
4971                         /* Partially re-init the controller if MFW is running. */
4972                         if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
4973                                 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
4974                                 bce_chipinit(sc);
4975                                 bce_mgmt_init(sc);
4976                         }
4977                 }
4978                 sc->bce_if_flags = ifp->if_flags;
4979                 break;
4980
4981         case SIOCADDMULTI:
4982         case SIOCDELMULTI:
4983                 if (ifp->if_flags & IFF_RUNNING)
4984                         bce_set_rx_mode(sc);
4985                 break;
4986
4987         case SIOCSIFMEDIA:
4988         case SIOCGIFMEDIA:
4989                 mii = device_get_softc(sc->bce_miibus);
4990                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
4991                 break;
4992
4993         case SIOCSIFCAP:
4994                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
4995                 if (mask & IFCAP_HWCSUM) {
4996                         ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
4997                         if (ifp->if_capenable & IFCAP_TXCSUM)
4998                                 ifp->if_hwassist |= BCE_CSUM_FEATURES;
4999                         else
5000                                 ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
5001                 }
5002                 if (mask & IFCAP_TSO) {
5003                         ifp->if_capenable ^= IFCAP_TSO;
5004                         if (ifp->if_capenable & IFCAP_TSO)
5005                                 ifp->if_hwassist |= CSUM_TSO;
5006                         else
5007                                 ifp->if_hwassist &= ~CSUM_TSO;
5008                 }
5009                 break;
5010
5011         default:
5012                 error = ether_ioctl(ifp, command, data);
5013                 break;
5014         }
5015         return error;
5016 }
5017
5018
5019 /****************************************************************************/
5020 /* Transmit timeout handler.                                                */
5021 /*                                                                          */
5022 /* Returns:                                                                 */
5023 /*   Nothing.                                                               */
5024 /****************************************************************************/
5025 static void
5026 bce_watchdog(struct ifnet *ifp)
5027 {
5028         struct bce_softc *sc = ifp->if_softc;
5029
5030         ASSERT_IFNET_SERIALIZED_ALL(ifp);
5031
5032         /*
5033          * If we are in this routine because of pause frames, then
5034          * don't reset the hardware.
5035          */
5036         if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED) 
5037                 return;
5038
5039         if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
5040
5041         ifp->if_flags &= ~IFF_RUNNING;  /* Force reinitialize */
5042         bce_init(sc);
5043
5044         IFNET_STAT_INC(ifp, oerrors, 1);
5045
5046         if (!ifq_is_empty(&ifp->if_snd))
5047                 if_devstart(ifp);
5048 }
5049
5050
5051 #ifdef IFPOLL_ENABLE
5052
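     /*
      * NPOLLING status callback.  Runs with only the main serializer
      * held: services link state (PHY) attentions and treats any other
      * attention bit as fatal, reinitializing the controller.
      */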
5053 static void
5054 bce_npoll_status(struct ifnet *ifp)
5055 {
5056         struct bce_softc *sc = ifp->if_softc;
5057         struct status_block *sblk = sc->status_block;
5058         uint32_t status_attn_bits;
5059
5060         ASSERT_SERIALIZED(&sc->main_serialize);
5061
5062         status_attn_bits = sblk->status_attn_bits;
5063
5064         /* Was it a link change interrupt? */
5065         if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5066             (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5067                 bce_phy_intr(sc);
5068
5069                 /*
5070                  * Clear any transient status updates during link state change.
5071                  */
5072                 REG_WR(sc, BCE_HC_COMMAND,
5073                     sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5074                 REG_RD(sc, BCE_HC_COMMAND);
5075         }
5076
5077         /*
5078          * If any other attention is asserted then the chip is toast.
5079          */
5080         if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5081              (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5082                 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5083                     sblk->status_attn_bits);
5084                 bce_serialize_skipmain(sc);
5085                 bce_init(sc);
5086                 bce_deserialize_skipmain(sc);
5087         }
5088 }
5089
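     /*
      * NPOLLING RX callback.  Records the current status block index
      * (used later when interrupts are re-enabled) and processes up to
      * 'count' received frames on this ring.
      */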
5090 static void
5091 bce_npoll_rx(struct ifnet *ifp, void *arg, int count)
5092 {
5093         struct bce_softc *sc = ifp->if_softc;
5094         struct bce_rx_ring *rxr = arg;
5095         struct status_block *sblk = sc->status_block;
5096         uint16_t hw_rx_cons;
5097
5098         ASSERT_SERIALIZED(&rxr->rx_serialize);
5099
5100         /*
5101          * Save the status block index value for use when enabling
5102          * the interrupt.
5103          */
5104         sc->last_status_idx = sblk->status_idx;
5105
5106         /* Make sure status index is extracted before RX/TX cons */
5107         cpu_lfence();
5108
5109         hw_rx_cons = bce_get_hw_rx_cons(sc);
5110
5111         /* Check for any completed RX frames. */
5112         if (hw_rx_cons != rxr->rx_cons)
5113                 bce_rx_intr(rxr, count, hw_rx_cons);
5114 }
5115
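     /*
      * NPOLLING TX callback.  Reclaims completed TX descriptors and
      * restarts the send queue if packets are still waiting.
      */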
5116 static void
5117 bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused)
5118 {
5119         struct bce_softc *sc = ifp->if_softc;
5120         struct bce_tx_ring *txr = arg;
5121         uint16_t hw_tx_cons;
5122
5123         ASSERT_SERIALIZED(&txr->tx_serialize);
5124
5125         hw_tx_cons = bce_get_hw_tx_cons(sc);
5126
5127         /* Check for any completed TX frames. */
5128         if (hw_tx_cons != txr->tx_cons) {
5129                 bce_tx_intr(txr, hw_tx_cons);
5130                 if (!ifq_is_empty(&ifp->if_snd))
5131                         if_devstart(ifp);
5132         }
5133 }
5134
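     /*
      * NPOLLING registration hook.  With a non-NULL info the status,
      * RX and TX handlers are registered on CPUs starting at npoll_ofs,
      * the hardware interrupt is disabled and the quick consumer trip
      * registers are reprogrammed for polling; with a NULL info,
      * interrupt-driven operation and the configured coalescing
      * parameters are restored.
      */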
5135 static void
5136 bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
5137 {
5138         struct bce_softc *sc = ifp->if_softc;
5139         int i;
5140
5141         ASSERT_IFNET_SERIALIZED_ALL(ifp);
5142
5143         if (info != NULL) {
5144                 info->ifpi_status.status_func = bce_npoll_status;
5145                 info->ifpi_status.serializer = &sc->main_serialize;
5146
5147                 for (i = 0; i < sc->ring_cnt; ++i) {
5148                         struct bce_tx_ring *txr = &sc->tx_rings[i];
5149                         struct bce_rx_ring *rxr = &sc->rx_rings[i];
5150                         int idx = i + sc->npoll_ofs;
5151
5152                         KKASSERT(idx < ncpus2);
5153
5154                         info->ifpi_tx[idx].poll_func = bce_npoll_tx;
5155                         info->ifpi_tx[idx].arg = txr;
5156                         info->ifpi_tx[idx].serializer = &txr->tx_serialize;
5157
5158                         info->ifpi_rx[idx].poll_func = bce_npoll_rx;
5159                         info->ifpi_rx[idx].arg = rxr;
5160                         info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
5161                 }
5162
5163                 if (ifp->if_flags & IFF_RUNNING) {
5164                         bce_disable_intr(sc);
5165
5166                         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5167                                (1 << 16) | sc->bce_rx_quick_cons_trip);
5168                         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5169                                (1 << 16) | sc->bce_tx_quick_cons_trip);
5170                 }
5171                 ifq_set_cpuid(&ifp->if_snd, sc->npoll_ofs); /* XXX */
5172         } else {
5173                 ifq_set_cpuid(&ifp->if_snd, sc->bce_intr_cpuid);
5174
5175                 if (ifp->if_flags & IFF_RUNNING) {
5176                         bce_enable_intr(sc);
5177
5178                         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
5179                                (sc->bce_tx_quick_cons_trip_int << 16) |
5180                                sc->bce_tx_quick_cons_trip);
5181                         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
5182                                (sc->bce_rx_quick_cons_trip_int << 16) |
5183                                sc->bce_rx_quick_cons_trip);
5184                 }
5185         }
5186 }
5187
5188 #endif  /* IFPOLL_ENABLE */
5189
5190
5191 /*
5192  * Interrupt handler.
5193  */
5194 /****************************************************************************/
5195 /* Main interrupt entry point.  Verifies that the controller generated the  */
5196 /* interrupt and then calls a separate routine to handle the various        */
5197 /* interrupt causes (PHY, TX, RX).                                          */
5198 /*                                                                          */
5199 /* Returns:                                                                 */
5200 /*   Nothing.                                                               */
5201 /****************************************************************************/
5202 static void
5203 bce_intr(struct bce_softc *sc)
5204 {
5205         struct ifnet *ifp = &sc->arpcom.ac_if;
5206         struct status_block *sblk;
5207         uint16_t hw_rx_cons, hw_tx_cons;
5208         uint32_t status_attn_bits;
5209         struct bce_tx_ring *txr = &sc->tx_rings[0];
5210         struct bce_rx_ring *rxr = &sc->rx_rings[0];
5211
5212         ASSERT_SERIALIZED(&sc->main_serialize);
5213
5214         sblk = sc->status_block;
5215
5216         /*
5217          * Save the status block index value for use during
5218          * the next interrupt.
5219          */
5220         sc->last_status_idx = sblk->status_idx;
5221
5222         /* Make sure status index is extracted before rx/tx cons */
5223         cpu_lfence();
5224
5225         /* Check if the hardware has finished any work. */
5226         hw_rx_cons = bce_get_hw_rx_cons(sc);
5227         hw_tx_cons = bce_get_hw_tx_cons(sc);
5228
5229         status_attn_bits = sblk->status_attn_bits;
5230
5231         /* Was it a link change interrupt? */
5232         if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5233             (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5234                 bce_phy_intr(sc);
5235
5236                 /*
5237                  * Clear any transient status updates during link state
5238                  * change.
5239                  */
5240                 REG_WR(sc, BCE_HC_COMMAND,
5241                     sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5242                 REG_RD(sc, BCE_HC_COMMAND);
5243         }
5244
5245         /*
5246          * If any other attention is asserted then
5247          * the chip is toast.
5248          */
5249         if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5250             (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5251                 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5252                           sblk->status_attn_bits);
5253                 bce_serialize_skipmain(sc);
5254                 bce_init(sc);
5255                 bce_deserialize_skipmain(sc);
5256                 return;
5257         }
5258
5259         /* Check for any completed RX frames. */
5260         lwkt_serialize_enter(&rxr->rx_serialize);
5261         if (hw_rx_cons != rxr->rx_cons)
5262                 bce_rx_intr(rxr, -1, hw_rx_cons);
5263         lwkt_serialize_exit(&rxr->rx_serialize);
5264
5265         /* Check for any completed TX frames. */
5266         lwkt_serialize_enter(&txr->tx_serialize);
5267         if (hw_tx_cons != txr->tx_cons) {
5268                 bce_tx_intr(txr, hw_tx_cons);
5269                 if (!ifq_is_empty(&ifp->if_snd))
5270                         if_devstart(ifp);
5271         }
5272         lwkt_serialize_exit(&txr->tx_serialize);
5273
5274         /* Re-enable interrupts. */
5275         bce_reenable_intr(sc);
5276 }
5277
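     /*
      * Legacy INTx handler.  The interrupt line may be shared, so the
      * status block index and INTA state are checked first to decide
      * whether this controller actually raised the interrupt.
      */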
5278 static void
5279 bce_intr_legacy(void *xsc)
5280 {
5281         struct bce_softc *sc = xsc;
5282         struct status_block *sblk;
5283
5284         sblk = sc->status_block;
5285
5286         /*
5287          * If the hardware status block index matches the last value
5288          * read by the driver and we haven't asserted our interrupt
5289          * then there's nothing to do.
5290          */
5291         if (sblk->status_idx == sc->last_status_idx &&
5292             (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
5293              BCE_PCICFG_MISC_STATUS_INTA_VALUE))
5294                 return;
5295
5296         /* Ack the interrupt and stop others from occurring. */
5297         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5298                BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5299                BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5300
5301         /*
5302          * Read back to deassert IRQ immediately to avoid too
5303          * many spurious interrupts.
5304          */
5305         REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
5306
5307         bce_intr(sc);
5308 }
5309
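     /*
      * MSI handler.  Masks further interrupts via the INT_ACK_CMD
      * register before running the common handler, which re-enables
      * them when it is done.
      */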
5310 static void
5311 bce_intr_msi(void *xsc)
5312 {
5313         struct bce_softc *sc = xsc;
5314
5315         /* Ack the interrupt and stop others from occurring. */
5316         REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
5317                BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
5318                BCE_PCICFG_INT_ACK_CMD_MASK_INT);
5319
5320         bce_intr(sc);
5321 }
5322
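     /*
      * One-shot MSI handler.  Unlike bce_intr_msi(), no explicit
      * mask/ack write is needed before calling the common handler.
      */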
5323 static void
5324 bce_intr_msi_oneshot(void *xsc)
5325 {
5326         bce_intr(xsc);
5327 }
5328
5329
5330 /****************************************************************************/
5331 /* Programs the various packet receive modes (broadcast and multicast).     */
5332 /*                                                                          */
5333 /* Returns:                                                                 */
5334 /*   Nothing.                                                               */
5335 /****************************************************************************/
5336 static void
5337 bce_set_rx_mode(struct bce_softc *sc)
5338 {
5339         struct ifnet *ifp = &sc->arpcom.ac_if;
5340         struct ifmultiaddr *ifma;
5341         uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5342         uint32_t rx_mode, sort_mode;
5343         int h, i;
5344
5345         ASSERT_IFNET_SERIALIZED_ALL(ifp);
5346
5347         /* Initialize receive mode default settings. */
5348         rx_mode = sc->rx_mode &
5349                   ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5350                     BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5351         sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5352
5353         /*
5354          * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5355          * be enabled.
5356          */
5357         if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5358             !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5359                 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5360
5361         /*
5362          * Check for promiscuous, all multicast, or selected
5363          * multicast address filtering.
5364          */
5365         if (ifp->if_flags & IFF_PROMISC) {
5366                 /* Enable promiscuous mode. */
5367                 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5368                 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5369         } else if (ifp->if_flags & IFF_ALLMULTI) {
5370                 /* Enable all multicast addresses. */
5371                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5372                         REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5373                                0xffffffff);
5374                 }
5375                 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5376         } else {
5377                 /* Accept one or more multicast(s). */
5378                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5379                         if (ifma->ifma_addr->sa_family != AF_LINK)
5380                                 continue;
5381                         h = ether_crc32_le(
5382                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5383                             ETHER_ADDR_LEN) & 0xFF;
5384                         hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5385                 }
5386
5387                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5388                         REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5389                                hashes[i]);
5390                 }
5391                 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5392         }
5393
5394         /* Only make changes if the receive mode has actually changed. */
5395         if (rx_mode != sc->rx_mode) {
5396                 sc->rx_mode = rx_mode;
5397                 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5398         }
5399
5400         /* Disable and clear the existing sort before enabling a new sort. */
5401         REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5402         REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5403         REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5404 }
5405
5406
5407 /****************************************************************************/
5408 /* Called periodically to update statistics from the controller's           */
5409 /* statistics block.                                                        */
5410 /*                                                                          */
5411 /* Returns:                                                                 */
5412 /*   Nothing.                                                               */
5413 /****************************************************************************/
5414 static void
5415 bce_stats_update(struct bce_softc *sc)
5416 {
5417         struct ifnet *ifp = &sc->arpcom.ac_if;
5418         struct statistics_block *stats = sc->stats_block;
5419
5420         ASSERT_SERIALIZED(&sc->main_serialize);
5421
5422         /* 
5423          * Certain controllers don't report carrier sense errors correctly.
5424          * See errata E11_5708CA0_1165.
5425          */
5426         if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
5427             !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
5428                 IFNET_STAT_INC(ifp, oerrors,
5429                         (u_long)stats->stat_Dot3StatsCarrierSenseErrors);
5430         }
5431
5432         /*
5433          * Update the sysctl statistics from the hardware statistics.
5434          */
5435         sc->stat_IfHCInOctets =
5436                 ((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
5437                  (uint64_t)stats->stat_IfHCInOctets_lo;
5438
5439         sc->stat_IfHCInBadOctets =
5440                 ((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
5441                  (uint64_t)stats->stat_IfHCInBadOctets_lo;
5442
5443         sc->stat_IfHCOutOctets =
5444                 ((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
5445                  (uint64_t)stats->stat_IfHCOutOctets_lo;
5446
5447         sc->stat_IfHCOutBadOctets =
5448                 ((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
5449                  (uint64_t)stats->stat_IfHCOutBadOctets_lo;
5450
5451         sc->stat_IfHCInUcastPkts =
5452                 ((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
5453                  (uint64_t)stats->stat_IfHCInUcastPkts_lo;
5454
5455         sc->stat_IfHCInMulticastPkts =
5456                 ((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
5457                  (uint64_t)stats->stat_IfHCInMulticastPkts_lo;
5458
5459         sc->stat_IfHCInBroadcastPkts =
5460                 ((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
5461                  (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;
5462
5463         sc->stat_IfHCOutUcastPkts =
5464                 ((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
5465                  (uint64_t)stats->stat_IfHCOutUcastPkts_lo;
5466
5467         sc->stat_IfHCOutMulticastPkts =
5468                 ((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
5469                  (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;
5470
5471         sc->stat_IfHCOutBroadcastPkts =
5472                 ((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
5473                  (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;
5474
5475         sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
5476                 stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
5477
5478         sc->stat_Dot3StatsCarrierSenseErrors =
5479                 stats->stat_Dot3StatsCarrierSenseErrors;
5480
5481         sc->stat_Dot3StatsFCSErrors =
5482                 stats->stat_Dot3StatsFCSErrors;
5483
5484         sc->stat_Dot3StatsAlignmentErrors =
5485                 stats->stat_Dot3StatsAlignmentErrors;
5486
5487         sc->stat_Dot3StatsSingleCollisionFrames =
5488                 stats->stat_Dot3StatsSingleCollisionFrames;
5489
5490         sc->stat_Dot3StatsMultipleCollisionFrames =
5491                 stats->stat_Dot3StatsMultipleCollisionFrames;
5492
5493         sc->stat_Dot3StatsDeferredTransmissions =
5494                 stats->stat_Dot3StatsDeferredTransmissions;
5495
5496         sc->stat_Dot3StatsExcessiveCollisions =
5497                 stats->stat_Dot3StatsExcessiveCollisions;
5498
5499         sc->stat_Dot3StatsLateCollisions =
5500                 stats->stat_Dot3StatsLateCollisions;
5501
5502         sc->stat_EtherStatsCollisions =
5503                 stats->stat_EtherStatsCollisions;
5504
5505         sc->stat_EtherStatsFragments =
5506                 stats->stat_EtherStatsFragments;
5507
5508         sc->stat_EtherStatsJabbers =
5509                 stats->stat_EtherStatsJabbers;
5510
5511         sc->stat_EtherStatsUndersizePkts =
5512                 stats->stat_EtherStatsUndersizePkts;
5513
5514         sc->stat_EtherStatsOverrsizePkts =
5515                 stats->stat_EtherStatsOverrsizePkts;
5516
5517         sc->stat_EtherStatsPktsRx64Octets =
5518                 stats->stat_EtherStatsPktsRx64Octets;
5519
5520         sc->stat_EtherStatsPktsRx65Octetsto127Octets =
5521                 stats->stat_EtherStatsPktsRx65Octetsto127Octets;
5522
5523         sc->stat_EtherStatsPktsRx128Octetsto255Octets =
5524                 stats->stat_EtherStatsPktsRx128Octetsto255Octets;
5525
5526         sc->stat_EtherStatsPktsRx256Octetsto511Octets =
5527                 stats->stat_EtherStatsPktsRx256Octetsto511Octets;
5528
5529         sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
5530                 stats->stat_EtherStatsPktsRx512Octetsto1023Octets;
5531
5532         sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
5533                 stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;
5534
5535         sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
5536                 stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;
5537
5538         sc->stat_EtherStatsPktsTx64Octets =
5539                 stats->stat_EtherStatsPktsTx64Octets;
5540
5541         sc->stat_EtherStatsPktsTx65Octetsto127Octets =
5542                 stats->stat_EtherStatsPktsTx65Octetsto127Octets;
5543
5544         sc->stat_EtherStatsPktsTx128Octetsto255Octets =
5545                 stats->stat_EtherStatsPktsTx128Octetsto255Octets;
5546
5547         sc->stat_EtherStatsPktsTx256Octetsto511Octets =
5548                 stats->stat_EtherStatsPktsTx256Octetsto511Octets;
5549
5550         sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
5551                 stats->stat_EtherStatsPktsTx512Octetsto1023Octets;
5552
5553         sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
5554                 stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;
5555
5556         sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
5557                 stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;
5558
5559         sc->stat_XonPauseFramesReceived =
5560                 stats->stat_XonPauseFramesReceived;
5561
5562         sc->stat_XoffPauseFramesReceived =
5563                 stats->stat_XoffPauseFramesReceived;
5564
5565         sc->stat_OutXonSent =
5566                 stats->stat_OutXonSent;
5567
5568         sc->stat_OutXoffSent =
5569                 stats->stat_OutXoffSent;
5570
5571         sc->stat_FlowControlDone =
5572                 stats->stat_FlowControlDone;
5573
5574         sc->stat_MacControlFramesReceived =
5575                 stats->stat_MacControlFramesReceived;
5576
5577         sc->stat_XoffStateEntered =
5578                 stats->stat_XoffStateEntered;
5579
5580         sc->stat_IfInFramesL2FilterDiscards =
5581                 stats->stat_IfInFramesL2FilterDiscards;
5582
5583         sc->stat_IfInRuleCheckerDiscards =
5584                 stats->stat_IfInRuleCheckerDiscards;
5585
5586         sc->stat_IfInFTQDiscards =
5587                 stats->stat_IfInFTQDiscards;
5588
5589         sc->stat_IfInMBUFDiscards =
5590                 stats->stat_IfInMBUFDiscards;
5591
5592         sc->stat_IfInRuleCheckerP4Hit =
5593                 stats->stat_IfInRuleCheckerP4Hit;
5594
5595         sc->stat_CatchupInRuleCheckerDiscards =
5596                 stats->stat_CatchupInRuleCheckerDiscards;
5597
5598         sc->stat_CatchupInFTQDiscards =
5599                 stats->stat_CatchupInFTQDiscards;
5600
5601         sc->stat_CatchupInMBUFDiscards =
5602                 stats->stat_CatchupInMBUFDiscards;
5603
5604         sc->stat_CatchupInRuleCheckerP4Hit =
5605                 stats->stat_CatchupInRuleCheckerP4Hit;
5606
5607         sc->com_no_buffers = REG_RD_IND(sc, 0x120084);
5608
5609         /*
5610          * Update the interface statistics from the
5611          * hardware statistics.
5612          */
5613         IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions);
5614
5615         IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts +
5616             (u_long)sc->stat_EtherStatsOverrsizePkts +
5617             (u_long)sc->stat_IfInMBUFDiscards +
5618             (u_long)sc->stat_Dot3StatsAlignmentErrors +
5619             (u_long)sc->stat_Dot3StatsFCSErrors +
5620             (u_long)sc->stat_IfInRuleCheckerDiscards +
5621             (u_long)sc->stat_IfInFTQDiscards +
5622             (u_long)sc->com_no_buffers);
5623
5624         IFNET_STAT_SET(ifp, oerrors,
5625             (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
5626             (u_long)sc->stat_Dot3StatsExcessiveCollisions +
5627             (u_long)sc->stat_Dot3StatsLateCollisions);
5628 }
5629
5630
5631 /****************************************************************************/
5632 /* Periodic function to notify the bootcode that the driver is still        */
5633 /* present.                                                                 */
5634 /*                                                                          */
5635 /* Returns:                                                                 */
5636 /*   Nothing.                                                               */
5637 /****************************************************************************/
5638 static void
5639 bce_pulse(void *xsc)
5640 {
5641         struct bce_softc *sc = xsc;
5642         struct ifnet *ifp = &sc->arpcom.ac_if;
5643         uint32_t msg;
5644
5645         lwkt_serialize_enter(&sc->main_serialize);
5646
5647         /* Tell the firmware that the driver is still running. */
5648         msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5649         bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
5650
5651         /* Update the bootcode condition. */
5652         sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
5653
5654         /* Report whether the bootcode still knows the driver is running. */
5655         if (!sc->bce_drv_cardiac_arrest) {
5656                 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
5657                         sc->bce_drv_cardiac_arrest = 1;
5658                         if_printf(ifp, "Bootcode lost the driver pulse! "
5659                             "(bc_state = 0x%08X)\n", sc->bc_state);
5660                 }
5661         } else {
5662                 /*
5663                  * Not supported by all bootcode versions.
5664                  * (v5.0.11+ and v5.2.1+)  Older bootcode
5665                  * will require the driver to reset the
5666                  * controller to clear this condition.
5667                  */
5668                 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
5669                         sc->bce_drv_cardiac_arrest = 0;
5670                         if_printf(ifp, "Bootcode found the driver pulse! "
5671                             "(bc_state = 0x%08X)\n", sc->bc_state);
5672                 }
5673         }
5674
5675         /* Schedule the next pulse. */
5676         callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
5677             sc->bce_intr_cpuid);
5678
5679         lwkt_serialize_exit(&sc->main_serialize);
5680 }
5681
5682
5683 /****************************************************************************/
5684 /* Periodic function to check whether an MSI interrupt has been lost.       */
5685 /*                                                                          */
5686 /* Returns:                                                                 */
5687 /*   Nothing.                                                               */
5688 /****************************************************************************/
5689 static void
5690 bce_check_msi(void *xsc)
5691 {
5692         struct bce_softc *sc = xsc;
5693         struct ifnet *ifp = &sc->arpcom.ac_if;
5694         struct status_block *sblk = sc->status_block;
5695         struct bce_tx_ring *txr = &sc->tx_rings[0];
5696         struct bce_rx_ring *rxr = &sc->rx_rings[0];
5697
5698         lwkt_serialize_enter(&sc->main_serialize);
5699
5700         KKASSERT(mycpuid == sc->bce_intr_cpuid);
5701
5702         if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
5703                 lwkt_serialize_exit(&sc->main_serialize);
5704                 return;
5705         }
5706
5707         if (bce_get_hw_rx_cons(sc) != rxr->rx_cons ||
5708             bce_get_hw_tx_cons(sc) != txr->tx_cons ||
5709             (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5710             (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5711                 if (sc->bce_check_rx_cons == rxr->rx_cons &&
5712                     sc->bce_check_tx_cons == txr->tx_cons &&
5713                     sc->bce_check_status_idx == sc->last_status_idx) {
5714                         uint32_t msi_ctrl;
5715
5716                         if (!sc->bce_msi_maylose) {
5717                                 sc->bce_msi_maylose = TRUE;
5718                                 goto done;
5719                         }
5720
5721                         msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
5722                         if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) {
5723                                 if (bootverbose)
5724                                         if_printf(ifp, "lost MSI\n");
5725
5726                                 REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
5727                                     msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
5728                                 REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);
5729
5730                                 bce_intr_msi(sc);
5731                         } else if (bootverbose) {
5732                                 if_printf(ifp, "MSI may be lost\n");
5733                         }
5734                 }
5735         }
5736         sc->bce_msi_maylose = FALSE;
5737         sc->bce_check_rx_cons = rxr->rx_cons;
5738         sc->bce_check_tx_cons = txr->tx_cons;
5739         sc->bce_check_status_idx = sc->last_status_idx;
5740
5741 done:
5742         callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
5743             bce_check_msi, sc);
5744         lwkt_serialize_exit(&sc->main_serialize);
5745 }
5746
5747
5748 /****************************************************************************/
5749 /* Periodic function to perform maintenance tasks.                          */
5750 /*                                                                          */
5751 /* Returns:                                                                 */
5752 /*   Nothing.                                                               */
5753 /****************************************************************************/
5754 static void
5755 bce_tick_serialized(struct bce_softc *sc)
5756 {
5757         struct ifnet *ifp = &sc->arpcom.ac_if;
5758         struct mii_data *mii;
5759
5760         ASSERT_SERIALIZED(&sc->main_serialize);
5761
5762         /* Update the statistics from the hardware statistics block. */
5763         bce_stats_update(sc);
5764
5765         /* Schedule the next tick. */
5766         callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
5767             sc->bce_intr_cpuid);
5768
5769         /* If link is already up then we're done. */
5770         if (sc->bce_link)
5771                 return;
5772
5773         mii = device_get_softc(sc->bce_miibus);
5774         mii_tick(mii);
5775
5776         /* Check if the link has come up. */
5777         if ((mii->mii_media_status & IFM_ACTIVE) &&
5778             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5779                 sc->bce_link++;
5780                 /* Now that link is up, handle any outstanding TX traffic. */
5781                 if_devstart_sched(ifp);
5782         }
5783 }
5784
5785
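     /*
      * Per-second timer callout; runs bce_tick_serialized() with the
      * main serializer held.
      */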
5786 static void
5787 bce_tick(void *xsc)
5788 {
5789         struct bce_softc *sc = xsc;
5790
5791         lwkt_serialize_enter(&sc->main_serialize);
5792         bce_tick_serialized(sc);
5793         lwkt_serialize_exit(&sc->main_serialize);
5794 }
5795
5796
5797 /****************************************************************************/
5798 /* Adds any sysctl parameters for tuning or debugging purposes.             */
5799 /*                                                                          */
5800 /* Returns:                                                                 */
5801 /*   Nothing.                                                               */
5802 /****************************************************************************/
5803 static void
5804 bce_add_sysctls(struct bce_softc *sc)
5805 {
5806         struct sysctl_ctx_list *ctx;
5807         struct sysctl_oid_list *children;
5808
5809         sysctl_ctx_init(&sc->bce_sysctl_ctx);
5810         sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
5811                                               SYSCTL_STATIC_CHILDREN(_hw),
5812                                               OID_AUTO,
5813                                               device_get_nameunit(sc->bce_dev),
5814                                               CTLFLAG_RD, 0, "");
5815         if (sc->bce_sysctl_tree == NULL) {
5816                 device_printf(sc->bce_dev, "can't add sysctl node\n");
5817                 return;
5818         }
5819
5820         ctx = &sc->bce_sysctl_ctx;
5821         children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);
5822
5823         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
5824                         CTLTYPE_INT | CTLFLAG_RW,
5825                         sc, 0, bce_sysctl_tx_bds_int, "I",
5826                         "Send max coalesced BD count during interrupt");
5827         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
5828                         CTLTYPE_INT | CTLFLAG_RW,
5829                         sc, 0, bce_sysctl_tx_bds, "I",
5830                         "Send max coalesced BD count");
5831         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
5832                         CTLTYPE_INT | CTLFLAG_RW,
5833                         sc, 0, bce_sysctl_tx_ticks_int, "I",
5834                         "Send coalescing ticks during interrupt");
5835         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
5836                         CTLTYPE_INT | CTLFLAG_RW,
5837                         sc, 0, bce_sysctl_tx_ticks, "I",
5838                         "Send coalescing ticks");
5839
5840         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
5841                         CTLTYPE_INT | CTLFLAG_RW,
5842                         sc, 0, bce_sysctl_rx_bds_int, "I",
5843                         "Receive max coalesced BD count during interrupt");
5844         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
5845                         CTLTYPE_INT | CTLFLAG_RW,
5846                         sc, 0, bce_sysctl_rx_bds, "I",
5847                         "Receive max coalesced BD count");
5848         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
5849                         CTLTYPE_INT | CTLFLAG_RW,
5850                         sc, 0, bce_sysctl_rx_ticks_int, "I",
5851                         "Receive coalescing ticks during interrupt");
5852         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
5853                         CTLTYPE_INT | CTLFLAG_RW,
5854                         sc, 0, bce_sysctl_rx_ticks, "I",
5855                         "Receive coalescing ticks");
5856
5857         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
5858                 CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages");
5859         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
5860                 CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages");
5861
5862         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
5863                 CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0,
5864                 "# segments before write to hardware registers");
5865
5866 #ifdef IFPOLL_ENABLE
5867         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "npoll_offset",
5868             CTLTYPE_INT|CTLFLAG_RW, sc, 0, bce_sysctl_npoll_offset,
5869             "I", "NPOLLING cpu offset");
5870 #endif
5871
5872         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5873                 "stat_IfHCInOctets",
5874                 CTLFLAG_RD, &sc->stat_IfHCInOctets,
5875                 "Bytes received");
5876
5877         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5878                 "stat_IfHCInBadOctets",
5879                 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
5880                 "Bad bytes received");
5881
5882         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5883                 "stat_IfHCOutOctets",
5884                 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
5885                 "Bytes sent");
5886
5887         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5888                 "stat_IfHCOutBadOctets",
5889                 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
5890                 "Bad bytes sent");
5891
5892         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5893                 "stat_IfHCInUcastPkts",
5894                 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
5895                 "Unicast packets received");
5896
5897         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5898                 "stat_IfHCInMulticastPkts",
5899                 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
5900                 "Multicast packets received");
5901
5902         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5903                 "stat_IfHCInBroadcastPkts",
5904                 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
5905                 "Broadcast packets received");
5906
5907         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5908                 "stat_IfHCOutUcastPkts",
5909                 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
5910                 "Unicast packets sent");
5911
5912         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5913                 "stat_IfHCOutMulticastPkts",
5914                 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
5915                 "Multicast packets sent");
5916
5917         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
5918                 "stat_IfHCOutBroadcastPkts",
5919                 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
5920                 "Broadcast packets sent");
5921
5922         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5923                 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
5924                 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
5925                 0, "Internal MAC transmit errors");
5926
5927         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5928                 "stat_Dot3StatsCarrierSenseErrors",
5929                 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
5930                 0, "Carrier sense errors");
5931
5932         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5933                 "stat_Dot3StatsFCSErrors",
5934                 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
5935                 0, "Frame check sequence errors");
5936
5937         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5938                 "stat_Dot3StatsAlignmentErrors",
5939                 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
5940                 0, "Alignment errors");
5941
5942         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5943                 "stat_Dot3StatsSingleCollisionFrames",
5944                 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
5945                 0, "Single Collision Frames");
5946
5947         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5948                 "stat_Dot3StatsMultipleCollisionFrames",
5949                 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
5950                 0, "Multiple Collision Frames");
5951
5952         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5953                 "stat_Dot3StatsDeferredTransmissions",
5954                 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
5955                 0, "Deferred Transmissions");
5956
5957         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5958                 "stat_Dot3StatsExcessiveCollisions",
5959                 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
5960                 0, "Excessive Collisions");
5961
5962         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5963                 "stat_Dot3StatsLateCollisions",
5964                 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
5965                 0, "Late Collisions");
5966
5967         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5968                 "stat_EtherStatsCollisions",
5969                 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
5970                 0, "Collisions");
5971
5972         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5973                 "stat_EtherStatsFragments",
5974                 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
5975                 0, "Fragments");
5976
5977         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5978                 "stat_EtherStatsJabbers",
5979                 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
5980                 0, "Jabbers");
5981
5982         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5983                 "stat_EtherStatsUndersizePkts",
5984                 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
5985                 0, "Undersize packets");
5986
5987         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5988                 "stat_EtherStatsOverrsizePkts",
5989                 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
5990                 0, "Oversize packets");
5991
5992         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5993                 "stat_EtherStatsPktsRx64Octets",
5994                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
5995                 0, "Bytes received in 64 byte packets");
5996
5997         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
5998                 "stat_EtherStatsPktsRx65Octetsto127Octets",
5999                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6000                 0, "Bytes received in 65 to 127 byte packets");
6001
6002         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6003                 "stat_EtherStatsPktsRx128Octetsto255Octets",
6004                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6005                 0, "Bytes received in 128 to 255 byte packets");
6006
6007         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6008                 "stat_EtherStatsPktsRx256Octetsto511Octets",
6009                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6010                 0, "Bytes received in 256 to 511 byte packets");
6011
6012         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6013                 "stat_EtherStatsPktsRx512Octetsto1023Octets",
6014                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6015                 0, "Bytes received in 512 to 1023 byte packets");
6016
6017         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6018                 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
6019                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6020                 0, "Bytes received in 1024 to 1522 byte packets");
6021
6022         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6023                 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
6024                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6025                 0, "Bytes received in 1523 to 9022 byte packets");
6026
6027         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6028                 "stat_EtherStatsPktsTx64Octets",
6029                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6030                 0, "Bytes sent in 64 byte packets");
6031
6032         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6033                 "stat_EtherStatsPktsTx65Octetsto127Octets",
6034                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6035                 0, "Bytes sent in 65 to 127 byte packets");
6036
6037         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6038                 "stat_EtherStatsPktsTx128Octetsto255Octets",
6039                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6040                 0, "Bytes sent in 128 to 255 byte packets");
6041
6042         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6043                 "stat_EtherStatsPktsTx256Octetsto511Octets",
6044                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6045                 0, "Bytes sent in 256 to 511 byte packets");
6046
6047         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6048                 "stat_EtherStatsPktsTx512Octetsto1023Octets",
6049                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6050                 0, "Bytes sent in 512 to 1023 byte packets");
6051
6052         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6053                 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
6054                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6055                 0, "Bytes sent in 1024 to 1522 byte packets");
6056
6057         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6058                 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
6059                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6060                 0, "Bytes sent in 1523 to 9022 byte packets");
6061
6062         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6063                 "stat_XonPauseFramesReceived",
6064                 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6065                 0, "XON pause frames received");
6066
6067         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6068                 "stat_XoffPauseFramesReceived",
6069                 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6070                 0, "XOFF pause frames received");
6071
6072         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6073                 "stat_OutXonSent",
6074                 CTLFLAG_RD, &sc->stat_OutXonSent,
6075                 0, "XON pause frames sent");
6076
6077         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6078                 "stat_OutXoffSent",
6079                 CTLFLAG_RD, &sc->stat_OutXoffSent,
6080                 0, "XOFF pause frames sent");
6081
6082         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6083                 "stat_FlowControlDone",
6084                 CTLFLAG_RD, &sc->stat_FlowControlDone,
6085                 0, "Flow control done");
6086
6087         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6088                 "stat_MacControlFramesReceived",
6089                 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6090                 0, "MAC control frames received");
6091
6092         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6093                 "stat_XoffStateEntered",
6094                 CTLFLAG_RD, &sc->stat_XoffStateEntered,
6095                 0, "XOFF state entered");
6096
6097         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6098                 "stat_IfInFramesL2FilterDiscards",
6099                 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6100                 0, "Received L2 packets discarded");
6101
6102         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6103                 "stat_IfInRuleCheckerDiscards",
6104                 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6105                 0, "Received packets discarded by rule");
6106
6107         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6108                 "stat_IfInFTQDiscards",
6109                 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6110                 0, "Received packet FTQ discards");
6111
6112         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6113                 "stat_IfInMBUFDiscards",
6114                 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6115                 0, "Received packets discarded due to lack of controller buffer memory");
6116
6117         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6118                 "stat_IfInRuleCheckerP4Hit",
6119                 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6120                 0, "Received packets rule checker hits");
6121
6122         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6123                 "stat_CatchupInRuleCheckerDiscards",
6124                 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6125                 0, "Received packets discarded in Catchup path");
6126
6127         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6128                 "stat_CatchupInFTQDiscards",
6129                 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6130                 0, "Received packets discarded in FTQ in Catchup path");
6131
6132         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6133                 "stat_CatchupInMBUFDiscards",
6134                 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6135                 0, "Received packets discarded in controller buffer memory in Catchup path");
6136
6137         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6138                 "stat_CatchupInRuleCheckerP4Hit",
6139                 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6140                 0, "Received packets rule checker hits in Catchup path");
6141
6142         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6143                 "com_no_buffers",
6144                 CTLFLAG_RD, &sc->com_no_buffers,
6145                 0, "Valid packets received but no RX buffers available");
6146 }
6147
6148 static int
6149 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
6150 {
6151         struct bce_softc *sc = arg1;
6152
6153         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6154                         &sc->bce_tx_quick_cons_trip_int,
6155                         BCE_COALMASK_TX_BDS_INT);
6156 }
6157
6158 static int
6159 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
6160 {
6161         struct bce_softc *sc = arg1;
6162
6163         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6164                         &sc->bce_tx_quick_cons_trip,
6165                         BCE_COALMASK_TX_BDS);
6166 }
6167
6168 static int
6169 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
6170 {
6171         struct bce_softc *sc = arg1;
6172
6173         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6174                         &sc->bce_tx_ticks_int,
6175                         BCE_COALMASK_TX_TICKS_INT);
6176 }
6177
6178 static int
6179 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
6180 {
6181         struct bce_softc *sc = arg1;
6182
6183         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6184                         &sc->bce_tx_ticks,
6185                         BCE_COALMASK_TX_TICKS);
6186 }
6187
6188 static int
6189 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
6190 {
6191         struct bce_softc *sc = arg1;
6192
6193         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6194                         &sc->bce_rx_quick_cons_trip_int,
6195                         BCE_COALMASK_RX_BDS_INT);
6196 }
6197
6198 static int
6199 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
6200 {
6201         struct bce_softc *sc = arg1;
6202
6203         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6204                         &sc->bce_rx_quick_cons_trip,
6205                         BCE_COALMASK_RX_BDS);
6206 }
6207
6208 static int
6209 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
6210 {
6211         struct bce_softc *sc = arg1;
6212
6213         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6214                         &sc->bce_rx_ticks_int,
6215                         BCE_COALMASK_RX_TICKS_INT);
6216 }
6217
6218 static int
6219 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
6220 {
6221         struct bce_softc *sc = arg1;
6222
6223         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6224                         &sc->bce_rx_ticks,
6225                         BCE_COALMASK_RX_TICKS);
6226 }
6227
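     /*
      * Common handler for the coalescing sysctls above: rejects
      * negative values, stores the new setting, marks it in
      * bce_coalchg_mask and commits it through bce_coal_change().
      */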
6228 static int
6229 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
6230                        uint32_t coalchg_mask)
6231 {
6232         struct bce_softc *sc = arg1;
6233         struct ifnet *ifp = &sc->arpcom.ac_if;
6234         int error = 0, v;
6235
6236         ifnet_serialize_all(ifp);
6237
6238         v = *coal;
6239         error = sysctl_handle_int(oidp, &v, 0, req);
6240         if (!error && req->newptr != NULL) {
6241                 if (v < 0) {
6242                         error = EINVAL;
6243                 } else {
6244                         *coal = v;
6245                         sc->bce_coalchg_mask |= coalchg_mask;
6246
6247                         /* Commit changes */
6248                         bce_coal_change(sc);
6249                 }
6250         }
6251
6252         ifnet_deserialize_all(ifp);
6253         return error;
6254 }
6255
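     /*
      * Push any changed coalescing parameters to the chip.  Each host
      * coalescing register packs the "during interrupt" value in the
      * upper 16 bits and the normal value in the lower 16 bits, e.g.:
      *
      *     REG_WR(sc, BCE_HC_TX_TICKS,
      *         (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
      */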
6256 static void
6257 bce_coal_change(struct bce_softc *sc)
6258 {
6259         struct ifnet *ifp = &sc->arpcom.ac_if;
6260
6261         ASSERT_SERIALIZED(&sc->main_serialize);
6262
6263         if ((ifp->if_flags & IFF_RUNNING) == 0) {
6264                 sc->bce_coalchg_mask = 0;
6265                 return;
6266         }
6267
6268         if (sc->bce_coalchg_mask &
6269             (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
6270                 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
6271                        (sc->bce_tx_quick_cons_trip_int << 16) |
6272                        sc->bce_tx_quick_cons_trip);
6273                 if (bootverbose) {
6274                         if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
6275                                   sc->bce_tx_quick_cons_trip,
6276                                   sc->bce_tx_quick_cons_trip_int);
6277                 }
6278         }
6279
6280         if (sc->bce_coalchg_mask &
6281             (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
6282                 REG_WR(sc, BCE_HC_TX_TICKS,
6283                        (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6284                 if (bootverbose) {
6285                         if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
6286                                   sc->bce_tx_ticks, sc->bce_tx_ticks_int);
6287                 }
6288         }
6289
6290         if (sc->bce_coalchg_mask &
6291             (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
6292                 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
6293                        (sc->bce_rx_quick_cons_trip_int << 16) |
6294                        sc->bce_rx_quick_cons_trip);
6295                 if (bootverbose) {
6296                         if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
6297                                   sc->bce_rx_quick_cons_trip,
6298                                   sc->bce_rx_quick_cons_trip_int);
6299                 }
6300         }
6301
6302         if (sc->bce_coalchg_mask &
6303             (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
6304                 REG_WR(sc, BCE_HC_RX_TICKS,
6305                        (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6306                 if (bootverbose) {
6307                         if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
6308                                   sc->bce_rx_ticks, sc->bce_rx_ticks_int);
6309                 }
6310         }
6311
6312         sc->bce_coalchg_mask = 0;
6313 }
6314
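     /*
      * Prepare a TSO frame: verify the Ethernet/IP/TCP header lengths
      * recorded in the mbuf, pull the headers into the first mbuf if
      * needed, then return the TX BD flags (LSO flag plus the IP+TCP
      * option length in 32-bit words) and the MSS for the send path.
      */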
static int
bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
    uint16_t *flags0, uint16_t *mss0)
{
        struct mbuf *m;
        uint16_t flags;
        int thoff, iphlen, hoff;

        m = *mp;
        KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

        hoff = m->m_pkthdr.csum_lhlen;
        iphlen = m->m_pkthdr.csum_iphlen;
        thoff = m->m_pkthdr.csum_thlen;

        KASSERT(hoff >= sizeof(struct ether_header),
            ("invalid ether header len %d", hoff));
        KASSERT(iphlen >= sizeof(struct ip),
            ("invalid ip header len %d", iphlen));
        KASSERT(thoff >= sizeof(struct tcphdr),
            ("invalid tcp header len %d", thoff));

        if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
                m = m_pullup(m, hoff + iphlen + thoff);
                if (m == NULL) {
                        *mp = NULL;
                        return ENOBUFS;
                }
                *mp = m;
        }

        /* Set the LSO flag in the TX BD */
        flags = TX_BD_FLAGS_SW_LSO;

        /* Set the length of IP + TCP options (in 32 bit words) */
        flags |= (((iphlen + thoff -
            sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);

        *mss0 = htole16(m->m_pkthdr.tso_segsz);
        *flags0 = flags;

        return 0;
}

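/*
 * Build the flattened serializer array used by the ifnet serialize
 * methods below: slot 0 is the main serializer, followed by one slot
 * per TX ring and then one slot per RX ring.  The tx_serialize and
 * rx_serialize indices recorded here tell the ifnet_serialize_array_*
 * helpers where each group starts, which is why the order is critical.
 */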
static void
bce_setup_serialize(struct bce_softc *sc)
{
        int i, j;

        /*
         * Allocate serializer array
         */

        /* Main + TX + RX */
        sc->serialize_cnt = 1 + sc->ring_cnt + sc->ring_cnt;

        sc->serializes =
            kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
                M_DEVBUF, M_WAITOK | M_ZERO);

        /*
         * Setup serializers
         *
         * NOTE: Order is critical
         */

        i = 0;
        KKASSERT(i < sc->serialize_cnt);
        sc->serializes[i++] = &sc->main_serialize;

        sc->tx_serialize = i;
        for (j = 0; j < sc->ring_cnt; ++j) {
                KKASSERT(i < sc->serialize_cnt);
                sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
        }

        sc->rx_serialize = i;
        for (j = 0; j < sc->ring_cnt; ++j) {
                KKASSERT(i < sc->serialize_cnt);
                sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
        }

        KKASSERT(i == sc->serialize_cnt);
}

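/*
 * ifnet serialize/deserialize/tryserialize methods (plus the INVARIANTS
 * assertion hook).  These simply defer to the generic serializer array
 * helpers, operating on the array built by bce_setup_serialize().
 */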
static void
bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
        struct bce_softc *sc = ifp->if_softc;

        ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt,
            sc->tx_serialize, sc->rx_serialize, slz);
}

static void
bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
        struct bce_softc *sc = ifp->if_softc;

        ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt,
            sc->tx_serialize, sc->rx_serialize, slz);
}

static int
bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
        struct bce_softc *sc = ifp->if_softc;

        return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
            sc->tx_serialize, sc->rx_serialize, slz);
}

#ifdef INVARIANTS

static void
bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
        struct bce_softc *sc = ifp->if_softc;

        ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
            sc->tx_serialize, sc->rx_serialize, slz, serialized);
}

#endif  /* INVARIANTS */

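/*
 * Enter/exit every serializer except the main one (array slot 0),
 * typically for paths that already hold the main serializer.
 */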
static void
bce_serialize_skipmain(struct bce_softc *sc)
{
        lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
}

static void
bce_deserialize_skipmain(struct bce_softc *sc)
{
        lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
}

#ifdef IFPOLL_ENABLE

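/*
 * Sysctl handler for the interface polling CPU offset (npoll_ofs).
 * The new offset must be non-negative, less than ncpus2 and a multiple
 * of the ring count; it is updated with the interface fully serialized.
 */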
static int
bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
{
        struct bce_softc *sc = (void *)arg1;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int error, off;

        off = sc->npoll_ofs;
        error = sysctl_handle_int(oidp, &off, 0, req);
        if (error || req->newptr == NULL)
                return error;
        if (off < 0)
                return EINVAL;

        ifnet_serialize_all(ifp);
        if (off >= ncpus2 || off % sc->ring_cnt != 0) {
                error = EINVAL;
        } else {
                error = 0;
                sc->npoll_ofs = off;
        }
        ifnet_deserialize_all(ifp);

        return error;
}

#endif  /* IFPOLL_ENABLE */