bce: Strip extra blank lines
[dragonfly.git] / sys / dev / netif / bce / if_bce.c
1 /*-
2  * Copyright (c) 2006-2007 Broadcom Corporation
3  *      David Christensen <davidch@broadcom.com>.  All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of Broadcom Corporation nor the name of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written consent.
17  *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  *
30  * $FreeBSD: src/sys/dev/bce/if_bce.c,v 1.31 2007/05/16 23:34:11 davidch Exp $
31  */
32
33 /*
34  * The following controllers are supported by this driver:
35  *   BCM5706C A2, A3
36  *   BCM5706S A2, A3
37  *   BCM5708C B1, B2
38  *   BCM5708S B1, B2
39  *   BCM5709C A1, B2, C0
40  *   BCM5716  C0
41  *
42  * The following controllers are not supported by this driver:
43  *   BCM5706C A0, A1
44  *   BCM5706S A0, A1
45  *   BCM5708C A0, B0
46  *   BCM5708S A0, B0
47  *   BCM5709C A0, B0, B1
48  *   BCM5709S A0, A1, B0, B1, B2, C0
49  *
50  *
51  * Note about MSI-X on 5709/5716:
52  * - 9 MSI-X vectors are supported.
53  * - MSI-X vectors, RX/TX rings and status blocks' association
54  *   are fixed:
55  *   o  The first RX ring and the first TX ring use the first
56  *      status block.
57  *   o  The first MSI-X vector is associated with the first
58  *      status block.
59  *   o  The second RX ring and the second TX ring use the second
60  *      status block.
61  *   o  The second MSI-X vector is associated with the second
62  *      status block.
63  *   ...
64  *   and so on so forth.
65  * - Status blocks must reside in physically contiguous memory
66  *   and each status block consumes 128bytes.  In addition to
67  *   this, the memory for the status blocks is aligned on 128bytes
68  *   in this driver.  (see bce_dma_alloc() and HC_CONFIG)
69  * - Each status block has its own coalesce parameters, which also
70  *   serve as the related MSI-X vector's interrupt moderation
71  *   parameters.  (see bce_coal_change())
72  */
73
74 #include "opt_bce.h"
75 #include "opt_ifpoll.h"
76
77 #include <sys/param.h>
78 #include <sys/bus.h>
79 #include <sys/endian.h>
80 #include <sys/kernel.h>
81 #include <sys/interrupt.h>
82 #include <sys/mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/queue.h>
85 #include <sys/rman.h>
86 #include <sys/serialize.h>
87 #include <sys/socket.h>
88 #include <sys/sockio.h>
89 #include <sys/sysctl.h>
90
91 #include <netinet/ip.h>
92 #include <netinet/tcp.h>
93
94 #include <net/bpf.h>
95 #include <net/ethernet.h>
96 #include <net/if.h>
97 #include <net/if_arp.h>
98 #include <net/if_dl.h>
99 #include <net/if_media.h>
100 #include <net/if_poll.h>
101 #include <net/if_types.h>
102 #include <net/ifq_var.h>
103 #include <net/toeplitz.h>
104 #include <net/toeplitz2.h>
105 #include <net/vlan/if_vlan_var.h>
106 #include <net/vlan/if_vlan_ether.h>
107
108 #include <dev/netif/mii_layer/mii.h>
109 #include <dev/netif/mii_layer/miivar.h>
110 #include <dev/netif/mii_layer/brgphyreg.h>
111
112 #include <bus/pci/pcireg.h>
113 #include <bus/pci/pcivar.h>
114
115 #include "miibus_if.h"
116
117 #include <dev/netif/bce/if_bcereg.h>
118 #include <dev/netif/bce/if_bcefw.h>
119
/*
 * Interval, in hz ticks, for the MSI health-check callout (10ms).
 * NOTE(review): presumably consumed by bce_check_msi() — confirm there.
 */
#define BCE_MSI_CKINTVL         ((10 * hz) / 1000)      /* 10ms */

#ifdef BCE_RSS_DEBUG
/*
 * RSS debug logging: prints only when the per-softc debug level is at
 * least "lvl"; compiles to a no-op when BCE_RSS_DEBUG is not defined.
 */
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
        if (sc->rss_debug >= lvl) \
                if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else   /* !BCE_RSS_DEBUG */
#define BCE_RSS_DPRINTF(sc, lvl, fmt, ...)      ((void)0)
#endif  /* BCE_RSS_DEBUG */
131
132 /****************************************************************************/
133 /* PCI Device ID Table                                                      */
134 /*                                                                          */
135 /* Used by bce_probe() to identify the devices supported by this driver.    */
136 /****************************************************************************/
137 #define BCE_DEVDESC_MAX         64
138
/*
 * PCI ID match table scanned linearly by bce_probe().
 * NOTE: OEM-specific (subvendor/subdevice) entries must precede the
 * PCI_ANY_ID catch-all entry for each chip, since the first match wins.
 * The list is terminated by the all-zero sentinel entry.
 */
static struct bce_type bce_devs[] = {
	/* BCM5706C Controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3101,
		"HP NC370T Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3106,
		"HP NC370i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x3070,
		"HP NC380T PCIe DP Multifunc Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  HP_VENDORID, 0x1709,
		"HP NC371i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-T" },

	/* BCM5706S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, HP_VENDORID, 0x3102,
		"HP NC370F Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5706S, PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5706 1000Base-SX" },

	/* BCM5708C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7037,
		"HP NC373T PCIe Multifunction Gig Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7038,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  HP_VENDORID, 0x7045,
		"HP NC374m PCIe Multifunction Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708 1000Base-T" },

	/* BCM5708S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x1706,
		"HP NC373m Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703b,
		"HP NC373i Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  HP_VENDORID, 0x703d,
		"HP NC373F PCIe Multifunc Giga Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5708S,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5708S 1000Base-T" },

	/* BCM5709C controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7055,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  HP_VENDORID, 0x7059,
		"HP NC382T PCIe DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-T" },

	/* BCM5709S controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x171d,
		"HP NC382m DP 1GbE Multifunction BL-c Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  HP_VENDORID, 0x7056,
		"HP NC382i DP Multifunction Gigabit Server Adapter" },
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5709S,  PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5709 1000Base-SX" },

	/* BCM5716 controllers and OEM boards. */
	{ BRCM_VENDORID, BRCM_DEVICEID_BCM5716,   PCI_ANY_ID,  PCI_ANY_ID,
		"Broadcom NetXtreme II BCM5716 1000Base-T" },

	/* Terminator. */
	{ 0, 0, 0, 0, NULL }
};
200
201 /****************************************************************************/
202 /* Supported Flash NVRAM device data.                                       */
203 /****************************************************************************/
/*
 * NVRAM device descriptors, selected at runtime by matching the chip's
 * flash strapping.  The first five words of each entry appear to be the
 * strapping value followed by controller config/write register values
 * (the "strap, cfg1, & write1 need updates" notes below refer to them) —
 * see struct flash_spec in if_bcereg.h for the authoritative layout.
 * The remaining fields give access flags, page geometry, byte address
 * mask, total size and a human-readable name.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BCE_NV_BUFFERED | BCE_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BCE_NV_WREN)

	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
293
294 /*
295  * The BCM5709 controllers transparently handle the
296  * differences between Atmel 264 byte pages and all
297  * flash devices which use 256 byte pages, so no
298  * logical-to-physical mapping is required in the
299  * driver.
300  */
/* Single NVRAM descriptor used for all 5709/5716 parts (see note above). */
static struct flash_spec flash_5709 = {
	.flags		= BCE_NV_BUFFERED,	/* controller handles paging */
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE * 2,
	.name		= "5709/5716 buffered flash (256kB)",
};
309
310 /****************************************************************************/
311 /* DragonFly device entry points.                                           */
312 /****************************************************************************/
313 static int      bce_probe(device_t);
314 static int      bce_attach(device_t);
315 static int      bce_detach(device_t);
316 static void     bce_shutdown(device_t);
317 static int      bce_miibus_read_reg(device_t, int, int);
318 static int      bce_miibus_write_reg(device_t, int, int, int);
319 static void     bce_miibus_statchg(device_t);
320
321 /****************************************************************************/
322 /* BCE Register/Memory Access Routines                                      */
323 /****************************************************************************/
/* Indirect register and shared-memory accessors (see definitions below). */
static uint32_t bce_reg_rd_ind(struct bce_softc *, uint32_t);
static void     bce_reg_wr_ind(struct bce_softc *, uint32_t, uint32_t);
static void     bce_shmem_wr(struct bce_softc *, uint32_t, uint32_t);
/*
 * Fix: parameter type was the Linux-style "u32"; every other prototype
 * in this file uses uint32_t, so use it here too for consistency.
 */
static uint32_t bce_shmem_rd(struct bce_softc *, uint32_t);
static void     bce_ctx_wr(struct bce_softc *, uint32_t, uint32_t, uint32_t);
329
330 /****************************************************************************/
331 /* BCE NVRAM Access Routines                                                */
332 /****************************************************************************/
333 static int      bce_acquire_nvram_lock(struct bce_softc *);
334 static int      bce_release_nvram_lock(struct bce_softc *);
335 static void     bce_enable_nvram_access(struct bce_softc *);
336 static void     bce_disable_nvram_access(struct bce_softc *);
337 static int      bce_nvram_read_dword(struct bce_softc *, uint32_t, uint8_t *,
338                     uint32_t);
339 static int      bce_init_nvram(struct bce_softc *);
340 static int      bce_nvram_read(struct bce_softc *, uint32_t, uint8_t *, int);
341 static int      bce_nvram_test(struct bce_softc *);
342
343 /****************************************************************************/
344 /* BCE DMA Allocate/Free Routines                                           */
345 /****************************************************************************/
346 static int      bce_dma_alloc(struct bce_softc *);
347 static void     bce_dma_free(struct bce_softc *);
348 static void     bce_dma_map_addr(void *, bus_dma_segment_t *, int, int);
349
350 /****************************************************************************/
351 /* BCE Firmware Synchronization and Load                                    */
352 /****************************************************************************/
353 static int      bce_fw_sync(struct bce_softc *, uint32_t);
354 static void     bce_load_rv2p_fw(struct bce_softc *, uint32_t *,
355                     uint32_t, uint32_t);
356 static void     bce_load_cpu_fw(struct bce_softc *, struct cpu_reg *,
357                     struct fw_info *);
358 static void     bce_start_cpu(struct bce_softc *, struct cpu_reg *);
359 static void     bce_halt_cpu(struct bce_softc *, struct cpu_reg *);
360 static void     bce_start_rxp_cpu(struct bce_softc *);
361 static void     bce_init_rxp_cpu(struct bce_softc *);
362 static void     bce_init_txp_cpu(struct bce_softc *);
363 static void     bce_init_tpat_cpu(struct bce_softc *);
364 static void     bce_init_cp_cpu(struct bce_softc *);
365 static void     bce_init_com_cpu(struct bce_softc *);
366 static void     bce_init_cpus(struct bce_softc *);
367 static void     bce_setup_msix_table(struct bce_softc *);
368 static void     bce_init_rss(struct bce_softc *);
369
370 static void     bce_stop(struct bce_softc *);
371 static int      bce_reset(struct bce_softc *, uint32_t);
372 static int      bce_chipinit(struct bce_softc *);
373 static int      bce_blockinit(struct bce_softc *);
374 static void     bce_probe_pci_caps(struct bce_softc *);
375 static void     bce_print_adapter_info(struct bce_softc *);
376 static void     bce_get_media(struct bce_softc *);
377 static void     bce_mgmt_init(struct bce_softc *);
378 static int      bce_init_ctx(struct bce_softc *);
379 static void     bce_get_mac_addr(struct bce_softc *);
380 static void     bce_set_mac_addr(struct bce_softc *);
381 static void     bce_set_rx_mode(struct bce_softc *);
382 static void     bce_coal_change(struct bce_softc *);
383 static void     bce_npoll_coal_change(struct bce_softc *);
384 static void     bce_setup_serialize(struct bce_softc *);
385 static void     bce_serialize_skipmain(struct bce_softc *);
386 static void     bce_deserialize_skipmain(struct bce_softc *);
387 static void     bce_set_timer_cpuid(struct bce_softc *, boolean_t);
388 static int      bce_alloc_intr(struct bce_softc *);
389 static void     bce_free_intr(struct bce_softc *);
390 static void     bce_try_alloc_msix(struct bce_softc *);
391 static void     bce_free_msix(struct bce_softc *, boolean_t);
392 static void     bce_setup_ring_cnt(struct bce_softc *);
393 static int      bce_setup_intr(struct bce_softc *);
394 static void     bce_teardown_intr(struct bce_softc *);
395 static int      bce_setup_msix(struct bce_softc *);
396 static void     bce_teardown_msix(struct bce_softc *, int);
397
398 static int      bce_create_tx_ring(struct bce_tx_ring *);
399 static void     bce_destroy_tx_ring(struct bce_tx_ring *);
400 static void     bce_init_tx_context(struct bce_tx_ring *);
401 static int      bce_init_tx_chain(struct bce_tx_ring *);
402 static void     bce_free_tx_chain(struct bce_tx_ring *);
403 static void     bce_xmit(struct bce_tx_ring *);
404 static int      bce_encap(struct bce_tx_ring *, struct mbuf **, int *);
405 static int      bce_tso_setup(struct bce_tx_ring *, struct mbuf **,
406                     uint16_t *, uint16_t *);
407
408 static int      bce_create_rx_ring(struct bce_rx_ring *);
409 static void     bce_destroy_rx_ring(struct bce_rx_ring *);
410 static void     bce_init_rx_context(struct bce_rx_ring *);
411 static int      bce_init_rx_chain(struct bce_rx_ring *);
412 static void     bce_free_rx_chain(struct bce_rx_ring *);
413 static int      bce_newbuf_std(struct bce_rx_ring *, uint16_t *, uint16_t *,
414                     uint32_t *, int);
415 static void     bce_setup_rxdesc_std(struct bce_rx_ring *, uint16_t,
416                     uint32_t *);
417
418 static void     bce_start(struct ifnet *, struct ifaltq_subque *);
419 static int      bce_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
420 static void     bce_watchdog(struct ifaltq_subque *);
421 static int      bce_ifmedia_upd(struct ifnet *);
422 static void     bce_ifmedia_sts(struct ifnet *, struct ifmediareq *);
423 static void     bce_init(void *);
424 #ifdef IFPOLL_ENABLE
425 static void     bce_npoll(struct ifnet *, struct ifpoll_info *);
426 static void     bce_npoll_rx(struct ifnet *, void *, int);
427 static void     bce_npoll_tx(struct ifnet *, void *, int);
428 static void     bce_npoll_status(struct ifnet *);
429 static void     bce_npoll_rx_pack(struct ifnet *, void *, int);
430 #endif
431 static void     bce_serialize(struct ifnet *, enum ifnet_serialize);
432 static void     bce_deserialize(struct ifnet *, enum ifnet_serialize);
433 static int      bce_tryserialize(struct ifnet *, enum ifnet_serialize);
434 #ifdef INVARIANTS
435 static void     bce_serialize_assert(struct ifnet *, enum ifnet_serialize,
436                     boolean_t);
437 #endif
438
439 static void     bce_intr(struct bce_softc *);
440 static void     bce_intr_legacy(void *);
441 static void     bce_intr_msi(void *);
442 static void     bce_intr_msi_oneshot(void *);
443 static void     bce_intr_msix_rxtx(void *);
444 static void     bce_intr_msix_rx(void *);
445 static void     bce_tx_intr(struct bce_tx_ring *, uint16_t);
446 static void     bce_rx_intr(struct bce_rx_ring *, int, uint16_t);
447 static void     bce_phy_intr(struct bce_softc *);
448 static void     bce_disable_intr(struct bce_softc *);
449 static void     bce_enable_intr(struct bce_softc *);
450 static void     bce_reenable_intr(struct bce_rx_ring *);
451 static void     bce_check_msi(void *);
452
453 static void     bce_stats_update(struct bce_softc *);
454 static void     bce_tick(void *);
455 static void     bce_tick_serialized(struct bce_softc *);
456 static void     bce_pulse(void *);
457
458 static void     bce_add_sysctls(struct bce_softc *);
459 static int      bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS);
460 static int      bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS);
461 static int      bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS);
462 static int      bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS);
463 static int      bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS);
464 static int      bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS);
465 static int      bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS);
466 static int      bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS);
467 #ifdef IFPOLL_ENABLE
468 static int      bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS);
469 #endif
470 static int      bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS,
471                     uint32_t *, uint32_t);
472
/*
 * NOTE:
 * Don't set bce_tx_ticks_int/bce_tx_ticks to 1023.  Linux's bnx2
 * takes 1023 as the TX ticks limit.  However, using 1023 will
 * cause 5708(B2) to generate extra interrupts (~2000/s) even when
 * there is _no_ network activity on the NIC.
 */
/* Interrupt coalescing defaults; "bcm:" records Broadcom's own values. */
static uint32_t bce_tx_bds_int = 255;		/* bcm: 20 */
static uint32_t bce_tx_bds = 255;		/* bcm: 20 */
static uint32_t bce_tx_ticks_int = 1022;	/* bcm: 80 */
static uint32_t bce_tx_ticks = 1022;		/* bcm: 80 */
static uint32_t bce_rx_bds_int = 128;		/* bcm: 6 */
static uint32_t bce_rx_bds = 0;			/* bcm: 6 */
static uint32_t bce_rx_ticks_int = 150;		/* bcm: 18 */
static uint32_t bce_rx_ticks = 150;		/* bcm: 18 */

/* NOTE(review): looks like a TX doorbell write batching factor — confirm
 * against the transmit path (bce_encap()/bce_xmit()). */
static int	bce_tx_wreg = 8;

/* Nonzero allows MSI / MSI-X interrupt allocation. */
static int	bce_msi_enable = 1;
static int	bce_msix_enable = 1;

/* RX/TX descriptor ring sizes, expressed in pages. */
static int	bce_rx_pages = RX_PAGES_DEFAULT;
static int	bce_tx_pages = TX_PAGES_DEFAULT;

/* RX/TX ring counts; 0 = let the driver pick (see bce_setup_ring_cnt()). */
static int	bce_rx_rings = 0;	/* auto */
static int	bce_tx_rings = 0;	/* auto */

/* Boot-time loader tunables exposing the knobs above. */
TUNABLE_INT("hw.bce.tx_bds_int", &bce_tx_bds_int);
TUNABLE_INT("hw.bce.tx_bds", &bce_tx_bds);
TUNABLE_INT("hw.bce.tx_ticks_int", &bce_tx_ticks_int);
TUNABLE_INT("hw.bce.tx_ticks", &bce_tx_ticks);
TUNABLE_INT("hw.bce.rx_bds_int", &bce_rx_bds_int);
TUNABLE_INT("hw.bce.rx_bds", &bce_rx_bds);
TUNABLE_INT("hw.bce.rx_ticks_int", &bce_rx_ticks_int);
TUNABLE_INT("hw.bce.rx_ticks", &bce_rx_ticks);
TUNABLE_INT("hw.bce.msi.enable", &bce_msi_enable);
TUNABLE_INT("hw.bce.msix.enable", &bce_msix_enable);
TUNABLE_INT("hw.bce.rx_pages", &bce_rx_pages);
TUNABLE_INT("hw.bce.tx_pages", &bce_tx_pages);
TUNABLE_INT("hw.bce.tx_wreg", &bce_tx_wreg);
TUNABLE_INT("hw.bce.tx_rings", &bce_tx_rings);
TUNABLE_INT("hw.bce.rx_rings", &bce_rx_rings);
515
516 /****************************************************************************/
517 /* DragonFly device dispatch table.                                         */
518 /****************************************************************************/
/* Maps newbus device, bus, and MII entry points to this driver's handlers. */
static device_method_t bce_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		bce_probe),
	DEVMETHOD(device_attach,	bce_attach),
	DEVMETHOD(device_detach,	bce_detach),
	DEVMETHOD(device_shutdown,	bce_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface (PHY register access and link-state change) */
	DEVMETHOD(miibus_readreg,	bce_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	bce_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	bce_miibus_statchg),

	DEVMETHOD_END
};
537
/* Driver description handed to the bus code at registration. */
static driver_t bce_driver = {
	"bce",				/* device name prefix */
	bce_methods,			/* dispatch table above */
	sizeof(struct bce_softc)	/* per-instance softc size */
};

static devclass_t bce_devclass;

DECLARE_DUMMY_MODULE(if_bce);
/* bce needs miibus for PHY management. */
MODULE_DEPEND(bce, miibus, 1, 1, 1);
/* Register bce on the PCI bus, and miibus as a child of bce. */
DRIVER_MODULE(if_bce, pci, bce_driver, bce_devclass, NULL, NULL);
DRIVER_MODULE(miibus, bce, miibus_driver, miibus_devclass, NULL, NULL);
550
551 /****************************************************************************/
552 /* Device probe function.                                                   */
553 /*                                                                          */
554 /* Compares the device to the driver's list of supported devices and        */
555 /* reports back to the OS whether this is the right driver for the device.  */
556 /*                                                                          */
557 /* Returns:                                                                 */
558 /*   BUS_PROBE_DEFAULT on success, positive value on failure.               */
559 /****************************************************************************/
560 static int
561 bce_probe(device_t dev)
562 {
563         struct bce_type *t;
564         uint16_t vid, did, svid, sdid;
565
566         /* Get the data for the device to be probed. */
567         vid  = pci_get_vendor(dev);
568         did  = pci_get_device(dev);
569         svid = pci_get_subvendor(dev);
570         sdid = pci_get_subdevice(dev);
571
572         /* Look through the list of known devices for a match. */
573         for (t = bce_devs; t->bce_name != NULL; ++t) {
574                 if (vid == t->bce_vid && did == t->bce_did && 
575                     (svid == t->bce_svid || t->bce_svid == PCI_ANY_ID) &&
576                     (sdid == t->bce_sdid || t->bce_sdid == PCI_ANY_ID)) {
577                         uint32_t revid = pci_read_config(dev, PCIR_REVID, 4);
578                         char *descbuf;
579
580                         descbuf = kmalloc(BCE_DEVDESC_MAX, M_TEMP, M_WAITOK);
581
582                         /* Print out the device identity. */
583                         ksnprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
584                                   t->bce_name,
585                                   ((revid & 0xf0) >> 4) + 'A', revid & 0xf);
586
587                         device_set_desc_copy(dev, descbuf);
588                         kfree(descbuf, M_TEMP);
589                         return 0;
590                 }
591         }
592         return ENXIO;
593 }
594
/****************************************************************************/
/* Adapter Information Print Function.                                      */
/*                                                                          */
/* Prints the adapter's ASIC/revision, bus type and speed, bootcode         */
/* version, and feature flags to the console.                               */
/*                                                                          */
/* Returns:                                                                 */
/*   None.                                                                  */
/****************************************************************************/
604 static void
605 bce_print_adapter_info(struct bce_softc *sc)
606 {
607         device_printf(sc->bce_dev, "ASIC (0x%08X); ", sc->bce_chipid);
608
609         kprintf("Rev (%c%d); ", ((BCE_CHIP_ID(sc) & 0xf000) >> 12) + 'A',
610                 ((BCE_CHIP_ID(sc) & 0x0ff0) >> 4));
611
612         /* Bus info. */
613         if (sc->bce_flags & BCE_PCIE_FLAG) {
614                 kprintf("Bus (PCIe x%d, ", sc->link_width);
615                 switch (sc->link_speed) {
616                 case 1:
617                         kprintf("2.5Gbps); ");
618                         break;
619                 case 2:
620                         kprintf("5Gbps); ");
621                         break;
622                 default:
623                         kprintf("Unknown link speed); ");
624                         break;
625                 }
626         } else {
627                 kprintf("Bus (PCI%s, %s, %dMHz); ",
628                     ((sc->bce_flags & BCE_PCIX_FLAG) ? "-X" : ""),
629                     ((sc->bce_flags & BCE_PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
630                     sc->bus_speed_mhz);
631         }
632
633         /* Firmware version and device features. */
634         kprintf("B/C (%s)", sc->bce_bc_ver);
635
636         if ((sc->bce_flags & BCE_MFW_ENABLE_FLAG) ||
637             (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
638                 kprintf("; Flags(");
639                 if (sc->bce_flags & BCE_MFW_ENABLE_FLAG)
640                         kprintf("MFW[%s]", sc->bce_mfw_ver);
641                 if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
642                         kprintf(" 2.5G");
643                 kprintf(")");
644         }
645         kprintf("\n");
646 }
647
648 /****************************************************************************/
649 /* PCI Capabilities Probe Function.                                         */
650 /*                                                                          */
/* Walks the PCI capabilities list for the device to find what features are */
652 /* supported.                                                               */
653 /*                                                                          */
654 /* Returns:                                                                 */
655 /*   None.                                                                  */
656 /****************************************************************************/
657 static void
658 bce_probe_pci_caps(struct bce_softc *sc)
659 {
660         device_t dev = sc->bce_dev;
661         uint8_t ptr;
662
663         if (pci_is_pcix(dev))
664                 sc->bce_cap_flags |= BCE_PCIX_CAPABLE_FLAG;
665
666         ptr = pci_get_pciecap_ptr(dev);
667         if (ptr) {
668                 uint16_t link_status = pci_read_config(dev, ptr + 0x12, 2);
669
670                 sc->link_speed = link_status & 0xf;
671                 sc->link_width = (link_status >> 4) & 0x3f;
672                 sc->bce_cap_flags |= BCE_PCIE_CAPABLE_FLAG;
673                 sc->bce_flags |= BCE_PCIE_FLAG;
674         }
675 }
676
677 /****************************************************************************/
678 /* Device attach function.                                                  */
679 /*                                                                          */
680 /* Allocates device resources, performs secondary chip identification,      */
681 /* resets and initializes the hardware, and initializes driver instance     */
682 /* variables.                                                               */
683 /*                                                                          */
684 /* Returns:                                                                 */
685 /*   0 on success, positive value on failure.                               */
686 /****************************************************************************/
static int
bce_attach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t val;
	int rid, rc = 0;
	int i, j;
	struct mii_probe_args mii_args;
	uintptr_t mii_priv = 0;
#ifdef IFPOLL_ENABLE
	int offset, offset_def;
#endif

	sc->bce_dev = dev;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* MSI-X slots start out unassigned (-1). */
	lwkt_serialize_init(&sc->main_serialize);
	for (i = 0; i < BCE_MSIX_MAX; ++i) {
		struct bce_msix_data *msix = &sc->bce_msix[i];

		msix->msix_cpuid = -1;
		msix->msix_rid = -1;
	}

	pci_enable_busmaster(dev);

	bce_probe_pci_caps(sc);

	/* Allocate PCI memory resources. */
	rid = PCIR_BAR(0);
	sc->bce_res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
						 RF_ACTIVE | PCI_RF_DENSE);
	if (sc->bce_res_mem == NULL) {
		device_printf(dev, "PCI memory allocation failed\n");
		return ENXIO;
	}
	sc->bce_btag = rman_get_bustag(sc->bce_res_mem);
	sc->bce_bhandle = rman_get_bushandle(sc->bce_res_mem);

	/*
	 * Configure byte swap and enable indirect register access.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * Access to registers outside of PCI configuration space are not
	 * valid until this is done.
	 */
	pci_write_config(dev, BCE_PCICFG_MISC_CONFIG,
			 BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			 BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP, 4);

	/* Save ASIC revision info. */
	sc->bce_chipid =  REG_RD(sc, BCE_MISC_ID);

	/* Weed out any non-production controller revisions. */
	switch (BCE_CHIP_ID(sc)) {
	case BCE_CHIP_ID_5706_A0:
	case BCE_CHIP_ID_5706_A1:
	case BCE_CHIP_ID_5708_A0:
	case BCE_CHIP_ID_5708_B0:
	case BCE_CHIP_ID_5709_A0:
	case BCE_CHIP_ID_5709_B0:
	case BCE_CHIP_ID_5709_B1:
#ifdef foo
	/* 5709C B2 seems to work fine */
	case BCE_CHIP_ID_5709_B2:
#endif
		device_printf(dev, "Unsupported chip id 0x%08x!\n",
			      BCE_CHIP_ID(sc));
		rc = ENODEV;
		goto fail;
	}

	/* Select brgphy(4) workarounds: 5709 and older chips differ. */
	mii_priv |= BRGPHY_FLAG_WIRESPEED;
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
		if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax ||
		    BCE_CHIP_REV(sc) == BCE_CHIP_REV_Bx)
			mii_priv |= BRGPHY_FLAG_NO_EARLYDAC;
	} else {
		mii_priv |= BRGPHY_FLAG_BER_BUG;
	}

	/*
	 * Find the base address for shared memory access.
	 * Newer versions of bootcode use a signature and offset
	 * while older versions use a fixed address.
	 */
	val = REG_RD_IND(sc, BCE_SHM_HDR_SIGNATURE);
	if ((val & BCE_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BCE_SHM_HDR_SIGNATURE_SIG) {
		/* Multi-port devices use different offsets in shared memory. */
		sc->bce_shmem_base = REG_RD_IND(sc,
		    BCE_SHM_HDR_ADDR_0 + (pci_get_function(sc->bce_dev) << 2));
	} else {
		sc->bce_shmem_base = HOST_VIEW_SHMEM_BASE;
	}

	/*
	 * Fetch the bootcode revision: three version bytes are decoded
	 * into a dotted-decimal string (leading zeroes suppressed).
	 */
	val = bce_shmem_rd(sc, BCE_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		uint8_t num;
		int k, skip0;

		num = (uint8_t)(val >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				sc->bce_bc_ver[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			sc->bce_bc_ver[j++] = '.';
	}

	/* Check if any management firmware is running. */
	val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
	if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
		sc->bce_flags |= BCE_MFW_ENABLE_FLAG;

		/* Allow time for firmware to enter the running state. */
		for (i = 0; i < 30; i++) {
			val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
			if (val & BCE_CONDITION_MFW_RUN_MASK)
				break;
			DELAY(10000);
		}
	}

	/* Check the current bootcode state. */
	val = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION) &
	    BCE_CONDITION_MFW_RUN_MASK;
	if (val != BCE_CONDITION_MFW_RUN_UNKNOWN &&
	    val != BCE_CONDITION_MFW_RUN_NONE) {
		uint32_t addr = bce_shmem_rd(sc, BCE_MFW_VER_PTR);

		/* Copy the 12-byte management firmware version string. */
		for (i = 0, j = 0; j < 3; j++) {
			val = bce_reg_rd_ind(sc, addr + j * 4);
			val = bswap32(val);
			memcpy(&sc->bce_mfw_ver[i], &val, 4);
			i += 4;
		}
	}

	/* Get PCI bus information (speed and type). */
	val = REG_RD(sc, BCE_PCICFG_MISC_STATUS);
	if (val & BCE_PCICFG_MISC_STATUS_PCIX_DET) {
		uint32_t clkreg;

		sc->bce_flags |= BCE_PCIX_FLAG;

		clkreg = REG_RD(sc, BCE_PCICFG_PCI_CLOCK_CONTROL_BITS) &
			 BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			sc->bus_speed_mhz = 133;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			sc->bus_speed_mhz = 100;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			sc->bus_speed_mhz = 66;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			sc->bus_speed_mhz = 50;
			break;

		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BCE_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			sc->bus_speed_mhz = 33;
			break;
		}
	} else {
		if (val & BCE_PCICFG_MISC_STATUS_M66EN)
			sc->bus_speed_mhz = 66;
		else
			sc->bus_speed_mhz = 33;
	}

	if (val & BCE_PCICFG_MISC_STATUS_32BIT_DET)
		sc->bce_flags |= BCE_PCI_32BIT_FLAG;

	/* Reset the controller. */
	rc = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (rc != 0)
		goto fail;

	/* Initialize the controller. */
	rc = bce_chipinit(sc);
	if (rc != 0) {
		device_printf(dev, "Controller initialization failed!\n");
		goto fail;
	}

	/* Perform NVRAM test. */
	rc = bce_nvram_test(sc);
	if (rc != 0) {
		device_printf(dev, "NVRAM test failed!\n");
		goto fail;
	}

	/* Fetch the permanent Ethernet MAC address. */
	bce_get_mac_addr(sc);

	/*
	 * Trip points control how many BDs
	 * should be ready before generating an
	 * interrupt while ticks control how long
	 * a BD can sit in the chain before
	 * generating an interrupt.  Set the default
	 * values for the RX and TX rings.
	 */

/* NOTE(review): "BCE_DRBUG" looks like a typo for BCE_DEBUG -- confirm. */
#ifdef BCE_DRBUG
	/* Force more frequent interrupts. */
	sc->bce_tx_quick_cons_trip_int = 1;
	sc->bce_tx_quick_cons_trip     = 1;
	sc->bce_tx_ticks_int           = 0;
	sc->bce_tx_ticks               = 0;

	sc->bce_rx_quick_cons_trip_int = 1;
	sc->bce_rx_quick_cons_trip     = 1;
	sc->bce_rx_ticks_int           = 0;
	sc->bce_rx_ticks               = 0;
#else
	sc->bce_tx_quick_cons_trip_int = bce_tx_bds_int;
	sc->bce_tx_quick_cons_trip     = bce_tx_bds;
	sc->bce_tx_ticks_int           = bce_tx_ticks_int;
	sc->bce_tx_ticks               = bce_tx_ticks;

	sc->bce_rx_quick_cons_trip_int = bce_rx_bds_int;
	sc->bce_rx_quick_cons_trip     = bce_rx_bds;
	sc->bce_rx_ticks_int           = bce_rx_ticks_int;
	sc->bce_rx_ticks               = bce_rx_ticks;
#endif

	/* Update statistics once every second. */
	sc->bce_stats_ticks = 1000000 & 0xffff00;

	/* Find the media type for the adapter. */
	bce_get_media(sc);

	/* Find out RX/TX ring count */
	bce_setup_ring_cnt(sc);

	/* Allocate DMA memory resources. */
	rc = bce_dma_alloc(sc);
	if (rc != 0) {
		device_printf(dev, "DMA resource allocation failed!\n");
		goto fail;
	}

#ifdef IFPOLL_ENABLE
	/*
	 * NPOLLING RX/TX CPU offset
	 */
	if (sc->rx_ring_cnt2 == ncpus2) {
		offset = 0;
	} else {
		offset_def = (sc->rx_ring_cnt2 * device_get_unit(dev)) % ncpus2;
		offset = device_getenv_int(dev, "npoll.offset", offset_def);
		if (offset >= ncpus2 ||
		    offset % sc->rx_ring_cnt2 != 0) {
			device_printf(dev, "invalid npoll.offset %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}
	sc->npoll_ofs = offset;
#endif

	/* Allocate PCI IRQ resources. */
	rc = bce_alloc_intr(sc);
	if (rc != 0)
		goto fail;

	/* Setup serializer */
	bce_setup_serialize(sc);

	/* Initialize the ifnet interface. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bce_ioctl;
	ifp->if_start = bce_start;
	ifp->if_init = bce_init;
	ifp->if_serialize = bce_serialize;
	ifp->if_deserialize = bce_deserialize;
	ifp->if_tryserialize = bce_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = bce_serialize_assert;
#endif
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = bce_npoll;
#endif

	ifp->if_mtu = ETHERMTU;
	ifp->if_hwassist = BCE_CSUM_FEATURES | CSUM_TSO;
	ifp->if_capabilities = BCE_IF_CAPABILITIES;
	ifp->if_capenable = ifp->if_capabilities;

	if (sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)
		ifp->if_baudrate = IF_Gbps(2.5);
	else
		ifp->if_baudrate = IF_Gbps(1);

	ifq_set_maxlen(&ifp->if_snd, USABLE_TX_BD(&sc->tx_rings[0]));
	ifq_set_ready(&ifp->if_snd);
	ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt);

	if (sc->tx_ring_cnt > 1) {
		ifp->if_mapsubq = ifq_mapsubq_mask;
		ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_cnt - 1);
	}

	/*
	 * Look for our PHY.
	 */
	mii_probe_args_init(&mii_args, bce_ifmedia_upd, bce_ifmedia_sts);
	mii_args.mii_probemask = 1 << sc->bce_phy_addr;
	mii_args.mii_privtag = MII_PRIVTAG_BRGPHY;
	mii_args.mii_priv = mii_priv;

	rc = mii_probe(dev, &sc->bce_miibus, &mii_args);
	if (rc != 0) {
		device_printf(dev, "PHY probe failed!\n");
		goto fail;
	}

	/* Attach to the Ethernet interface list. */
	ether_ifattach(ifp, sc->eaddr, NULL);

	callout_init_mp(&sc->bce_tick_callout);
	callout_init_mp(&sc->bce_pulse_callout);
	callout_init_mp(&sc->bce_ckmsi_callout);

	rc = bce_setup_intr(sc);
	if (rc != 0) {
		device_printf(dev, "Failed to setup IRQ!\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	/* Bind each TX subqueue to its MSI-X CPU and ring. */
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i);
		struct bce_tx_ring *txr = &sc->tx_rings[i];

		ifsq_set_cpuid(ifsq, sc->bce_msix[i].msix_cpuid);
		ifsq_set_priv(ifsq, txr);
		txr->ifsq = ifsq;

		ifsq_watchdog_init(&txr->tx_watchdog, ifsq, bce_watchdog);
	}

	/* Set timer CPUID */
	bce_set_timer_cpuid(sc, FALSE);

	/* Add the supported sysctls to the kernel. */
	bce_add_sysctls(sc);

	/*
	 * The chip reset earlier notified the bootcode that
	 * a driver is present.  We now need to start our pulse
	 * routine so that the bootcode is reminded that we're
	 * still running.
	 */
	bce_pulse(sc);

	/* Get the firmware running so IPMI still works */
	bce_mgmt_init(sc);

	if (bootverbose)
		bce_print_adapter_info(sc);

	return 0;
fail:
	/* bce_detach() tolerates a partially initialized softc. */
	bce_detach(dev);
	return(rc);
}
1069
1070 /****************************************************************************/
1071 /* Device detach function.                                                  */
1072 /*                                                                          */
1073 /* Stops the controller, resets the controller, and releases resources.     */
1074 /*                                                                          */
1075 /* Returns:                                                                 */
1076 /*   0 on success, positive value on failure.                               */
1077 /****************************************************************************/
static int
bce_detach(device_t dev)
{
	struct bce_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;
		uint32_t msg;

		ifnet_serialize_all(ifp);

		/* Stop and reset the controller. */
		callout_stop(&sc->bce_pulse_callout);
		bce_stop(sc);
		/* Tell the bootcode whether the link should stay down. */
		if (sc->bce_flags & BCE_NO_WOL_FLAG)
			msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
		else
			msg = BCE_DRV_MSG_CODE_UNLOAD;
		bce_reset(sc, msg);

		bce_teardown_intr(sc);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	/* If we have a child device on the MII bus remove it too. */
	if (sc->bce_miibus)
		device_delete_child(dev, sc->bce_miibus);
	bus_generic_detach(dev);

	bce_free_intr(sc);

	/* Release the memory-mapped register BAR. */
	if (sc->bce_res_mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0),
				     sc->bce_res_mem);
	}

	bce_dma_free(sc);

	if (sc->bce_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->bce_sysctl_ctx);

	if (sc->serializes != NULL)
		kfree(sc->serializes, M_DEVBUF);

	return 0;
}
1127
1128 /****************************************************************************/
1129 /* Device shutdown function.                                                */
1130 /*                                                                          */
1131 /* Stops and resets the controller.                                         */
1132 /*                                                                          */
1133 /* Returns:                                                                 */
1134 /*   Nothing                                                                */
1135 /****************************************************************************/
1136 static void
1137 bce_shutdown(device_t dev)
1138 {
1139         struct bce_softc *sc = device_get_softc(dev);
1140         struct ifnet *ifp = &sc->arpcom.ac_if;
1141         uint32_t msg;
1142
1143         ifnet_serialize_all(ifp);
1144
1145         bce_stop(sc);
1146         if (sc->bce_flags & BCE_NO_WOL_FLAG)
1147                 msg = BCE_DRV_MSG_CODE_UNLOAD_LNK_DN;
1148         else
1149                 msg = BCE_DRV_MSG_CODE_UNLOAD;
1150         bce_reset(sc, msg);
1151
1152         ifnet_deserialize_all(ifp);
1153 }
1154
1155 /****************************************************************************/
1156 /* Indirect register read.                                                  */
1157 /*                                                                          */
1158 /* Reads NetXtreme II registers using an index/data register pair in PCI    */
1159 /* configuration space.  Using this mechanism avoids issues with posted     */
1160 /* reads but is much slower than memory-mapped I/O.                         */
1161 /*                                                                          */
1162 /* Returns:                                                                 */
1163 /*   The value of the register.                                             */
1164 /****************************************************************************/
1165 static uint32_t
1166 bce_reg_rd_ind(struct bce_softc *sc, uint32_t offset)
1167 {
1168         device_t dev = sc->bce_dev;
1169
1170         pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1171         return pci_read_config(dev, BCE_PCICFG_REG_WINDOW, 4);
1172 }
1173
1174 /****************************************************************************/
1175 /* Indirect register write.                                                 */
1176 /*                                                                          */
1177 /* Writes NetXtreme II registers using an index/data register pair in PCI   */
1178 /* configuration space.  Using this mechanism avoids issues with posted     */
/* writes but is much slower than memory-mapped I/O.                        */
1180 /*                                                                          */
1181 /* Returns:                                                                 */
1182 /*   Nothing.                                                               */
1183 /****************************************************************************/
1184 static void
1185 bce_reg_wr_ind(struct bce_softc *sc, uint32_t offset, uint32_t val)
1186 {
1187         device_t dev = sc->bce_dev;
1188
1189         pci_write_config(dev, BCE_PCICFG_REG_WINDOW_ADDRESS, offset, 4);
1190         pci_write_config(dev, BCE_PCICFG_REG_WINDOW, val, 4);
1191 }
1192
1193 /****************************************************************************/
1194 /* Shared memory write.                                                     */
1195 /*                                                                          */
1196 /* Writes NetXtreme II shared memory region.                                */
1197 /*                                                                          */
1198 /* Returns:                                                                 */
1199 /*   Nothing.                                                               */
1200 /****************************************************************************/
1201 static void
1202 bce_shmem_wr(struct bce_softc *sc, uint32_t offset, uint32_t val)
1203 {
1204         bce_reg_wr_ind(sc, sc->bce_shmem_base + offset, val);
1205 }
1206
1207 /****************************************************************************/
1208 /* Shared memory read.                                                      */
1209 /*                                                                          */
1210 /* Reads NetXtreme II shared memory region.                                 */
1211 /*                                                                          */
1212 /* Returns:                                                                 */
1213 /*   The 32 bit value read.                                                 */
1214 /****************************************************************************/
1215 static u32
1216 bce_shmem_rd(struct bce_softc *sc, uint32_t offset)
1217 {
1218         return bce_reg_rd_ind(sc, sc->bce_shmem_base + offset);
1219 }
1220
1221 /****************************************************************************/
1222 /* Context memory write.                                                    */
1223 /*                                                                          */
1224 /* The NetXtreme II controller uses context memory to track connection      */
1225 /* information for L2 and higher network protocols.                         */
1226 /*                                                                          */
1227 /* Returns:                                                                 */
1228 /*   Nothing.                                                               */
1229 /****************************************************************************/
1230 static void
1231 bce_ctx_wr(struct bce_softc *sc, uint32_t cid_addr, uint32_t ctx_offset,
1232     uint32_t ctx_val)
1233 {
1234         uint32_t idx, offset = ctx_offset + cid_addr;
1235         uint32_t val, retry_cnt = 5;
1236
1237         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1238             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1239                 REG_WR(sc, BCE_CTX_CTX_DATA, ctx_val);
1240                 REG_WR(sc, BCE_CTX_CTX_CTRL, (offset | BCE_CTX_CTX_CTRL_WRITE_REQ));
1241
1242                 for (idx = 0; idx < retry_cnt; idx++) {
1243                         val = REG_RD(sc, BCE_CTX_CTX_CTRL);
1244                         if ((val & BCE_CTX_CTX_CTRL_WRITE_REQ) == 0)
1245                                 break;
1246                         DELAY(5);
1247                 }
1248
1249                 if (val & BCE_CTX_CTX_CTRL_WRITE_REQ) {
1250                         device_printf(sc->bce_dev,
1251                             "Unable to write CTX memory: "
1252                             "cid_addr = 0x%08X, offset = 0x%08X!\n",
1253                             cid_addr, ctx_offset);
1254                 }
1255         } else {
1256                 REG_WR(sc, BCE_CTX_DATA_ADR, offset);
1257                 REG_WR(sc, BCE_CTX_DATA, ctx_val);
1258         }
1259 }
1260
1261 /****************************************************************************/
1262 /* PHY register read.                                                       */
1263 /*                                                                          */
1264 /* Implements register reads on the MII bus.                                */
1265 /*                                                                          */
1266 /* Returns:                                                                 */
1267 /*   The value of the register.                                             */
1268 /****************************************************************************/
static int
bce_miibus_read_reg(device_t dev, int phy, int reg)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	/* Temporarily clear the auto-poll bit so we own the MDIO bus. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		/* Read back (presumably flushes the posted write). */
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Start the read transaction. */
	val = BCE_MIPHY(phy) | BCE_MIREG(reg) |
	      BCE_EMAC_MDIO_COMM_COMMAND_READ | BCE_EMAC_MDIO_COMM_DISEXT |
	      BCE_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val);

	/* Poll until the BUSY bit clears, then latch the data bits. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);

			val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
			val &= BCE_EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	/* On timeout report the failure and return 0 as the value. */
	if (val & BCE_EMAC_MDIO_COMM_START_BUSY) {
		if_printf(&sc->arpcom.ac_if,
			  "Error: PHY read timeout! phy = %d, reg = 0x%04X\n",
			  phy, reg);
		val = 0x0;
	} else {
		val = REG_RD(sc, BCE_EMAC_MDIO_COMM);
	}

	/* Restore auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return (val & 0xffff);
}
1328
1329 /****************************************************************************/
1330 /* PHY register write.                                                      */
1331 /*                                                                          */
1332 /* Implements register writes on the MII bus.                               */
1333 /*                                                                          */
1334 /* Returns:                                                                 */
/*   0 (write errors are only reported to the console).                     */
1336 /****************************************************************************/
static int
bce_miibus_write_reg(device_t dev, int phy, int reg, int val)
{
	struct bce_softc *sc = device_get_softc(dev);
	uint32_t val1;
	int i;

	/* Make sure we are accessing the correct PHY address. */
	KASSERT(phy == sc->bce_phy_addr,
	    ("invalid phyno %d, should be %d\n", phy, sc->bce_phy_addr));

	/* Temporarily clear the auto-poll bit so we own the MDIO bus. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 &= ~BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		/* Read back (presumably flushes the posted write). */
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}

	/* Start the write transaction. */
	val1 = BCE_MIPHY(phy) | BCE_MIREG(reg) | val |
		BCE_EMAC_MDIO_COMM_COMMAND_WRITE |
		BCE_EMAC_MDIO_COMM_START_BUSY | BCE_EMAC_MDIO_COMM_DISEXT;
	REG_WR(sc, BCE_EMAC_MDIO_COMM, val1);

	/* Poll until the BUSY bit clears or we time out. */
	for (i = 0; i < BCE_PHY_TIMEOUT; i++) {
		DELAY(10);

		val1 = REG_RD(sc, BCE_EMAC_MDIO_COMM);
		if (!(val1 & BCE_EMAC_MDIO_COMM_START_BUSY)) {
			DELAY(5);
			break;
		}
	}

	/* A timeout is only logged; 0 is returned regardless. */
	if (val1 & BCE_EMAC_MDIO_COMM_START_BUSY)
		if_printf(&sc->arpcom.ac_if, "PHY write timeout!\n");

	/* Restore auto-polling if it was enabled on entry. */
	if (sc->bce_phy_flags & BCE_PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(sc, BCE_EMAC_MDIO_MODE);
		val1 |= BCE_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(sc, BCE_EMAC_MDIO_MODE, val1);
		REG_RD(sc, BCE_EMAC_MDIO_MODE);

		DELAY(40);
	}
	return 0;
}
1387
1388 /****************************************************************************/
1389 /* MII bus status change.                                                   */
1390 /*                                                                          */
1391 /* Called by the MII bus driver when the PHY establishes link to set the    */
1392 /* MAC interface registers.                                                 */
1393 /*                                                                          */
1394 /* Returns:                                                                 */
1395 /*   Nothing.                                                               */
1396 /****************************************************************************/
1397 static void
1398 bce_miibus_statchg(device_t dev)
1399 {
1400         struct bce_softc *sc = device_get_softc(dev);
1401         struct mii_data *mii = device_get_softc(sc->bce_miibus);
1402
1403         BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT);
1404
1405         /*
1406          * Set MII or GMII interface based on the speed negotiated
1407          * by the PHY.
1408          */
1409         if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 
1410             IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) {
1411                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_GMII);
1412         } else {
1413                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_PORT_MII);
1414         }
1415
1416         /*
1417          * Set half or full duplex based on the duplicity negotiated
1418          * by the PHY.
1419          */
1420         if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
1421                 BCE_CLRBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1422         } else {
1423                 BCE_SETBIT(sc, BCE_EMAC_MODE, BCE_EMAC_MODE_HALF_DUPLEX);
1424         }
1425 }
1426
1427 /****************************************************************************/
1428 /* Acquire NVRAM lock.                                                      */
1429 /*                                                                          */
1430 /* Before the NVRAM can be accessed the caller must acquire an NVRAM lock.  */
/* Lock 2 is reserved for use by the driver; the remaining arbitration      */
/* locks are used by the firmware.                                          */
1433 /*                                                                          */
1434 /* Returns:                                                                 */
1435 /*   0 on success, positive value on failure.                               */
1436 /****************************************************************************/
1437 static int
1438 bce_acquire_nvram_lock(struct bce_softc *sc)
1439 {
1440         uint32_t val;
1441         int j;
1442
1443         /* Request access to the flash interface. */
1444         REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_SET2);
1445         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1446                 val = REG_RD(sc, BCE_NVM_SW_ARB);
1447                 if (val & BCE_NVM_SW_ARB_ARB_ARB2)
1448                         break;
1449
1450                 DELAY(5);
1451         }
1452
1453         if (j >= NVRAM_TIMEOUT_COUNT) {
1454                 return EBUSY;
1455         }
1456         return 0;
1457 }
1458
1459 /****************************************************************************/
1460 /* Release NVRAM lock.                                                      */
1461 /*                                                                          */
1462 /* When the caller is finished accessing NVRAM the lock must be released.   */
/* Lock 2 is reserved for use by the driver; the remaining arbitration      */
/* locks are used by the firmware.                                          */
1465 /*                                                                          */
1466 /* Returns:                                                                 */
1467 /*   0 on success, positive value on failure.                               */
1468 /****************************************************************************/
1469 static int
1470 bce_release_nvram_lock(struct bce_softc *sc)
1471 {
1472         int j;
1473         uint32_t val;
1474
1475         /*
1476          * Relinquish nvram interface.
1477          */
1478         REG_WR(sc, BCE_NVM_SW_ARB, BCE_NVM_SW_ARB_ARB_REQ_CLR2);
1479
1480         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
1481                 val = REG_RD(sc, BCE_NVM_SW_ARB);
1482                 if (!(val & BCE_NVM_SW_ARB_ARB_ARB2))
1483                         break;
1484
1485                 DELAY(5);
1486         }
1487
1488         if (j >= NVRAM_TIMEOUT_COUNT) {
1489                 return EBUSY;
1490         }
1491         return 0;
1492 }
1493
1494 /****************************************************************************/
1495 /* Enable NVRAM access.                                                     */
1496 /*                                                                          */
1497 /* Before accessing NVRAM for read or write operations the caller must      */
/* enable NVRAM access.                                                     */
1499 /*                                                                          */
1500 /* Returns:                                                                 */
1501 /*   Nothing.                                                               */
1502 /****************************************************************************/
1503 static void
1504 bce_enable_nvram_access(struct bce_softc *sc)
1505 {
1506         uint32_t val;
1507
1508         val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1509         /* Enable both bits, even on read. */
1510         REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1511                val | BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN);
1512 }
1513
1514 /****************************************************************************/
1515 /* Disable NVRAM access.                                                    */
1516 /*                                                                          */
1517 /* When the caller is finished accessing NVRAM access must be disabled.     */
1518 /*                                                                          */
1519 /* Returns:                                                                 */
1520 /*   Nothing.                                                               */
1521 /****************************************************************************/
1522 static void
1523 bce_disable_nvram_access(struct bce_softc *sc)
1524 {
1525         uint32_t val;
1526
1527         val = REG_RD(sc, BCE_NVM_ACCESS_ENABLE);
1528
1529         /* Disable both bits, even after read. */
1530         REG_WR(sc, BCE_NVM_ACCESS_ENABLE,
1531                val & ~(BCE_NVM_ACCESS_ENABLE_EN | BCE_NVM_ACCESS_ENABLE_WR_EN));
1532 }
1533
1534 /****************************************************************************/
1535 /* Read a dword (32 bits) from NVRAM.                                       */
1536 /*                                                                          */
1537 /* Read a 32 bit word from NVRAM.  The caller is assumed to have already    */
1538 /* obtained the NVRAM lock and enabled the controller for NVRAM access.     */
1539 /*                                                                          */
1540 /* Returns:                                                                 */
1541 /*   0 on success and the 32 bit value read, positive value on failure.     */
1542 /****************************************************************************/
static int
bce_nvram_read_dword(struct bce_softc *sc, uint32_t offset, uint8_t *ret_val,
		     uint32_t cmd_flags)
{
	uint32_t cmd;
	int i, rc = 0;

	/* Build the command word. */
	cmd = BCE_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate the offset for buffered flash. */
	if (sc->bce_flash_info->flags & BCE_NV_TRANSLATE) {
		/* Translate the linear offset into a page/offset pair. */
		offset = ((offset / sc->bce_flash_info->page_size) <<
			  sc->bce_flash_info->page_bits) +
			 (offset % sc->bce_flash_info->page_size);
	}

	/*
	 * Clear the DONE bit separately, set the address to read,
	 * and issue the read.
	 */
	REG_WR(sc, BCE_NVM_COMMAND, BCE_NVM_COMMAND_DONE);
	REG_WR(sc, BCE_NVM_ADDR, offset & BCE_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(sc, BCE_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (i = 0; i < NVRAM_TIMEOUT_COUNT; i++) {
		uint32_t val;

		DELAY(5);

		val = REG_RD(sc, BCE_NVM_COMMAND);
		if (val & BCE_NVM_COMMAND_DONE) {
			val = REG_RD(sc, BCE_NVM_READ);

			/* NVRAM data is big-endian; convert to host order. */
			val = be32toh(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}

	/* Check for errors. */
	if (i >= NVRAM_TIMEOUT_COUNT) {
		if_printf(&sc->arpcom.ac_if,
			  "Timeout error reading NVRAM at offset 0x%08X!\n",
			  offset);
		rc = EBUSY;
	}
	return rc;
}
1593
1594 /****************************************************************************/
1595 /* Initialize NVRAM access.                                                 */
1596 /*                                                                          */
1597 /* Identify the NVRAM device in use and prepare the NVRAM interface to      */
1598 /* access that device.                                                      */
1599 /*                                                                          */
1600 /* Returns:                                                                 */
1601 /*   0 on success, positive value on failure.                               */
1602 /****************************************************************************/
static int
bce_init_nvram(struct bce_softc *sc)
{
	uint32_t val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* 5709/5716 use a fixed flash configuration; skip detection. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		sc->bce_flash_info = &flash_5709;
		goto bce_init_nvram_get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(sc, BCE_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	/*
	 * Flash reconfiguration is required to support additional
	 * NVRAM devices not directly supported in hardware.
	 * Check if the flash interface was reconfigured
	 * by the bootcode.
	 */

	if (val & 0x40000000) {
		/* Flash interface reconfigured by bootcode. */
		for (j = 0, flash = flash_table; j < entry_count;
		     j++, flash++) {
			/* Match on the backup strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				sc->bce_flash_info = flash;
				break;
			}
		}
	} else {
		/* Flash interface not yet reconfigured. */
		uint32_t mask;

		/* Select the strapping mask used for device matching. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		/* Look for the matching NVRAM device configuration data. */
		for (j = 0, flash = flash_table; j < entry_count;
		     j++, flash++) {
			/* Check if the device matches any of the known devices. */
			if ((val & mask) == (flash->strapping & mask)) {
				/* Found a device match. */
				sc->bce_flash_info = flash;

				/* Request access to the flash interface. */
				rc = bce_acquire_nvram_lock(sc);
				if (rc != 0)
					return rc;

				/* Reconfigure the flash interface. */
				bce_enable_nvram_access(sc);
				REG_WR(sc, BCE_NVM_CFG1, flash->config1);
				REG_WR(sc, BCE_NVM_CFG2, flash->config2);
				REG_WR(sc, BCE_NVM_CFG3, flash->config3);
				REG_WR(sc, BCE_NVM_WRITE1, flash->write1);
				bce_disable_nvram_access(sc);
				bce_release_nvram_lock(sc);
				break;
			}
		}
	}

	/* Check if a matching device was found. */
	if (j == entry_count) {
		sc->bce_flash_info = NULL;
		if_printf(&sc->arpcom.ac_if, "Unknown Flash NVRAM found!\n");
		return ENODEV;
	}

bce_init_nvram_get_flash_size:
	/*
	 * Read the flash size recorded in the shared memory interface;
	 * fall back to the table's total_size if none was recorded.
	 */
	val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG2) &
	    BCE_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		sc->bce_flash_size = val;
	else
		sc->bce_flash_size = sc->bce_flash_info->total_size;

	return rc;
}
1691
1692 /****************************************************************************/
1693 /* Read an arbitrary range of data from NVRAM.                              */
1694 /*                                                                          */
1695 /* Prepares the NVRAM interface for access and reads the requested data     */
1696 /* into the supplied buffer.                                                */
1697 /*                                                                          */
1698 /* Returns:                                                                 */
1699 /*   0 on success and the data read, positive value on failure.             */
1700 /****************************************************************************/
1701 static int
1702 bce_nvram_read(struct bce_softc *sc, uint32_t offset, uint8_t *ret_buf,
1703                int buf_size)
1704 {
1705         uint32_t cmd_flags, offset32, len32, extra;
1706         int rc = 0;
1707
1708         if (buf_size == 0)
1709                 return 0;
1710
1711         /* Request access to the flash interface. */
1712         rc = bce_acquire_nvram_lock(sc);
1713         if (rc != 0)
1714                 return rc;
1715
1716         /* Enable access to flash interface */
1717         bce_enable_nvram_access(sc);
1718
1719         len32 = buf_size;
1720         offset32 = offset;
1721         extra = 0;
1722
1723         cmd_flags = 0;
1724
1725         /* XXX should we release nvram lock if read_dword() fails? */
1726         if (offset32 & 3) {
1727                 uint8_t buf[4];
1728                 uint32_t pre_len;
1729
1730                 offset32 &= ~3;
1731                 pre_len = 4 - (offset & 3);
1732
1733                 if (pre_len >= len32) {
1734                         pre_len = len32;
1735                         cmd_flags = BCE_NVM_COMMAND_FIRST | BCE_NVM_COMMAND_LAST;
1736                 } else {
1737                         cmd_flags = BCE_NVM_COMMAND_FIRST;
1738                 }
1739
1740                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1741                 if (rc)
1742                         return rc;
1743
1744                 memcpy(ret_buf, buf + (offset & 3), pre_len);
1745
1746                 offset32 += 4;
1747                 ret_buf += pre_len;
1748                 len32 -= pre_len;
1749         }
1750
1751         if (len32 & 3) {
1752                 extra = 4 - (len32 & 3);
1753                 len32 = (len32 + 4) & ~3;
1754         }
1755
1756         if (len32 == 4) {
1757                 uint8_t buf[4];
1758
1759                 if (cmd_flags)
1760                         cmd_flags = BCE_NVM_COMMAND_LAST;
1761                 else
1762                         cmd_flags = BCE_NVM_COMMAND_FIRST |
1763                                     BCE_NVM_COMMAND_LAST;
1764
1765                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1766
1767                 memcpy(ret_buf, buf, 4 - extra);
1768         } else if (len32 > 0) {
1769                 uint8_t buf[4];
1770
1771                 /* Read the first word. */
1772                 if (cmd_flags)
1773                         cmd_flags = 0;
1774                 else
1775                         cmd_flags = BCE_NVM_COMMAND_FIRST;
1776
1777                 rc = bce_nvram_read_dword(sc, offset32, ret_buf, cmd_flags);
1778
1779                 /* Advance to the next dword. */
1780                 offset32 += 4;
1781                 ret_buf += 4;
1782                 len32 -= 4;
1783
1784                 while (len32 > 4 && rc == 0) {
1785                         rc = bce_nvram_read_dword(sc, offset32, ret_buf, 0);
1786
1787                         /* Advance to the next dword. */
1788                         offset32 += 4;
1789                         ret_buf += 4;
1790                         len32 -= 4;
1791                 }
1792
1793                 if (rc)
1794                         goto bce_nvram_read_locked_exit;
1795
1796                 cmd_flags = BCE_NVM_COMMAND_LAST;
1797                 rc = bce_nvram_read_dword(sc, offset32, buf, cmd_flags);
1798
1799                 memcpy(ret_buf, buf, 4 - extra);
1800         }
1801
1802 bce_nvram_read_locked_exit:
1803         /* Disable access to flash interface and release the lock. */
1804         bce_disable_nvram_access(sc);
1805         bce_release_nvram_lock(sc);
1806
1807         return rc;
1808 }
1809
1810 /****************************************************************************/
1811 /* Verifies that NVRAM is accessible and contains valid data.               */
1812 /*                                                                          */
1813 /* Reads the configuration data from NVRAM and verifies that the CRC is     */
1814 /* correct.                                                                 */
1815 /*                                                                          */
1816 /* Returns:                                                                 */
1817 /*   0 on success, positive value on failure.                               */
1818 /****************************************************************************/
1819 static int
1820 bce_nvram_test(struct bce_softc *sc)
1821 {
1822         uint32_t buf[BCE_NVRAM_SIZE / 4];
1823         uint32_t magic, csum;
1824         uint8_t *data = (uint8_t *)buf;
1825         int rc = 0;
1826
1827         /*
1828          * Check that the device NVRAM is valid by reading
1829          * the magic value at offset 0.
1830          */
1831         rc = bce_nvram_read(sc, 0, data, 4);
1832         if (rc != 0)
1833                 return rc;
1834
1835         magic = be32toh(buf[0]);
1836         if (magic != BCE_NVRAM_MAGIC) {
1837                 if_printf(&sc->arpcom.ac_if,
1838                           "Invalid NVRAM magic value! Expected: 0x%08X, "
1839                           "Found: 0x%08X\n", BCE_NVRAM_MAGIC, magic);
1840                 return ENODEV;
1841         }
1842
1843         /*
1844          * Verify that the device NVRAM includes valid
1845          * configuration data.
1846          */
1847         rc = bce_nvram_read(sc, 0x100, data, BCE_NVRAM_SIZE);
1848         if (rc != 0)
1849                 return rc;
1850
1851         csum = ether_crc32_le(data, 0x100);
1852         if (csum != BCE_CRC32_RESIDUAL) {
1853                 if_printf(&sc->arpcom.ac_if,
1854                           "Invalid Manufacturing Information NVRAM CRC! "
1855                           "Expected: 0x%08X, Found: 0x%08X\n",
1856                           BCE_CRC32_RESIDUAL, csum);
1857                 return ENODEV;
1858         }
1859
1860         csum = ether_crc32_le(data + 0x100, 0x100);
1861         if (csum != BCE_CRC32_RESIDUAL) {
1862                 if_printf(&sc->arpcom.ac_if,
1863                           "Invalid Feature Configuration Information "
1864                           "NVRAM CRC! Expected: 0x%08X, Found: 08%08X\n",
1865                           BCE_CRC32_RESIDUAL, csum);
1866                 rc = ENODEV;
1867         }
1868         return rc;
1869 }
1870
1871 /****************************************************************************/
1872 /* Identifies the current media type of the controller and sets the PHY     */
1873 /* address.                                                                 */
1874 /*                                                                          */
1875 /* Returns:                                                                 */
1876 /*   Nothing.                                                               */
1877 /****************************************************************************/
1878 static void
1879 bce_get_media(struct bce_softc *sc)
1880 {
1881         uint32_t val;
1882
1883         sc->bce_phy_addr = 1;
1884
1885         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
1886             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
1887                 uint32_t val = REG_RD(sc, BCE_MISC_DUAL_MEDIA_CTRL);
1888                 uint32_t bond_id = val & BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID;
1889                 uint32_t strap;
1890
1891                 /*
1892                  * The BCM5709S is software configurable
1893                  * for Copper or SerDes operation.
1894                  */
1895                 if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_C) {
1896                         return;
1897                 } else if (bond_id == BCE_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
1898                         sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1899                         return;
1900                 }
1901
1902                 if (val & BCE_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE) {
1903                         strap = (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
1904                 } else {
1905                         strap =
1906                         (val & BCE_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
1907                 }
1908
1909                 if (pci_get_function(sc->bce_dev) == 0) {
1910                         switch (strap) {
1911                         case 0x4:
1912                         case 0x5:
1913                         case 0x6:
1914                                 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1915                                 break;
1916                         }
1917                 } else {
1918                         switch (strap) {
1919                         case 0x1:
1920                         case 0x2:
1921                         case 0x4:
1922                                 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1923                                 break;
1924                         }
1925                 }
1926         } else if (BCE_CHIP_BOND_ID(sc) & BCE_CHIP_BOND_ID_SERDES_BIT) {
1927                 sc->bce_phy_flags |= BCE_PHY_SERDES_FLAG;
1928         }
1929
1930         if (sc->bce_phy_flags & BCE_PHY_SERDES_FLAG) {
1931                 sc->bce_flags |= BCE_NO_WOL_FLAG;
1932                 if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5706) {
1933                         sc->bce_phy_addr = 2;
1934                         val = bce_shmem_rd(sc, BCE_SHARED_HW_CFG_CONFIG);
1935                         if (val & BCE_SHARED_HW_CFG_PHY_2_5G)
1936                                 sc->bce_phy_flags |= BCE_PHY_2_5G_CAPABLE_FLAG;
1937                 }
1938         } else if ((BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) ||
1939             (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)) {
1940                 sc->bce_phy_flags |= BCE_PHY_CRC_FIX_FLAG;
1941         }
1942 }
1943
/****************************************************************************/
/* Free the DMA maps, memory and tags used by a TX ring.                    */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_destroy_tx_ring(struct bce_tx_ring *txr)
{
	int i;

	/* Destroy the TX buffer descriptor DMA stuffs. */
	if (txr->tx_bd_chain_tag != NULL) {
		for (i = 0; i < txr->tx_pages; i++) {
			if (txr->tx_bd_chain[i] != NULL) {
				/* Unload the map before freeing the memory. */
				bus_dmamap_unload(txr->tx_bd_chain_tag,
				    txr->tx_bd_chain_map[i]);
				bus_dmamem_free(txr->tx_bd_chain_tag,
				    txr->tx_bd_chain[i],
				    txr->tx_bd_chain_map[i]);
			}
		}
		bus_dma_tag_destroy(txr->tx_bd_chain_tag);
	}

	/* Destroy the TX mbuf DMA stuffs. */
	if (txr->tx_mbuf_tag != NULL) {
		for (i = 0; i < TOTAL_TX_BD(txr); i++) {
			/* Must have been unloaded in bce_stop() */
			KKASSERT(txr->tx_mbuf_ptr[i] == NULL);
			bus_dmamap_destroy(txr->tx_mbuf_tag,
			    txr->tx_mbuf_map[i]);
		}
		bus_dma_tag_destroy(txr->tx_mbuf_tag);
	}

	/* Free the ring's bookkeeping arrays, if they were allocated. */
	if (txr->tx_bd_chain_map != NULL)
		kfree(txr->tx_bd_chain_map, M_DEVBUF);
	if (txr->tx_bd_chain != NULL)
		kfree(txr->tx_bd_chain, M_DEVBUF);
	if (txr->tx_bd_chain_paddr != NULL)
		kfree(txr->tx_bd_chain_paddr, M_DEVBUF);

	if (txr->tx_mbuf_map != NULL)
		kfree(txr->tx_mbuf_map, M_DEVBUF);
	if (txr->tx_mbuf_ptr != NULL)
		kfree(txr->tx_mbuf_ptr, M_DEVBUF);
}
1986
/****************************************************************************/
/* Free the DMA maps, memory and tags used by an RX ring.                   */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_destroy_rx_ring(struct bce_rx_ring *rxr)
{
	int i;

	/* Destroy the RX buffer descriptor DMA stuffs. */
	if (rxr->rx_bd_chain_tag != NULL) {
		for (i = 0; i < rxr->rx_pages; i++) {
			if (rxr->rx_bd_chain[i] != NULL) {
				/* Unload the map before freeing the memory. */
				bus_dmamap_unload(rxr->rx_bd_chain_tag,
				    rxr->rx_bd_chain_map[i]);
				bus_dmamem_free(rxr->rx_bd_chain_tag,
				    rxr->rx_bd_chain[i],
				    rxr->rx_bd_chain_map[i]);
			}
		}
		bus_dma_tag_destroy(rxr->rx_bd_chain_tag);
	}

	/* Destroy the RX mbuf DMA stuffs. */
	if (rxr->rx_mbuf_tag != NULL) {
		for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
			/* Must have been unloaded in bce_stop() */
			KKASSERT(rxr->rx_mbuf_ptr[i] == NULL);
			bus_dmamap_destroy(rxr->rx_mbuf_tag,
			    rxr->rx_mbuf_map[i]);
		}
		/* The RX path also keeps a spare map for buffer swapping. */
		bus_dmamap_destroy(rxr->rx_mbuf_tag, rxr->rx_mbuf_tmpmap);
		bus_dma_tag_destroy(rxr->rx_mbuf_tag);
	}

	/* Free the ring's bookkeeping arrays, if they were allocated. */
	if (rxr->rx_bd_chain_map != NULL)
		kfree(rxr->rx_bd_chain_map, M_DEVBUF);
	if (rxr->rx_bd_chain != NULL)
		kfree(rxr->rx_bd_chain, M_DEVBUF);
	if (rxr->rx_bd_chain_paddr != NULL)
		kfree(rxr->rx_bd_chain_paddr, M_DEVBUF);

	if (rxr->rx_mbuf_map != NULL)
		kfree(rxr->rx_mbuf_map, M_DEVBUF);
	if (rxr->rx_mbuf_ptr != NULL)
		kfree(rxr->rx_mbuf_ptr, M_DEVBUF);
	if (rxr->rx_mbuf_paddr != NULL)
		kfree(rxr->rx_mbuf_paddr, M_DEVBUF);
}
2032
2033 /****************************************************************************/
2034 /* Free any DMA memory owned by the driver.                                 */
2035 /*                                                                          */
/* Scans through each data structure that requires DMA memory and frees     */
2037 /* the memory if allocated.                                                 */
2038 /*                                                                          */
2039 /* Returns:                                                                 */
2040 /*   Nothing.                                                               */
2041 /****************************************************************************/
static void
bce_dma_free(struct bce_softc *sc)
{
	int i;

	/* Destroy the status block. */
	if (sc->status_tag != NULL) {
		if (sc->status_block != NULL) {
			/* Unload the map before freeing the memory. */
			bus_dmamap_unload(sc->status_tag, sc->status_map);
			bus_dmamem_free(sc->status_tag, sc->status_block,
					sc->status_map);
		}
		bus_dma_tag_destroy(sc->status_tag);
	}

	/* Destroy the statistics block. */
	if (sc->stats_tag != NULL) {
		if (sc->stats_block != NULL) {
			bus_dmamap_unload(sc->stats_tag, sc->stats_map);
			bus_dmamem_free(sc->stats_tag, sc->stats_block,
					sc->stats_map);
		}
		bus_dma_tag_destroy(sc->stats_tag);
	}

	/* Destroy the CTX DMA stuffs. */
	if (sc->ctx_tag != NULL) {
		for (i = 0; i < sc->ctx_pages; i++) {
			if (sc->ctx_block[i] != NULL) {
				bus_dmamap_unload(sc->ctx_tag, sc->ctx_map[i]);
				bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
						sc->ctx_map[i]);
			}
		}
		bus_dma_tag_destroy(sc->ctx_tag);
	}

	/* Free TX rings */
	if (sc->tx_rings != NULL) {
		for (i = 0; i < sc->tx_ring_cnt; ++i)
			bce_destroy_tx_ring(&sc->tx_rings[i]);
		kfree(sc->tx_rings, M_DEVBUF);
	}

	/* Free RX rings */
	if (sc->rx_rings != NULL) {
		for (i = 0; i < sc->rx_ring_cnt; ++i)
			bce_destroy_rx_ring(&sc->rx_rings[i]);
		kfree(sc->rx_rings, M_DEVBUF);
	}

	/* Destroy the parent tag last, after all child tags are gone. */
	if (sc->parent_tag != NULL)
		bus_dma_tag_destroy(sc->parent_tag);
}
2097
2098 /****************************************************************************/
2099 /* Get DMA memory from the OS.                                              */
2100 /*                                                                          */
2101 /* Validates that the OS has provided DMA buffers in response to a          */
2102 /* bus_dmamap_load() call and saves the physical address of those buffers.  */
2103 /* When the callback is used the OS will return 0 for the mapping function  */
2104 /* (bus_dmamap_load()) so we use the value of map_arg->maxsegs to pass any  */
2105 /* failures back to the caller.                                             */
2106 /*                                                                          */
2107 /* Returns:                                                                 */
2108 /*   Nothing.                                                               */
2109 /****************************************************************************/
2110 static void
2111 bce_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2112 {
2113         bus_addr_t *busaddr = arg;
2114
2115         /* Check for an error and signal the caller that an error occurred. */
2116         if (error)
2117                 return;
2118
2119         KASSERT(nseg == 1, ("only one segment is allowed"));
2120         *busaddr = segs->ds_addr;
2121 }
2122
2123 static int
2124 bce_create_tx_ring(struct bce_tx_ring *txr)
2125 {
2126         int pages, rc, i;
2127
2128         lwkt_serialize_init(&txr->tx_serialize);
2129         txr->tx_wreg = bce_tx_wreg;
2130
2131         pages = device_getenv_int(txr->sc->bce_dev, "tx_pages", bce_tx_pages);
2132         if (pages <= 0 || pages > TX_PAGES_MAX || !powerof2(pages)) {
2133                 device_printf(txr->sc->bce_dev, "invalid # of TX pages\n");
2134                 pages = TX_PAGES_DEFAULT;
2135         }
2136         txr->tx_pages = pages;
2137
2138         txr->tx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * txr->tx_pages,
2139             M_DEVBUF, M_WAITOK | M_ZERO);
2140         txr->tx_bd_chain = kmalloc(sizeof(struct tx_bd *) * txr->tx_pages,
2141             M_DEVBUF, M_WAITOK | M_ZERO);
2142         txr->tx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * txr->tx_pages,
2143             M_DEVBUF, M_WAITOK | M_ZERO);
2144
2145         txr->tx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_TX_BD(txr),
2146             M_DEVBUF, M_WAITOK | M_ZERO);
2147         txr->tx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_TX_BD(txr),
2148             M_DEVBUF, M_WAITOK | M_ZERO);
2149
2150         /*
2151          * Create a DMA tag for the TX buffer descriptor chain,
2152          * allocate and clear the  memory, and fetch the
2153          * physical address of the block.
2154          */
2155         rc = bus_dma_tag_create(txr->sc->parent_tag, BCM_PAGE_SIZE, 0,
2156             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2157             BCE_TX_CHAIN_PAGE_SZ, 1, BCE_TX_CHAIN_PAGE_SZ,
2158             0, &txr->tx_bd_chain_tag);
2159         if (rc != 0) {
2160                 device_printf(txr->sc->bce_dev, "Could not allocate "
2161                     "TX descriptor chain DMA tag!\n");
2162                 return rc;
2163         }
2164
2165         for (i = 0; i < txr->tx_pages; i++) {
2166                 bus_addr_t busaddr;
2167
2168                 rc = bus_dmamem_alloc(txr->tx_bd_chain_tag,
2169                     (void **)&txr->tx_bd_chain[i],
2170                     BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
2171                     &txr->tx_bd_chain_map[i]);
2172                 if (rc != 0) {
2173                         device_printf(txr->sc->bce_dev,
2174                             "Could not allocate %dth TX descriptor "
2175                             "chain DMA memory!\n", i);
2176                         return rc;
2177                 }
2178
2179                 rc = bus_dmamap_load(txr->tx_bd_chain_tag,
2180                     txr->tx_bd_chain_map[i],
2181                     txr->tx_bd_chain[i],
2182                     BCE_TX_CHAIN_PAGE_SZ,
2183                     bce_dma_map_addr, &busaddr,
2184                     BUS_DMA_WAITOK);
2185                 if (rc != 0) {
2186                         if (rc == EINPROGRESS) {
2187                                 panic("%s coherent memory loading "
2188                                     "is still in progress!",
2189                                     txr->sc->arpcom.ac_if.if_xname);
2190                         }
2191                         device_printf(txr->sc->bce_dev, "Could not map %dth "
2192                             "TX descriptor chain DMA memory!\n", i);
2193                         bus_dmamem_free(txr->tx_bd_chain_tag,
2194                             txr->tx_bd_chain[i],
2195                             txr->tx_bd_chain_map[i]);
2196                         txr->tx_bd_chain[i] = NULL;
2197                         return rc;
2198                 }
2199
2200                 txr->tx_bd_chain_paddr[i] = busaddr;
2201         }
2202
2203         /* Create a DMA tag for TX mbufs. */
2204         rc = bus_dma_tag_create(txr->sc->parent_tag, 1, 0,
2205             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
2206             IP_MAXPACKET + sizeof(struct ether_vlan_header),
2207             BCE_MAX_SEGMENTS, PAGE_SIZE,
2208             BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2209             &txr->tx_mbuf_tag);
2210         if (rc != 0) {
2211                 device_printf(txr->sc->bce_dev,
2212                     "Could not allocate TX mbuf DMA tag!\n");
2213                 return rc;
2214         }
2215
2216         /* Create DMA maps for the TX mbufs clusters. */
2217         for (i = 0; i < TOTAL_TX_BD(txr); i++) {
2218                 rc = bus_dmamap_create(txr->tx_mbuf_tag,
2219                     BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
2220                     &txr->tx_mbuf_map[i]);
2221                 if (rc != 0) {
2222                         int j;
2223
2224                         for (j = 0; j < i; ++j) {
2225                                 bus_dmamap_destroy(txr->tx_mbuf_tag,
2226                                     txr->tx_mbuf_map[i]);
2227                         }
2228                         bus_dma_tag_destroy(txr->tx_mbuf_tag);
2229                         txr->tx_mbuf_tag = NULL;
2230
2231                         device_printf(txr->sc->bce_dev, "Unable to create "
2232                             "%dth TX mbuf DMA map!\n", i);
2233                         return rc;
2234                 }
2235         }
2236         return 0;
2237 }
2238
/****************************************************************************/
/* Allocate the DMA resources for a single RX ring.                         */
/*                                                                          */
/* Creates the RX buffer descriptor chain pages, a temporary DMA map used  */
/* when swapping in newly allocated mbuf clusters, and one DMA map per RX  */
/* buffer descriptor.                                                       */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.  On failure, resources     */
/*   already attached to 'rxr' are presumably released later by            */
/*   bce_destroy_rx_ring() via the caller's error path — confirm.          */
/****************************************************************************/
static int
bce_create_rx_ring(struct bce_rx_ring *rxr)
{
        int pages, rc, i;

        lwkt_serialize_init(&rxr->rx_serialize);

        /* Fetch the tunable RX page count; fall back on bogus values. */
        pages = device_getenv_int(rxr->sc->bce_dev, "rx_pages", bce_rx_pages);
        if (pages <= 0 || pages > RX_PAGES_MAX || !powerof2(pages)) {
                device_printf(rxr->sc->bce_dev, "invalid # of RX pages\n");
                pages = RX_PAGES_DEFAULT;
        }
        rxr->rx_pages = pages;

        /* Per-chain-page bookkeeping arrays. */
        rxr->rx_bd_chain_map = kmalloc(sizeof(bus_dmamap_t) * rxr->rx_pages,
            M_DEVBUF, M_WAITOK | M_ZERO);
        rxr->rx_bd_chain = kmalloc(sizeof(struct rx_bd *) * rxr->rx_pages,
            M_DEVBUF, M_WAITOK | M_ZERO);
        rxr->rx_bd_chain_paddr = kmalloc(sizeof(bus_addr_t) * rxr->rx_pages,
            M_DEVBUF, M_WAITOK | M_ZERO);

        /* Per-buffer-descriptor mbuf map/pointer/paddr arrays. */
        rxr->rx_mbuf_map = kmalloc(sizeof(bus_dmamap_t) * TOTAL_RX_BD(rxr),
            M_DEVBUF, M_WAITOK | M_ZERO);
        rxr->rx_mbuf_ptr = kmalloc(sizeof(struct mbuf *) * TOTAL_RX_BD(rxr),
            M_DEVBUF, M_WAITOK | M_ZERO);
        rxr->rx_mbuf_paddr = kmalloc(sizeof(bus_addr_t) * TOTAL_RX_BD(rxr),
            M_DEVBUF, M_WAITOK | M_ZERO);

        /*
         * Create a DMA tag for the RX buffer descriptor chain,
         * allocate and clear the memory, and fetch the physical
         * address of the blocks.
         */
        rc = bus_dma_tag_create(rxr->sc->parent_tag, BCM_PAGE_SIZE, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            BCE_RX_CHAIN_PAGE_SZ, 1, BCE_RX_CHAIN_PAGE_SZ,
            0, &rxr->rx_bd_chain_tag);
        if (rc != 0) {
                device_printf(rxr->sc->bce_dev, "Could not allocate "
                    "RX descriptor chain DMA tag!\n");
                return rc;
        }

        for (i = 0; i < rxr->rx_pages; i++) {
                bus_addr_t busaddr;

                rc = bus_dmamem_alloc(rxr->rx_bd_chain_tag,
                    (void **)&rxr->rx_bd_chain[i],
                    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
                    &rxr->rx_bd_chain_map[i]);
                if (rc != 0) {
                        device_printf(rxr->sc->bce_dev,
                            "Could not allocate %dth RX descriptor "
                            "chain DMA memory!\n", i);
                        return rc;
                }

                rc = bus_dmamap_load(rxr->rx_bd_chain_tag,
                    rxr->rx_bd_chain_map[i],
                    rxr->rx_bd_chain[i],
                    BCE_RX_CHAIN_PAGE_SZ,
                    bce_dma_map_addr, &busaddr,
                    BUS_DMA_WAITOK);
                if (rc != 0) {
                        /* Coherent memory must load synchronously. */
                        if (rc == EINPROGRESS) {
                                panic("%s coherent memory loading "
                                    "is still in progress!",
                                    rxr->sc->arpcom.ac_if.if_xname);
                        }
                        device_printf(rxr->sc->bce_dev,
                            "Could not map %dth RX descriptor "
                            "chain DMA memory!\n", i);
                        bus_dmamem_free(rxr->rx_bd_chain_tag,
                            rxr->rx_bd_chain[i],
                            rxr->rx_bd_chain_map[i]);
                        rxr->rx_bd_chain[i] = NULL;
                        return rc;
                }

                rxr->rx_bd_chain_paddr[i] = busaddr;
        }

        /* Create a DMA tag for RX mbufs. */
        rc = bus_dma_tag_create(rxr->sc->parent_tag, BCE_DMA_RX_ALIGN, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            MCLBYTES, 1, MCLBYTES,
            BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | BUS_DMA_WAITOK,
            &rxr->rx_mbuf_tag);
        if (rc != 0) {
                device_printf(rxr->sc->bce_dev,
                    "Could not allocate RX mbuf DMA tag!\n");
                return rc;
        }

        /* Create tmp DMA map for RX mbuf clusters. */
        rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
            &rxr->rx_mbuf_tmpmap);
        if (rc != 0) {
                bus_dma_tag_destroy(rxr->rx_mbuf_tag);
                rxr->rx_mbuf_tag = NULL;

                device_printf(rxr->sc->bce_dev,
                    "Could not create RX mbuf tmp DMA map!\n");
                return rc;
        }

        /* Create DMA maps for the RX mbuf clusters. */
        for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
                rc = bus_dmamap_create(rxr->rx_mbuf_tag, BUS_DMA_WAITOK,
                    &rxr->rx_mbuf_map[i]);
                if (rc != 0) {
                        int j;

                        /* Unwind the maps created so far. */
                        for (j = 0; j < i; ++j) {
                                bus_dmamap_destroy(rxr->rx_mbuf_tag,
                                    rxr->rx_mbuf_map[j]);
                        }
                        bus_dma_tag_destroy(rxr->rx_mbuf_tag);
                        rxr->rx_mbuf_tag = NULL;

                        device_printf(rxr->sc->bce_dev, "Unable to create "
                            "%dth RX mbuf DMA map!\n", i);
                        return rc;
                }
        }
        return 0;
}
2366
2367 /****************************************************************************/
2368 /* Allocate any DMA memory needed by the driver.                            */
2369 /*                                                                          */
2370 /* Allocates DMA memory needed for the various global structures needed by  */
2371 /* hardware.                                                                */
2372 /*                                                                          */
2373 /* Memory alignment requirements:                                           */
2374 /* -----------------+----------+----------+----------+----------+           */
2375 /*  Data Structure  |   5706   |   5708   |   5709   |   5716   |           */
2376 /* -----------------+----------+----------+----------+----------+           */
2377 /* Status Block     | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2378 /* Statistics Block | 8 bytes  | 8 bytes  | 16 bytes | 16 bytes |           */
2379 /* RX Buffers       | 16 bytes | 16 bytes | 16 bytes | 16 bytes |           */
2380 /* PG Buffers       |   none   |   none   |   none   |   none   |           */
2381 /* TX Buffers       |   none   |   none   |   none   |   none   |           */
2382 /* Chain Pages(1)   |   4KiB   |   4KiB   |   4KiB   |   4KiB   |           */
2383 /* Context Pages(1) |   N/A    |   N/A    |   4KiB   |   4KiB   |           */
2384 /* -----------------+----------+----------+----------+----------+           */
2385 /*                                                                          */
/* (1) Must align with CPU page size (BCM_PAGE_SIZE).                       */
2387 /*                                                                          */
2388 /* Returns:                                                                 */
2389 /*   0 for success, positive value for failure.                             */
2390 /****************************************************************************/
static int
bce_dma_alloc(struct bce_softc *sc)
{
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int i, rc = 0;
        bus_addr_t busaddr, max_busaddr;
        bus_size_t status_align, stats_align, status_size;

        /*
         * The embedded PCIe to PCI-X bridge (EPB)
         * in the 5708 cannot address memory above
         * 40 bits (E7_5708CB1_23043 & E6_5708SB1_23043).
         */
        if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5708)
                max_busaddr = BCE_BUS_SPACE_MAXADDR;
        else
                max_busaddr = BUS_SPACE_MAXADDR;

        /*
         * BCM5709 and BCM5716 use host memory as a cache for context
         * memory; they also require 16 byte alignment for the status
         * and statistics blocks (see the table above).
         */
        if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
            BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
                sc->ctx_pages = BCE_CTX_BLK_SZ / BCM_PAGE_SIZE;
                if (sc->ctx_pages == 0)
                        sc->ctx_pages = 1;
                if (sc->ctx_pages > BCE_CTX_PAGES) {
                        device_printf(sc->bce_dev, "excessive ctx pages %d\n",
                            sc->ctx_pages);
                        return ENOMEM;
                }
                status_align = 16;
                stats_align = 16;
        } else {
                status_align = 8;
                stats_align = 8;
        }

        /*
         * Each MSI-X vector needs a status block; each status block
         * consumes 128 bytes and is 128-byte aligned.
         */
        if (sc->rx_ring_cnt > 1) {
                status_size = BCE_MSIX_MAX * BCE_STATUS_BLK_MSIX_ALIGN;
                status_align = BCE_STATUS_BLK_MSIX_ALIGN;
        } else {
                status_size = BCE_STATUS_BLK_SZ;
        }

        /*
         * Allocate the parent bus DMA tag appropriate for PCI.
         */
        rc = bus_dma_tag_create(NULL, 1, BCE_DMA_BOUNDARY,
                                max_busaddr, BUS_SPACE_MAXADDR,
                                NULL, NULL,
                                BUS_SPACE_MAXSIZE_32BIT, 0,
                                BUS_SPACE_MAXSIZE_32BIT,
                                0, &sc->parent_tag);
        if (rc != 0) {
                if_printf(ifp, "Could not allocate parent DMA tag!\n");
                return rc;
        }

        /*
         * Allocate status block.
         */
        sc->status_block = bus_dmamem_coherent_any(sc->parent_tag,
                                status_align, status_size,
                                BUS_DMA_WAITOK | BUS_DMA_ZERO,
                                &sc->status_tag, &sc->status_map,
                                &sc->status_block_paddr);
        if (sc->status_block == NULL) {
                if_printf(ifp, "Could not allocate status block!\n");
                return ENOMEM;
        }

        /*
         * Allocate statistics block.
         */
        sc->stats_block = bus_dmamem_coherent_any(sc->parent_tag,
                                stats_align, BCE_STATS_BLK_SZ,
                                BUS_DMA_WAITOK | BUS_DMA_ZERO,
                                &sc->stats_tag, &sc->stats_map,
                                &sc->stats_block_paddr);
        if (sc->stats_block == NULL) {
                if_printf(ifp, "Could not allocate statistics block!\n");
                return ENOMEM;
        }

        /*
         * Allocate context block, if needed (5709/5716 only; sc->ctx_pages
         * stays 0 on other chips, skipping this section).
         */
        if (sc->ctx_pages != 0) {
                rc = bus_dma_tag_create(sc->parent_tag, BCM_PAGE_SIZE, 0,
                                        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
                                        NULL, NULL,
                                        BCM_PAGE_SIZE, 1, BCM_PAGE_SIZE,
                                        0, &sc->ctx_tag);
                if (rc != 0) {
                        if_printf(ifp, "Could not allocate "
                                  "context block DMA tag!\n");
                        return rc;
                }

                for (i = 0; i < sc->ctx_pages; i++) {
                        rc = bus_dmamem_alloc(sc->ctx_tag,
                                              (void **)&sc->ctx_block[i],
                                              BUS_DMA_WAITOK | BUS_DMA_ZERO |
                                              BUS_DMA_COHERENT,
                                              &sc->ctx_map[i]);
                        if (rc != 0) {
                                if_printf(ifp, "Could not allocate %dth context "
                                          "DMA memory!\n", i);
                                return rc;
                        }

                        rc = bus_dmamap_load(sc->ctx_tag, sc->ctx_map[i],
                                             sc->ctx_block[i], BCM_PAGE_SIZE,
                                             bce_dma_map_addr, &busaddr,
                                             BUS_DMA_WAITOK);
                        if (rc != 0) {
                                /* Coherent memory must load synchronously. */
                                if (rc == EINPROGRESS) {
                                        panic("%s coherent memory loading "
                                              "is still in progress!", ifp->if_xname);
                                }
                                if_printf(ifp, "Could not map %dth context "
                                          "DMA memory!\n", i);
                                bus_dmamem_free(sc->ctx_tag, sc->ctx_block[i],
                                                sc->ctx_map[i]);
                                sc->ctx_block[i] = NULL;
                                return rc;
                        }
                        sc->ctx_paddr[i] = busaddr;
                }
        }

        /*
         * Create the TX rings.  Ring 0 uses the legacy consumer index in
         * the main status block; additional rings (MSI-X/TSS) each get a
         * consumer index in their per-vector MSI-X status block.
         */
        sc->tx_rings = kmalloc_cachealign(
            sizeof(struct bce_tx_ring) * sc->tx_ring_cnt, M_DEVBUF,
            M_WAITOK | M_ZERO);
        for (i = 0; i < sc->tx_ring_cnt; ++i) {
                sc->tx_rings[i].sc = sc;
                if (i == 0) {
                        sc->tx_rings[i].tx_cid = TX_CID;
                        sc->tx_rings[i].tx_hw_cons =
                            &sc->status_block->status_tx_quick_consumer_index0;
                } else {
                        /* The ith MSI-X status block within the big block. */
                        struct status_block_msix *sblk =
                            (struct status_block_msix *)
                            (((uint8_t *)(sc->status_block)) +
                             (i * BCE_STATUS_BLK_MSIX_ALIGN));

                        sc->tx_rings[i].tx_cid = TX_TSS_CID + i - 1;
                        sc->tx_rings[i].tx_hw_cons =
                            &sblk->status_tx_quick_consumer_index;
                }

                rc = bce_create_tx_ring(&sc->tx_rings[i]);
                if (rc != 0) {
                        device_printf(sc->bce_dev,
                            "can't create %dth tx ring\n", i);
                        return rc;
                }
        }

        /*
         * Create the RX rings, wired up to the status blocks the same
         * way as the TX rings above (ring 0 legacy, others MSI-X/RSS).
         */
        sc->rx_rings = kmalloc_cachealign(
            sizeof(struct bce_rx_ring) * sc->rx_ring_cnt, M_DEVBUF,
            M_WAITOK | M_ZERO);
        for (i = 0; i < sc->rx_ring_cnt; ++i) {
                sc->rx_rings[i].sc = sc;
                sc->rx_rings[i].idx = i;
                if (i == 0) {
                        sc->rx_rings[i].rx_cid = RX_CID;
                        sc->rx_rings[i].rx_hw_cons =
                            &sc->status_block->status_rx_quick_consumer_index0;
                        sc->rx_rings[i].hw_status_idx =
                            &sc->status_block->status_idx;
                } else {
                        /* The ith MSI-X status block within the big block. */
                        struct status_block_msix *sblk =
                            (struct status_block_msix *)
                            (((uint8_t *)(sc->status_block)) +
                             (i * BCE_STATUS_BLK_MSIX_ALIGN));

                        sc->rx_rings[i].rx_cid = RX_RSS_CID + i - 1;
                        sc->rx_rings[i].rx_hw_cons =
                            &sblk->status_rx_quick_consumer_index;
                        sc->rx_rings[i].hw_status_idx = &sblk->status_idx;
                }

                rc = bce_create_rx_ring(&sc->rx_rings[i]);
                if (rc != 0) {
                        device_printf(sc->bce_dev,
                            "can't create %dth rx ring\n", i);
                        return rc;
                }
        }

        return 0;
}
2589
2590 /****************************************************************************/
2591 /* Firmware synchronization.                                                */
2592 /*                                                                          */
2593 /* Before performing certain events such as a chip reset, synchronize with  */
2594 /* the firmware first.                                                      */
2595 /*                                                                          */
2596 /* Returns:                                                                 */
2597 /*   0 for success, positive value for failure.                             */
2598 /****************************************************************************/
static int
bce_fw_sync(struct bce_softc *sc, uint32_t msg_data)
{
        int i, rc = 0;
        uint32_t val;

        /* Don't waste any time if we've timed out before. */
        if (sc->bce_fw_timed_out)
                return EBUSY;

        /*
         * Increment the message sequence number; the bootcode echoes
         * the sequence back in its ACK so we can match request/response.
         */
        sc->bce_fw_wr_seq++;
        msg_data |= sc->bce_fw_wr_seq;

        /* Send the message to the bootcode driver mailbox. */
        bce_shmem_wr(sc, BCE_DRV_MB, msg_data);

        /* Wait for the bootcode to acknowledge the message. */
        for (i = 0; i < FW_ACK_TIME_OUT_MS; i++) {
                /* Check for a response in the bootcode firmware mailbox. */
                val = bce_shmem_rd(sc, BCE_FW_MB);
                if ((val & BCE_FW_MSG_ACK) == (msg_data & BCE_DRV_MSG_SEQ))
                        break;
                DELAY(1000);
        }

        /*
         * If we've timed out, tell the bootcode that we've stopped waiting.
         * WAIT0 messages are exempt from the timeout handling below.
         */
        if ((val & BCE_FW_MSG_ACK) != (msg_data & BCE_DRV_MSG_SEQ) &&
            (msg_data & BCE_DRV_MSG_DATA) != BCE_DRV_MSG_DATA_WAIT0) {
                if_printf(&sc->arpcom.ac_if,
                          "Firmware synchronization timeout! "
                          "msg_data = 0x%08X\n", msg_data);

                /* Notify the bootcode of the timeout condition. */
                msg_data &= ~BCE_DRV_MSG_CODE;
                msg_data |= BCE_DRV_MSG_CODE_FW_TIMEOUT;

                bce_shmem_wr(sc, BCE_DRV_MB, msg_data);

                /* Skip further firmware waits until the next reset. */
                sc->bce_fw_timed_out = 1;
                rc = EBUSY;
        }
        return rc;
}
2642
2643 /****************************************************************************/
2644 /* Load Receive Virtual 2 Physical (RV2P) processor firmware.               */
2645 /*                                                                          */
2646 /* Returns:                                                                 */
2647 /*   Nothing.                                                               */
2648 /****************************************************************************/
static void
bce_load_rv2p_fw(struct bce_softc *sc, uint32_t *rv2p_code,
                 uint32_t rv2p_code_len, uint32_t rv2p_proc)
{
        int i;
        uint32_t val;

        /*
         * Each RV2P instruction is 8 bytes: write the high and low
         * 32-bit words, then commit them to instruction slot i/8 of
         * the selected processor (PROC1 or PROC2).
         */
        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(sc, BCE_RV2P_INSTR_HIGH, *rv2p_code);
                rv2p_code++;
                REG_WR(sc, BCE_RV2P_INSTR_LOW, *rv2p_code);
                rv2p_code++;

                if (rv2p_proc == RV2P_PROC1) {
                        val = (i / 8) | BCE_RV2P_PROC1_ADDR_CMD_RDWR;
                        REG_WR(sc, BCE_RV2P_PROC1_ADDR_CMD, val);
                } else {
                        val = (i / 8) | BCE_RV2P_PROC2_ADDR_CMD_RDWR;
                        REG_WR(sc, BCE_RV2P_PROC2_ADDR_CMD, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1)
                REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC1_RESET);
        else
                REG_WR(sc, BCE_RV2P_COMMAND, BCE_RV2P_COMMAND_PROC2_RESET);
}
2677
2678 /****************************************************************************/
2679 /* Load RISC processor firmware.                                            */
2680 /*                                                                          */
2681 /* Loads firmware from the file if_bcefw.h into the scratchpad memory       */
2682 /* associated with a particular processor.                                  */
2683 /*                                                                          */
2684 /* Returns:                                                                 */
2685 /*   Nothing.                                                               */
2686 /****************************************************************************/
2687 static void
2688 bce_load_cpu_fw(struct bce_softc *sc, struct cpu_reg *cpu_reg,
2689                 struct fw_info *fw)
2690 {
2691         uint32_t offset;
2692         int j;
2693
2694         bce_halt_cpu(sc, cpu_reg);
2695
2696         /* Load the Text area. */
2697         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2698         if (fw->text) {
2699                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4)
2700                         REG_WR_IND(sc, offset, fw->text[j]);
2701         }
2702
2703         /* Load the Data area. */
2704         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2705         if (fw->data) {
2706                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4)
2707                         REG_WR_IND(sc, offset, fw->data[j]);
2708         }
2709
2710         /* Load the SBSS area. */
2711         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2712         if (fw->sbss) {
2713                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4)
2714                         REG_WR_IND(sc, offset, fw->sbss[j]);
2715         }
2716
2717         /* Load the BSS area. */
2718         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2719         if (fw->bss) {
2720                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4)
2721                         REG_WR_IND(sc, offset, fw->bss[j]);
2722         }
2723
2724         /* Load the Read-Only area. */
2725         offset = cpu_reg->spad_base +
2726                 (fw->rodata_addr - cpu_reg->mips_view_base);
2727         if (fw->rodata) {
2728                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4)
2729                         REG_WR_IND(sc, offset, fw->rodata[j]);
2730         }
2731
2732         /* Clear the pre-fetch instruction and set the FW start address. */
2733         REG_WR_IND(sc, cpu_reg->inst, 0);
2734         REG_WR_IND(sc, cpu_reg->pc, fw->start_addr);
2735 }
2736
2737 /****************************************************************************/
2738 /* Starts the RISC processor.                                               */
2739 /*                                                                          */
2740 /* Assumes the CPU starting address has already been set.                   */
2741 /*                                                                          */
2742 /* Returns:                                                                 */
2743 /*   Nothing.                                                               */
2744 /****************************************************************************/
2745 static void
2746 bce_start_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2747 {
2748         uint32_t val;
2749
2750         /* Start the CPU. */
2751         val = REG_RD_IND(sc, cpu_reg->mode);
2752         val &= ~cpu_reg->mode_value_halt;
2753         REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2754         REG_WR_IND(sc, cpu_reg->mode, val);
2755 }
2756
2757 /****************************************************************************/
2758 /* Halts the RISC processor.                                                */
2759 /*                                                                          */
2760 /* Returns:                                                                 */
2761 /*   Nothing.                                                               */
2762 /****************************************************************************/
2763 static void
2764 bce_halt_cpu(struct bce_softc *sc, struct cpu_reg *cpu_reg)
2765 {
2766         uint32_t val;
2767
2768         /* Halt the CPU. */
2769         val = REG_RD_IND(sc, cpu_reg->mode);
2770         val |= cpu_reg->mode_value_halt;
2771         REG_WR_IND(sc, cpu_reg->mode, val);
2772         REG_WR_IND(sc, cpu_reg->state, cpu_reg->state_value_clear);
2773 }
2774
2775 /****************************************************************************/
2776 /* Start the RX CPU.                                                        */
2777 /*                                                                          */
2778 /* Returns:                                                                 */
2779 /*   Nothing.                                                               */
2780 /****************************************************************************/
2781 static void
2782 bce_start_rxp_cpu(struct bce_softc *sc)
2783 {
2784         struct cpu_reg cpu_reg;
2785
2786         cpu_reg.mode = BCE_RXP_CPU_MODE;
2787         cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2788         cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2789         cpu_reg.state = BCE_RXP_CPU_STATE;
2790         cpu_reg.state_value_clear = 0xffffff;
2791         cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2792         cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2793         cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2794         cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2795         cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2796         cpu_reg.spad_base = BCE_RXP_SCRATCH;
2797         cpu_reg.mips_view_base = 0x8000000;
2798
2799         bce_start_cpu(sc, &cpu_reg);
2800 }
2801
2802 /****************************************************************************/
2803 /* Initialize the RX CPU.                                                   */
2804 /*                                                                          */
2805 /* Returns:                                                                 */
2806 /*   Nothing.                                                               */
2807 /****************************************************************************/
2808 static void
2809 bce_init_rxp_cpu(struct bce_softc *sc)
2810 {
2811         struct cpu_reg cpu_reg;
2812         struct fw_info fw;
2813
2814         cpu_reg.mode = BCE_RXP_CPU_MODE;
2815         cpu_reg.mode_value_halt = BCE_RXP_CPU_MODE_SOFT_HALT;
2816         cpu_reg.mode_value_sstep = BCE_RXP_CPU_MODE_STEP_ENA;
2817         cpu_reg.state = BCE_RXP_CPU_STATE;
2818         cpu_reg.state_value_clear = 0xffffff;
2819         cpu_reg.gpr0 = BCE_RXP_CPU_REG_FILE;
2820         cpu_reg.evmask = BCE_RXP_CPU_EVENT_MASK;
2821         cpu_reg.pc = BCE_RXP_CPU_PROGRAM_COUNTER;
2822         cpu_reg.inst = BCE_RXP_CPU_INSTRUCTION;
2823         cpu_reg.bp = BCE_RXP_CPU_HW_BREAKPOINT;
2824         cpu_reg.spad_base = BCE_RXP_SCRATCH;
2825         cpu_reg.mips_view_base = 0x8000000;
2826
2827         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2828             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2829                 fw.ver_major = bce_RXP_b09FwReleaseMajor;
2830                 fw.ver_minor = bce_RXP_b09FwReleaseMinor;
2831                 fw.ver_fix = bce_RXP_b09FwReleaseFix;
2832                 fw.start_addr = bce_RXP_b09FwStartAddr;
2833
2834                 fw.text_addr = bce_RXP_b09FwTextAddr;
2835                 fw.text_len = bce_RXP_b09FwTextLen;
2836                 fw.text_index = 0;
2837                 fw.text = bce_RXP_b09FwText;
2838
2839                 fw.data_addr = bce_RXP_b09FwDataAddr;
2840                 fw.data_len = bce_RXP_b09FwDataLen;
2841                 fw.data_index = 0;
2842                 fw.data = bce_RXP_b09FwData;
2843
2844                 fw.sbss_addr = bce_RXP_b09FwSbssAddr;
2845                 fw.sbss_len = bce_RXP_b09FwSbssLen;
2846                 fw.sbss_index = 0;
2847                 fw.sbss = bce_RXP_b09FwSbss;
2848
2849                 fw.bss_addr = bce_RXP_b09FwBssAddr;
2850                 fw.bss_len = bce_RXP_b09FwBssLen;
2851                 fw.bss_index = 0;
2852                 fw.bss = bce_RXP_b09FwBss;
2853
2854                 fw.rodata_addr = bce_RXP_b09FwRodataAddr;
2855                 fw.rodata_len = bce_RXP_b09FwRodataLen;
2856                 fw.rodata_index = 0;
2857                 fw.rodata = bce_RXP_b09FwRodata;
2858         } else {
2859                 fw.ver_major = bce_RXP_b06FwReleaseMajor;
2860                 fw.ver_minor = bce_RXP_b06FwReleaseMinor;
2861                 fw.ver_fix = bce_RXP_b06FwReleaseFix;
2862                 fw.start_addr = bce_RXP_b06FwStartAddr;
2863
2864                 fw.text_addr = bce_RXP_b06FwTextAddr;
2865                 fw.text_len = bce_RXP_b06FwTextLen;
2866                 fw.text_index = 0;
2867                 fw.text = bce_RXP_b06FwText;
2868
2869                 fw.data_addr = bce_RXP_b06FwDataAddr;
2870                 fw.data_len = bce_RXP_b06FwDataLen;
2871                 fw.data_index = 0;
2872                 fw.data = bce_RXP_b06FwData;
2873
2874                 fw.sbss_addr = bce_RXP_b06FwSbssAddr;
2875                 fw.sbss_len = bce_RXP_b06FwSbssLen;
2876                 fw.sbss_index = 0;
2877                 fw.sbss = bce_RXP_b06FwSbss;
2878
2879                 fw.bss_addr = bce_RXP_b06FwBssAddr;
2880                 fw.bss_len = bce_RXP_b06FwBssLen;
2881                 fw.bss_index = 0;
2882                 fw.bss = bce_RXP_b06FwBss;
2883
2884                 fw.rodata_addr = bce_RXP_b06FwRodataAddr;
2885                 fw.rodata_len = bce_RXP_b06FwRodataLen;
2886                 fw.rodata_index = 0;
2887                 fw.rodata = bce_RXP_b06FwRodata;
2888         }
2889
2890         bce_load_cpu_fw(sc, &cpu_reg, &fw);
2891         /* Delay RXP start until initialization is complete. */
2892 }
2893
2894 /****************************************************************************/
2895 /* Initialize the TX CPU.                                                   */
2896 /*                                                                          */
2897 /* Returns:                                                                 */
2898 /*   Nothing.                                                               */
2899 /****************************************************************************/
2900 static void
2901 bce_init_txp_cpu(struct bce_softc *sc)
2902 {
2903         struct cpu_reg cpu_reg;
2904         struct fw_info fw;
2905
2906         cpu_reg.mode = BCE_TXP_CPU_MODE;
2907         cpu_reg.mode_value_halt = BCE_TXP_CPU_MODE_SOFT_HALT;
2908         cpu_reg.mode_value_sstep = BCE_TXP_CPU_MODE_STEP_ENA;
2909         cpu_reg.state = BCE_TXP_CPU_STATE;
2910         cpu_reg.state_value_clear = 0xffffff;
2911         cpu_reg.gpr0 = BCE_TXP_CPU_REG_FILE;
2912         cpu_reg.evmask = BCE_TXP_CPU_EVENT_MASK;
2913         cpu_reg.pc = BCE_TXP_CPU_PROGRAM_COUNTER;
2914         cpu_reg.inst = BCE_TXP_CPU_INSTRUCTION;
2915         cpu_reg.bp = BCE_TXP_CPU_HW_BREAKPOINT;
2916         cpu_reg.spad_base = BCE_TXP_SCRATCH;
2917         cpu_reg.mips_view_base = 0x8000000;
2918
2919         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
2920             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
2921                 fw.ver_major = bce_TXP_b09FwReleaseMajor;
2922                 fw.ver_minor = bce_TXP_b09FwReleaseMinor;
2923                 fw.ver_fix = bce_TXP_b09FwReleaseFix;
2924                 fw.start_addr = bce_TXP_b09FwStartAddr;
2925
2926                 fw.text_addr = bce_TXP_b09FwTextAddr;
2927                 fw.text_len = bce_TXP_b09FwTextLen;
2928                 fw.text_index = 0;
2929                 fw.text = bce_TXP_b09FwText;
2930
2931                 fw.data_addr = bce_TXP_b09FwDataAddr;
2932                 fw.data_len = bce_TXP_b09FwDataLen;
2933                 fw.data_index = 0;
2934                 fw.data = bce_TXP_b09FwData;
2935
2936                 fw.sbss_addr = bce_TXP_b09FwSbssAddr;
2937                 fw.sbss_len = bce_TXP_b09FwSbssLen;
2938                 fw.sbss_index = 0;
2939                 fw.sbss = bce_TXP_b09FwSbss;
2940
2941                 fw.bss_addr = bce_TXP_b09FwBssAddr;
2942                 fw.bss_len = bce_TXP_b09FwBssLen;
2943                 fw.bss_index = 0;
2944                 fw.bss = bce_TXP_b09FwBss;
2945
2946                 fw.rodata_addr = bce_TXP_b09FwRodataAddr;
2947                 fw.rodata_len = bce_TXP_b09FwRodataLen;
2948                 fw.rodata_index = 0;
2949                 fw.rodata = bce_TXP_b09FwRodata;
2950         } else {
2951                 fw.ver_major = bce_TXP_b06FwReleaseMajor;
2952                 fw.ver_minor = bce_TXP_b06FwReleaseMinor;
2953                 fw.ver_fix = bce_TXP_b06FwReleaseFix;
2954                 fw.start_addr = bce_TXP_b06FwStartAddr;
2955
2956                 fw.text_addr = bce_TXP_b06FwTextAddr;
2957                 fw.text_len = bce_TXP_b06FwTextLen;
2958                 fw.text_index = 0;
2959                 fw.text = bce_TXP_b06FwText;
2960
2961                 fw.data_addr = bce_TXP_b06FwDataAddr;
2962                 fw.data_len = bce_TXP_b06FwDataLen;
2963                 fw.data_index = 0;
2964                 fw.data = bce_TXP_b06FwData;
2965
2966                 fw.sbss_addr = bce_TXP_b06FwSbssAddr;
2967                 fw.sbss_len = bce_TXP_b06FwSbssLen;
2968                 fw.sbss_index = 0;
2969                 fw.sbss = bce_TXP_b06FwSbss;
2970
2971                 fw.bss_addr = bce_TXP_b06FwBssAddr;
2972                 fw.bss_len = bce_TXP_b06FwBssLen;
2973                 fw.bss_index = 0;
2974                 fw.bss = bce_TXP_b06FwBss;
2975
2976                 fw.rodata_addr = bce_TXP_b06FwRodataAddr;
2977                 fw.rodata_len = bce_TXP_b06FwRodataLen;
2978                 fw.rodata_index = 0;
2979                 fw.rodata = bce_TXP_b06FwRodata;
2980         }
2981
2982         bce_load_cpu_fw(sc, &cpu_reg, &fw);
2983         bce_start_cpu(sc, &cpu_reg);
2984 }
2985
2986 /****************************************************************************/
2987 /* Initialize the TPAT CPU.                                                 */
2988 /*                                                                          */
2989 /* Returns:                                                                 */
2990 /*   Nothing.                                                               */
2991 /****************************************************************************/
2992 static void
2993 bce_init_tpat_cpu(struct bce_softc *sc)
2994 {
2995         struct cpu_reg cpu_reg;
2996         struct fw_info fw;
2997
2998         cpu_reg.mode = BCE_TPAT_CPU_MODE;
2999         cpu_reg.mode_value_halt = BCE_TPAT_CPU_MODE_SOFT_HALT;
3000         cpu_reg.mode_value_sstep = BCE_TPAT_CPU_MODE_STEP_ENA;
3001         cpu_reg.state = BCE_TPAT_CPU_STATE;
3002         cpu_reg.state_value_clear = 0xffffff;
3003         cpu_reg.gpr0 = BCE_TPAT_CPU_REG_FILE;
3004         cpu_reg.evmask = BCE_TPAT_CPU_EVENT_MASK;
3005         cpu_reg.pc = BCE_TPAT_CPU_PROGRAM_COUNTER;
3006         cpu_reg.inst = BCE_TPAT_CPU_INSTRUCTION;
3007         cpu_reg.bp = BCE_TPAT_CPU_HW_BREAKPOINT;
3008         cpu_reg.spad_base = BCE_TPAT_SCRATCH;
3009         cpu_reg.mips_view_base = 0x8000000;
3010
3011         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3012             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3013                 fw.ver_major = bce_TPAT_b09FwReleaseMajor;
3014                 fw.ver_minor = bce_TPAT_b09FwReleaseMinor;
3015                 fw.ver_fix = bce_TPAT_b09FwReleaseFix;
3016                 fw.start_addr = bce_TPAT_b09FwStartAddr;
3017
3018                 fw.text_addr = bce_TPAT_b09FwTextAddr;
3019                 fw.text_len = bce_TPAT_b09FwTextLen;
3020                 fw.text_index = 0;
3021                 fw.text = bce_TPAT_b09FwText;
3022
3023                 fw.data_addr = bce_TPAT_b09FwDataAddr;
3024                 fw.data_len = bce_TPAT_b09FwDataLen;
3025                 fw.data_index = 0;
3026                 fw.data = bce_TPAT_b09FwData;
3027
3028                 fw.sbss_addr = bce_TPAT_b09FwSbssAddr;
3029                 fw.sbss_len = bce_TPAT_b09FwSbssLen;
3030                 fw.sbss_index = 0;
3031                 fw.sbss = bce_TPAT_b09FwSbss;
3032
3033                 fw.bss_addr = bce_TPAT_b09FwBssAddr;
3034                 fw.bss_len = bce_TPAT_b09FwBssLen;
3035                 fw.bss_index = 0;
3036                 fw.bss = bce_TPAT_b09FwBss;
3037
3038                 fw.rodata_addr = bce_TPAT_b09FwRodataAddr;
3039                 fw.rodata_len = bce_TPAT_b09FwRodataLen;
3040                 fw.rodata_index = 0;
3041                 fw.rodata = bce_TPAT_b09FwRodata;
3042         } else {
3043                 fw.ver_major = bce_TPAT_b06FwReleaseMajor;
3044                 fw.ver_minor = bce_TPAT_b06FwReleaseMinor;
3045                 fw.ver_fix = bce_TPAT_b06FwReleaseFix;
3046                 fw.start_addr = bce_TPAT_b06FwStartAddr;
3047
3048                 fw.text_addr = bce_TPAT_b06FwTextAddr;
3049                 fw.text_len = bce_TPAT_b06FwTextLen;
3050                 fw.text_index = 0;
3051                 fw.text = bce_TPAT_b06FwText;
3052
3053                 fw.data_addr = bce_TPAT_b06FwDataAddr;
3054                 fw.data_len = bce_TPAT_b06FwDataLen;
3055                 fw.data_index = 0;
3056                 fw.data = bce_TPAT_b06FwData;
3057
3058                 fw.sbss_addr = bce_TPAT_b06FwSbssAddr;
3059                 fw.sbss_len = bce_TPAT_b06FwSbssLen;
3060                 fw.sbss_index = 0;
3061                 fw.sbss = bce_TPAT_b06FwSbss;
3062
3063                 fw.bss_addr = bce_TPAT_b06FwBssAddr;
3064                 fw.bss_len = bce_TPAT_b06FwBssLen;
3065                 fw.bss_index = 0;
3066                 fw.bss = bce_TPAT_b06FwBss;
3067
3068                 fw.rodata_addr = bce_TPAT_b06FwRodataAddr;
3069                 fw.rodata_len = bce_TPAT_b06FwRodataLen;
3070                 fw.rodata_index = 0;
3071                 fw.rodata = bce_TPAT_b06FwRodata;
3072         }
3073
3074         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3075         bce_start_cpu(sc, &cpu_reg);
3076 }
3077
3078 /****************************************************************************/
3079 /* Initialize the CP CPU.                                                   */
3080 /*                                                                          */
3081 /* Returns:                                                                 */
3082 /*   Nothing.                                                               */
3083 /****************************************************************************/
3084 static void
3085 bce_init_cp_cpu(struct bce_softc *sc)
3086 {
3087         struct cpu_reg cpu_reg;
3088         struct fw_info fw;
3089
3090         cpu_reg.mode = BCE_CP_CPU_MODE;
3091         cpu_reg.mode_value_halt = BCE_CP_CPU_MODE_SOFT_HALT;
3092         cpu_reg.mode_value_sstep = BCE_CP_CPU_MODE_STEP_ENA;
3093         cpu_reg.state = BCE_CP_CPU_STATE;
3094         cpu_reg.state_value_clear = 0xffffff;
3095         cpu_reg.gpr0 = BCE_CP_CPU_REG_FILE;
3096         cpu_reg.evmask = BCE_CP_CPU_EVENT_MASK;
3097         cpu_reg.pc = BCE_CP_CPU_PROGRAM_COUNTER;
3098         cpu_reg.inst = BCE_CP_CPU_INSTRUCTION;
3099         cpu_reg.bp = BCE_CP_CPU_HW_BREAKPOINT;
3100         cpu_reg.spad_base = BCE_CP_SCRATCH;
3101         cpu_reg.mips_view_base = 0x8000000;
3102
3103         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3104             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3105                 fw.ver_major = bce_CP_b09FwReleaseMajor;
3106                 fw.ver_minor = bce_CP_b09FwReleaseMinor;
3107                 fw.ver_fix = bce_CP_b09FwReleaseFix;
3108                 fw.start_addr = bce_CP_b09FwStartAddr;
3109
3110                 fw.text_addr = bce_CP_b09FwTextAddr;
3111                 fw.text_len = bce_CP_b09FwTextLen;
3112                 fw.text_index = 0;
3113                 fw.text = bce_CP_b09FwText;
3114
3115                 fw.data_addr = bce_CP_b09FwDataAddr;
3116                 fw.data_len = bce_CP_b09FwDataLen;
3117                 fw.data_index = 0;
3118                 fw.data = bce_CP_b09FwData;
3119
3120                 fw.sbss_addr = bce_CP_b09FwSbssAddr;
3121                 fw.sbss_len = bce_CP_b09FwSbssLen;
3122                 fw.sbss_index = 0;
3123                 fw.sbss = bce_CP_b09FwSbss;
3124
3125                 fw.bss_addr = bce_CP_b09FwBssAddr;
3126                 fw.bss_len = bce_CP_b09FwBssLen;
3127                 fw.bss_index = 0;
3128                 fw.bss = bce_CP_b09FwBss;
3129
3130                 fw.rodata_addr = bce_CP_b09FwRodataAddr;
3131                 fw.rodata_len = bce_CP_b09FwRodataLen;
3132                 fw.rodata_index = 0;
3133                 fw.rodata = bce_CP_b09FwRodata;
3134         } else {
3135                 fw.ver_major = bce_CP_b06FwReleaseMajor;
3136                 fw.ver_minor = bce_CP_b06FwReleaseMinor;
3137                 fw.ver_fix = bce_CP_b06FwReleaseFix;
3138                 fw.start_addr = bce_CP_b06FwStartAddr;
3139
3140                 fw.text_addr = bce_CP_b06FwTextAddr;
3141                 fw.text_len = bce_CP_b06FwTextLen;
3142                 fw.text_index = 0;
3143                 fw.text = bce_CP_b06FwText;
3144
3145                 fw.data_addr = bce_CP_b06FwDataAddr;
3146                 fw.data_len = bce_CP_b06FwDataLen;
3147                 fw.data_index = 0;
3148                 fw.data = bce_CP_b06FwData;
3149
3150                 fw.sbss_addr = bce_CP_b06FwSbssAddr;
3151                 fw.sbss_len = bce_CP_b06FwSbssLen;
3152                 fw.sbss_index = 0;
3153                 fw.sbss = bce_CP_b06FwSbss;
3154
3155                 fw.bss_addr = bce_CP_b06FwBssAddr;
3156                 fw.bss_len = bce_CP_b06FwBssLen;
3157                 fw.bss_index = 0;
3158                 fw.bss = bce_CP_b06FwBss;
3159
3160                 fw.rodata_addr = bce_CP_b06FwRodataAddr;
3161                 fw.rodata_len = bce_CP_b06FwRodataLen;
3162                 fw.rodata_index = 0;
3163                 fw.rodata = bce_CP_b06FwRodata;
3164         }
3165
3166         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3167         bce_start_cpu(sc, &cpu_reg);
3168 }
3169
3170 /****************************************************************************/
/* Initialize the COM CPU.                                                  */
3172 /*                                                                          */
3173 /* Returns:                                                                 */
3174 /*   Nothing.                                                               */
3175 /****************************************************************************/
3176 static void
3177 bce_init_com_cpu(struct bce_softc *sc)
3178 {
3179         struct cpu_reg cpu_reg;
3180         struct fw_info fw;
3181
3182         cpu_reg.mode = BCE_COM_CPU_MODE;
3183         cpu_reg.mode_value_halt = BCE_COM_CPU_MODE_SOFT_HALT;
3184         cpu_reg.mode_value_sstep = BCE_COM_CPU_MODE_STEP_ENA;
3185         cpu_reg.state = BCE_COM_CPU_STATE;
3186         cpu_reg.state_value_clear = 0xffffff;
3187         cpu_reg.gpr0 = BCE_COM_CPU_REG_FILE;
3188         cpu_reg.evmask = BCE_COM_CPU_EVENT_MASK;
3189         cpu_reg.pc = BCE_COM_CPU_PROGRAM_COUNTER;
3190         cpu_reg.inst = BCE_COM_CPU_INSTRUCTION;
3191         cpu_reg.bp = BCE_COM_CPU_HW_BREAKPOINT;
3192         cpu_reg.spad_base = BCE_COM_SCRATCH;
3193         cpu_reg.mips_view_base = 0x8000000;
3194
3195         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3196             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3197                 fw.ver_major = bce_COM_b09FwReleaseMajor;
3198                 fw.ver_minor = bce_COM_b09FwReleaseMinor;
3199                 fw.ver_fix = bce_COM_b09FwReleaseFix;
3200                 fw.start_addr = bce_COM_b09FwStartAddr;
3201
3202                 fw.text_addr = bce_COM_b09FwTextAddr;
3203                 fw.text_len = bce_COM_b09FwTextLen;
3204                 fw.text_index = 0;
3205                 fw.text = bce_COM_b09FwText;
3206
3207                 fw.data_addr = bce_COM_b09FwDataAddr;
3208                 fw.data_len = bce_COM_b09FwDataLen;
3209                 fw.data_index = 0;
3210                 fw.data = bce_COM_b09FwData;
3211
3212                 fw.sbss_addr = bce_COM_b09FwSbssAddr;
3213                 fw.sbss_len = bce_COM_b09FwSbssLen;
3214                 fw.sbss_index = 0;
3215                 fw.sbss = bce_COM_b09FwSbss;
3216
3217                 fw.bss_addr = bce_COM_b09FwBssAddr;
3218                 fw.bss_len = bce_COM_b09FwBssLen;
3219                 fw.bss_index = 0;
3220                 fw.bss = bce_COM_b09FwBss;
3221
3222                 fw.rodata_addr = bce_COM_b09FwRodataAddr;
3223                 fw.rodata_len = bce_COM_b09FwRodataLen;
3224                 fw.rodata_index = 0;
3225                 fw.rodata = bce_COM_b09FwRodata;
3226         } else {
3227                 fw.ver_major = bce_COM_b06FwReleaseMajor;
3228                 fw.ver_minor = bce_COM_b06FwReleaseMinor;
3229                 fw.ver_fix = bce_COM_b06FwReleaseFix;
3230                 fw.start_addr = bce_COM_b06FwStartAddr;
3231
3232                 fw.text_addr = bce_COM_b06FwTextAddr;
3233                 fw.text_len = bce_COM_b06FwTextLen;
3234                 fw.text_index = 0;
3235                 fw.text = bce_COM_b06FwText;
3236
3237                 fw.data_addr = bce_COM_b06FwDataAddr;
3238                 fw.data_len = bce_COM_b06FwDataLen;
3239                 fw.data_index = 0;
3240                 fw.data = bce_COM_b06FwData;
3241
3242                 fw.sbss_addr = bce_COM_b06FwSbssAddr;
3243                 fw.sbss_len = bce_COM_b06FwSbssLen;
3244                 fw.sbss_index = 0;
3245                 fw.sbss = bce_COM_b06FwSbss;
3246
3247                 fw.bss_addr = bce_COM_b06FwBssAddr;
3248                 fw.bss_len = bce_COM_b06FwBssLen;
3249                 fw.bss_index = 0;
3250                 fw.bss = bce_COM_b06FwBss;
3251
3252                 fw.rodata_addr = bce_COM_b06FwRodataAddr;
3253                 fw.rodata_len = bce_COM_b06FwRodataLen;
3254                 fw.rodata_index = 0;
3255                 fw.rodata = bce_COM_b06FwRodata;
3256         }
3257
3258         bce_load_cpu_fw(sc, &cpu_reg, &fw);
3259         bce_start_cpu(sc, &cpu_reg);
3260 }
3261
3262 /****************************************************************************/
3263 /* Initialize the RV2P, RX, TX, TPAT, COM, and CP CPUs.                     */
3264 /*                                                                          */
3265 /* Loads the firmware for each CPU and starts the CPU.                      */
3266 /*                                                                          */
3267 /* Returns:                                                                 */
3268 /*   Nothing.                                                               */
3269 /****************************************************************************/
3270 static void
3271 bce_init_cpus(struct bce_softc *sc)
3272 {
3273         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3274             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3275                 if (BCE_CHIP_REV(sc) == BCE_CHIP_REV_Ax) {
3276                         bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc1,
3277                             sizeof(bce_xi90_rv2p_proc1), RV2P_PROC1);
3278                         bce_load_rv2p_fw(sc, bce_xi90_rv2p_proc2,
3279                             sizeof(bce_xi90_rv2p_proc2), RV2P_PROC2);
3280                 } else {
3281                         bce_load_rv2p_fw(sc, bce_xi_rv2p_proc1,
3282                             sizeof(bce_xi_rv2p_proc1), RV2P_PROC1);
3283                         bce_load_rv2p_fw(sc, bce_xi_rv2p_proc2,
3284                             sizeof(bce_xi_rv2p_proc2), RV2P_PROC2);
3285                 }
3286         } else {
3287                 bce_load_rv2p_fw(sc, bce_rv2p_proc1,
3288                     sizeof(bce_rv2p_proc1), RV2P_PROC1);
3289                 bce_load_rv2p_fw(sc, bce_rv2p_proc2,
3290                     sizeof(bce_rv2p_proc2), RV2P_PROC2);
3291         }
3292
3293         bce_init_rxp_cpu(sc);
3294         bce_init_txp_cpu(sc);
3295         bce_init_tpat_cpu(sc);
3296         bce_init_com_cpu(sc);
3297         bce_init_cp_cpu(sc);
3298 }
3299
3300 /****************************************************************************/
3301 /* Initialize context memory.                                               */
3302 /*                                                                          */
3303 /* Clears the memory associated with each Context ID (CID).                 */
3304 /*                                                                          */
/* Returns:                                                                 */
/*   0 for success, ETIMEDOUT for failure.                                  */
3307 /****************************************************************************/
3308 static int
3309 bce_init_ctx(struct bce_softc *sc)
3310 {
3311         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3312             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3313                 /* DRC: Replace this constant value with a #define. */
3314                 int i, retry_cnt = 10;
3315                 uint32_t val;
3316
3317                 /*
3318                  * BCM5709 context memory may be cached
3319                  * in host memory so prepare the host memory
3320                  * for access.
3321                  */
3322                 val = BCE_CTX_COMMAND_ENABLED | BCE_CTX_COMMAND_MEM_INIT |
3323                     (1 << 12);
3324                 val |= (BCM_PAGE_BITS - 8) << 16;
3325                 REG_WR(sc, BCE_CTX_COMMAND, val);
3326
3327                 /* Wait for mem init command to complete. */
3328                 for (i = 0; i < retry_cnt; i++) {
3329                         val = REG_RD(sc, BCE_CTX_COMMAND);
3330                         if (!(val & BCE_CTX_COMMAND_MEM_INIT))
3331                                 break;
3332                         DELAY(2);
3333                 }
3334                 if (i == retry_cnt) {
3335                         device_printf(sc->bce_dev,
3336                             "Context memory initialization failed!\n");
3337                         return ETIMEDOUT;
3338                 }
3339
3340                 for (i = 0; i < sc->ctx_pages; i++) {
3341                         int j;
3342
3343                         /*
3344                          * Set the physical address of the context
3345                          * memory cache.
3346                          */
3347                         REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA0,
3348                             BCE_ADDR_LO(sc->ctx_paddr[i] & 0xfffffff0) |
3349                             BCE_CTX_HOST_PAGE_TBL_DATA0_VALID);
3350                         REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_DATA1,
3351                             BCE_ADDR_HI(sc->ctx_paddr[i]));
3352                         REG_WR(sc, BCE_CTX_HOST_PAGE_TBL_CTRL,
3353                             i | BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
3354
3355                         /*
3356                          * Verify that the context memory write was successful.
3357                          */
3358                         for (j = 0; j < retry_cnt; j++) {
3359                                 val = REG_RD(sc, BCE_CTX_HOST_PAGE_TBL_CTRL);
3360                                 if ((val &
3361                                     BCE_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) == 0)
3362                                         break;
3363                                 DELAY(5);
3364                         }
3365                         if (j == retry_cnt) {
3366                                 device_printf(sc->bce_dev,
3367                                     "Failed to initialize context page!\n");
3368                                 return ETIMEDOUT;
3369                         }
3370                 }
3371         } else {
3372                 uint32_t vcid_addr, offset;
3373
3374                 /*
3375                  * For the 5706/5708, context memory is local to
3376                  * the controller, so initialize the controller
3377                  * context memory.
3378                  */
3379
3380                 vcid_addr = GET_CID_ADDR(96);
3381                 while (vcid_addr) {
3382                         vcid_addr -= PHY_CTX_SIZE;
3383
3384                         REG_WR(sc, BCE_CTX_VIRT_ADDR, 0);
3385                         REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3386
3387                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
3388                                 CTX_WR(sc, 0x00, offset, 0);
3389
3390                         REG_WR(sc, BCE_CTX_VIRT_ADDR, vcid_addr);
3391                         REG_WR(sc, BCE_CTX_PAGE_TBL, vcid_addr);
3392                 }
3393         }
3394         return 0;
3395 }
3396
3397 /****************************************************************************/
3398 /* Fetch the permanent MAC address of the controller.                       */
3399 /*                                                                          */
3400 /* Returns:                                                                 */
3401 /*   Nothing.                                                               */
3402 /****************************************************************************/
3403 static void
3404 bce_get_mac_addr(struct bce_softc *sc)
3405 {
3406         uint32_t mac_lo = 0, mac_hi = 0;
3407
3408         /*
3409          * The NetXtreme II bootcode populates various NIC
3410          * power-on and runtime configuration items in a
3411          * shared memory area.  The factory configured MAC
3412          * address is available from both NVRAM and the
3413          * shared memory area so we'll read the value from
3414          * shared memory for speed.
3415          */
3416
3417         mac_hi = bce_shmem_rd(sc,  BCE_PORT_HW_CFG_MAC_UPPER);
3418         mac_lo = bce_shmem_rd(sc, BCE_PORT_HW_CFG_MAC_LOWER);
3419
3420         if (mac_lo == 0 && mac_hi == 0) {
3421                 if_printf(&sc->arpcom.ac_if, "Invalid Ethernet address!\n");
3422         } else {
3423                 sc->eaddr[0] = (u_char)(mac_hi >> 8);
3424                 sc->eaddr[1] = (u_char)(mac_hi >> 0);
3425                 sc->eaddr[2] = (u_char)(mac_lo >> 24);
3426                 sc->eaddr[3] = (u_char)(mac_lo >> 16);
3427                 sc->eaddr[4] = (u_char)(mac_lo >> 8);
3428                 sc->eaddr[5] = (u_char)(mac_lo >> 0);
3429         }
3430 }
3431
3432 /****************************************************************************/
3433 /* Program the MAC address.                                                 */
3434 /*                                                                          */
3435 /* Returns:                                                                 */
3436 /*   Nothing.                                                               */
3437 /****************************************************************************/
3438 static void
3439 bce_set_mac_addr(struct bce_softc *sc)
3440 {
3441         const uint8_t *mac_addr = sc->eaddr;
3442         uint32_t val;
3443
3444         val = (mac_addr[0] << 8) | mac_addr[1];
3445         REG_WR(sc, BCE_EMAC_MAC_MATCH0, val);
3446
3447         val = (mac_addr[2] << 24) |
3448               (mac_addr[3] << 16) |
3449               (mac_addr[4] << 8) |
3450               mac_addr[5];
3451         REG_WR(sc, BCE_EMAC_MAC_MATCH1, val);
3452 }
3453
3454 /****************************************************************************/
3455 /* Stop the controller.                                                     */
3456 /*                                                                          */
3457 /* Returns:                                                                 */
3458 /*   Nothing.                                                               */
3459 /****************************************************************************/
3460 static void
3461 bce_stop(struct bce_softc *sc)
3462 {
3463         struct ifnet *ifp = &sc->arpcom.ac_if;
3464         int i;
3465
3466         ASSERT_IFNET_SERIALIZED_ALL(ifp);
3467
3468         callout_stop(&sc->bce_tick_callout);
3469
3470         /* Disable the transmit/receive blocks. */
3471         REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS, BCE_MISC_ENABLE_CLR_DEFAULT);
3472         REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
3473         DELAY(20);
3474
3475         bce_disable_intr(sc);
3476
3477         ifp->if_flags &= ~IFF_RUNNING;
3478         for (i = 0; i < sc->tx_ring_cnt; ++i) {
3479                 ifsq_clr_oactive(sc->tx_rings[i].ifsq);
3480                 ifsq_watchdog_stop(&sc->tx_rings[i].tx_watchdog);
3481         }
3482
3483         /* Free the RX lists. */
3484         for (i = 0; i < sc->rx_ring_cnt; ++i)
3485                 bce_free_rx_chain(&sc->rx_rings[i]);
3486
3487         /* Free TX buffers. */
3488         for (i = 0; i < sc->tx_ring_cnt; ++i)
3489                 bce_free_tx_chain(&sc->tx_rings[i]);
3490
3491         sc->bce_link = 0;
3492         sc->bce_coalchg_mask = 0;
3493 }
3494
/****************************************************************************/
/* Reset the controller.                                                    */
/*                                                                          */
/* Coordinates the reset with the bootcode firmware, performs the chip-    */
/* specific core reset, then waits for firmware re-initialization.          */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_reset(struct bce_softc *sc, uint32_t reset_code)
{
	uint32_t val;
	int i, rc = 0;

	/* Wait for pending PCI transactions to complete. */
	REG_WR(sc, BCE_MISC_ENABLE_CLR_BITS,
	       BCE_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BCE_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(sc, BCE_MISC_ENABLE_CLR_BITS);
	DELAY(5);

	/* Disable DMA */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
		val &= ~BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
	}

	/* Assume bootcode is running. */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Give the firmware a chance to prepare for the reset. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT0 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
			  "Firmware is not ready for reset\n");
		return rc;
	}

	/* Set a firmware reminder that this is a soft reset. */
	bce_shmem_wr(sc, BCE_DRV_RESET_SIGNATURE,
	    BCE_DRV_RESET_SIGNATURE_MAGIC);

	/* Dummy read to force the chip to complete all current transactions. */
	val = REG_RD(sc, BCE_MISC_ID);

	/* Chip reset. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		/* 5709/5716: software reset through the MISC command reg. */
		REG_WR(sc, BCE_MISC_COMMAND, BCE_MISC_COMMAND_SW_RESET);
		REG_RD(sc, BCE_MISC_COMMAND);
		DELAY(5);

		/* Re-enable the register window and mailbox word swap. */
		val = BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config(sc->bce_dev, BCE_PCICFG_MISC_CONFIG, val, 4);
	} else {
		/* Older chips: core reset via PCICFG_MISC_CONFIG. */
		val = BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		    BCE_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
		REG_WR(sc, BCE_PCICFG_MISC_CONFIG, val);

		/* Allow up to 30us for reset to complete. */
		for (i = 0; i < 10; i++) {
			val = REG_RD(sc, BCE_PCICFG_MISC_CONFIG);
			if ((val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			DELAY(10);
		}

		/* Check that reset completed successfully. */
		if (val & (BCE_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		    BCE_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			if_printf(&sc->arpcom.ac_if, "Reset failed!\n");
			return EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(sc, BCE_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		if_printf(&sc->arpcom.ac_if, "Byte swap is incorrect!\n");
		return ENODEV;
	}

	/* Just completed a reset, assume that firmware is running again. */
	sc->bce_fw_timed_out = 0;
	sc->bce_drv_cardiac_arrest = 0;

	/* Wait for the firmware to finish its initialization. */
	rc = bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT1 | reset_code);
	if (rc) {
		if_printf(&sc->arpcom.ac_if,
			  "Firmware did not complete initialization!\n");
	}

	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
		/* Re-program the MSI-X table (presumably lost in the reset
		 * — confirm against bce_setup_msix_table). */
		bce_setup_msix_table(sc);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(sc, BCE_MISC_ECO_HW_CTL,
		    BCE_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);

	}
	return rc;
}
3598
/****************************************************************************/
/* Perform basic chip initialization.                                       */
/*                                                                          */
/* Configures DMA swapping, the context memory, on-chip CPUs, NVRAM access */
/* and the management queue (MQ) block.                                     */
/*                                                                          */
/* Returns:                                                                 */
/*   0 for success, positive value for failure.                             */
/****************************************************************************/
static int
bce_chipinit(struct bce_softc *sc)
{
	uint32_t val;
	int rc = 0;

	/* Make sure the interrupt is not active. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, BCE_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	/*
	 * Initialize DMA byte/word swapping, configure the number of DMA
	 * channels and PCI clock compensation delay.
	 */
	val = BCE_DMA_CONFIG_DATA_BYTE_SWAP |
	      BCE_DMA_CONFIG_DATA_WORD_SWAP |
#if BYTE_ORDER == BIG_ENDIAN
	      BCE_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BCE_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): 0x2<<20 appears to be the PCI compensation delay
	 * value — confirm against the chip documentation. */
	val |= (0x2 << 20) | BCE_DMA_CONFIG_CNTL_PCI_COMP_DLY;

	if ((sc->bce_flags & BCE_PCIX_FLAG) && sc->bus_speed_mhz == 133)
		val |= BCE_DMA_CONFIG_PCI_FAST_CLK_CMP;

	/*
	 * This setting resolves a problem observed on certain Intel PCI
	 * chipsets that cannot handle multiple outstanding DMA operations.
	 * See errata E9_5706A1_65.
	 */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706 &&
	    BCE_CHIP_ID(sc) != BCE_CHIP_ID_5706_A0 &&
	    !(sc->bce_flags & BCE_PCIX_FLAG))
		val |= BCE_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(sc, BCE_DMA_CONFIG, val);

	/* Enable the RX_V2P and Context state machines before access. */
	REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
	       BCE_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BCE_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BCE_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts. */
	rc = bce_init_ctx(sc);
	if (rc != 0)
		return rc;

	/* Initialize the on-boards CPUs */
	bce_init_cpus(sc);

	/* Enable management frames (NC-SI) to flow to the MCP. */
	if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
		val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) |
		    BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
		REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
	}

	/* Prepare NVRAM for access. */
	rc = bce_init_nvram(sc);
	if (rc != 0)
		return rc;

	/* Set the kernel bypass block size */
	val = REG_RD(sc, BCE_MQ_CONFIG);
	val &= ~BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BCE_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;

	/* Enable bins used on the 5709/5716. */
	if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
	    BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
		val |= BCE_MQ_CONFIG_BIN_MQ_MODE;
		if (BCE_CHIP_ID(sc) == BCE_CHIP_ID_5709_A1)
			val |= BCE_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(sc, BCE_MQ_CONFIG, val);

	/* Window covering the kernel context mailboxes. */
	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(sc, BCE_MQ_KNL_BYP_WIND_START, val);
	REG_WR(sc, BCE_MQ_KNL_WIND_END, val);

	/* Set the page size and clear the RV2P processor stall bits. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(sc, BCE_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(sc, BCE_TBDR_CONFIG);
	val &= ~BCE_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(sc, BCE_TBDR_CONFIG, val);

	/* Set the perfect match control register to default. */
	REG_WR_IND(sc, BCE_RXP_PM_CTRL, 0);

	return 0;
}
3699
3700 /****************************************************************************/
3701 /* Initialize the controller in preparation to send/receive traffic.        */
3702 /*                                                                          */
3703 /* Returns:                                                                 */
3704 /*   0 for success, positive value for failure.                             */
3705 /****************************************************************************/
3706 static int
3707 bce_blockinit(struct bce_softc *sc)
3708 {
3709         uint32_t reg, val;
3710         int i;
3711
3712         /* Load the hardware default MAC address. */
3713         bce_set_mac_addr(sc);
3714
3715         /* Set the Ethernet backoff seed value */
3716         val = sc->eaddr[0] + (sc->eaddr[1] << 8) + (sc->eaddr[2] << 16) +
3717               sc->eaddr[3] + (sc->eaddr[4] << 8) + (sc->eaddr[5] << 16);
3718         REG_WR(sc, BCE_EMAC_BACKOFF_SEED, val);
3719
3720         sc->rx_mode = BCE_EMAC_RX_MODE_SORT_MODE;
3721
3722         /* Set up link change interrupt generation. */
3723         REG_WR(sc, BCE_EMAC_ATTENTION_ENA, BCE_EMAC_ATTENTION_ENA_LINK);
3724
3725         /* Program the physical address of the status block. */
3726         REG_WR(sc, BCE_HC_STATUS_ADDR_L, BCE_ADDR_LO(sc->status_block_paddr));
3727         REG_WR(sc, BCE_HC_STATUS_ADDR_H, BCE_ADDR_HI(sc->status_block_paddr));
3728
3729         /* Program the physical address of the statistics block. */
3730         REG_WR(sc, BCE_HC_STATISTICS_ADDR_L,
3731                BCE_ADDR_LO(sc->stats_block_paddr));
3732         REG_WR(sc, BCE_HC_STATISTICS_ADDR_H,
3733                BCE_ADDR_HI(sc->stats_block_paddr));
3734
3735         /* Program various host coalescing parameters. */
3736         REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
3737                (sc->bce_tx_quick_cons_trip_int << 16) |
3738                sc->bce_tx_quick_cons_trip);
3739         REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
3740                (sc->bce_rx_quick_cons_trip_int << 16) |
3741                sc->bce_rx_quick_cons_trip);
3742         REG_WR(sc, BCE_HC_COMP_PROD_TRIP,
3743                (sc->bce_comp_prod_trip_int << 16) | sc->bce_comp_prod_trip);
3744         REG_WR(sc, BCE_HC_TX_TICKS,
3745                (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3746         REG_WR(sc, BCE_HC_RX_TICKS,
3747                (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3748         REG_WR(sc, BCE_HC_COM_TICKS,
3749                (sc->bce_com_ticks_int << 16) | sc->bce_com_ticks);
3750         REG_WR(sc, BCE_HC_CMD_TICKS,
3751                (sc->bce_cmd_ticks_int << 16) | sc->bce_cmd_ticks);
3752         REG_WR(sc, BCE_HC_STATS_TICKS, (sc->bce_stats_ticks & 0xffff00));
3753         REG_WR(sc, BCE_HC_STAT_COLLECT_TICKS, 0xbb8);   /* 3ms */
3754
3755         if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3756                 REG_WR(sc, BCE_HC_MSIX_BIT_VECTOR, BCE_HC_MSIX_BIT_VECTOR_VAL);
3757
3758         val = BCE_HC_CONFIG_TX_TMR_MODE | BCE_HC_CONFIG_COLLECT_STATS;
3759         if ((sc->bce_flags & BCE_ONESHOT_MSI_FLAG) ||
3760             sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3761                 if (bootverbose) {
3762                         if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX) {
3763                                 if_printf(&sc->arpcom.ac_if,
3764                                     "using MSI-X\n");
3765                         } else {
3766                                 if_printf(&sc->arpcom.ac_if,
3767                                     "using oneshot MSI\n");
3768                         }
3769                 }
3770                 val |= BCE_HC_CONFIG_ONE_SHOT | BCE_HC_CONFIG_USE_INT_PARAM;
3771                 if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
3772                         val |= BCE_HC_CONFIG_SB_ADDR_INC_128B;
3773         }
3774         REG_WR(sc, BCE_HC_CONFIG, val);
3775
3776         for (i = 1; i < sc->rx_ring_cnt; ++i) {
3777                 uint32_t base;
3778
3779                 base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) + BCE_HC_SB_CONFIG_1;
3780                 KKASSERT(base <= BCE_HC_SB_CONFIG_8);
3781
3782                 REG_WR(sc, base,
3783                     BCE_HC_SB_CONFIG_1_TX_TMR_MODE |
3784                     /* BCE_HC_SB_CONFIG_1_RX_TMR_MODE | */
3785                     BCE_HC_SB_CONFIG_1_ONE_SHOT);
3786
3787                 REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
3788                     (sc->bce_tx_quick_cons_trip_int << 16) |
3789                     sc->bce_tx_quick_cons_trip);
3790                 REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
3791                     (sc->bce_rx_quick_cons_trip_int << 16) |
3792                     sc->bce_rx_quick_cons_trip);
3793                 REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
3794                     (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
3795                 REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
3796                     (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
3797         }
3798
3799         /* Clear the internal statistics counters. */
3800         REG_WR(sc, BCE_HC_COMMAND, BCE_HC_COMMAND_CLR_STAT_NOW);
3801
3802         /* Verify that bootcode is running. */
3803         reg = bce_shmem_rd(sc, BCE_DEV_INFO_SIGNATURE);
3804
3805         if ((reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
3806             BCE_DEV_INFO_SIGNATURE_MAGIC) {
3807                 if_printf(&sc->arpcom.ac_if,
3808                           "Bootcode not running! Found: 0x%08X, "
3809                           "Expected: 08%08X\n",
3810                           reg & BCE_DEV_INFO_SIGNATURE_MAGIC_MASK,
3811                           BCE_DEV_INFO_SIGNATURE_MAGIC);
3812                 return ENODEV;
3813         }
3814
3815         /* Enable DMA */
3816         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3817             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3818                 val = REG_RD(sc, BCE_MISC_NEW_CORE_CTL);
3819                 val |= BCE_MISC_NEW_CORE_CTL_DMA_ENABLE;
3820                 REG_WR(sc, BCE_MISC_NEW_CORE_CTL, val);
3821         }
3822
3823         /* Allow bootcode to apply any additional fixes before enabling MAC. */
3824         bce_fw_sync(sc, BCE_DRV_MSG_DATA_WAIT2 | BCE_DRV_MSG_CODE_RESET);
3825
3826         /* Enable link state change interrupt generation. */
3827         REG_WR(sc, BCE_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3828
3829         /* Enable the RXP. */
3830         bce_start_rxp_cpu(sc);
3831
3832         /* Disable management frames (NC-SI) from flowing to the MCP. */
3833         if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
3834                 val = REG_RD(sc, BCE_RPM_MGMT_PKT_CTRL) &
3835                     ~BCE_RPM_MGMT_PKT_CTRL_MGMT_EN;
3836                 REG_WR(sc, BCE_RPM_MGMT_PKT_CTRL, val);
3837         }
3838
3839         /* Enable all remaining blocks in the MAC. */
3840         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
3841             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
3842                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
3843                     BCE_MISC_ENABLE_DEFAULT_XI);
3844         } else {
3845                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
3846         }
3847         REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
3848         DELAY(20);
3849
3850         /* Save the current host coalescing block settings. */
3851         sc->hc_command = REG_RD(sc, BCE_HC_COMMAND);
3852
3853         return 0;
3854 }
3855
3856 /****************************************************************************/
3857 /* Encapsulate an mbuf cluster into the rx_bd chain.                        */
3858 /*                                                                          */
3859 /* The NetXtreme II can support Jumbo frames by using multiple rx_bd's.     */
3860 /* This routine will map an mbuf cluster into 1 or more rx_bd's as          */
3861 /* necessary.                                                               */
3862 /*                                                                          */
3863 /* Returns:                                                                 */
3864 /*   0 for success, positive value for failure.                             */
3865 /****************************************************************************/
3866 static int
3867 bce_newbuf_std(struct bce_rx_ring *rxr, uint16_t *prod, uint16_t *chain_prod,
3868     uint32_t *prod_bseq, int init)
3869 {
3870         bus_dmamap_t map;
3871         bus_dma_segment_t seg;
3872         struct mbuf *m_new;
3873         int error, nseg;
3874
3875         /* This is a new mbuf allocation. */
3876         m_new = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
3877         if (m_new == NULL)
3878                 return ENOBUFS;
3879
3880         m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
3881
3882         /* Map the mbuf cluster into device memory. */
3883         error = bus_dmamap_load_mbuf_segment(rxr->rx_mbuf_tag,
3884             rxr->rx_mbuf_tmpmap, m_new, &seg, 1, &nseg, BUS_DMA_NOWAIT);
3885         if (error) {
3886                 m_freem(m_new);
3887                 if (init) {
3888                         if_printf(&rxr->sc->arpcom.ac_if,
3889                             "Error mapping mbuf into RX chain!\n");
3890                 }
3891                 return error;
3892         }
3893
3894         if (rxr->rx_mbuf_ptr[*chain_prod] != NULL) {
3895                 bus_dmamap_unload(rxr->rx_mbuf_tag,
3896                     rxr->rx_mbuf_map[*chain_prod]);
3897         }
3898
3899         map = rxr->rx_mbuf_map[*chain_prod];
3900         rxr->rx_mbuf_map[*chain_prod] = rxr->rx_mbuf_tmpmap;
3901         rxr->rx_mbuf_tmpmap = map;
3902
3903         /* Save the mbuf and update our counter. */
3904         rxr->rx_mbuf_ptr[*chain_prod] = m_new;
3905         rxr->rx_mbuf_paddr[*chain_prod] = seg.ds_addr;
3906         rxr->free_rx_bd--;
3907
3908         bce_setup_rxdesc_std(rxr, *chain_prod, prod_bseq);
3909
3910         return 0;
3911 }
3912
3913 static void
3914 bce_setup_rxdesc_std(struct bce_rx_ring *rxr, uint16_t chain_prod,
3915     uint32_t *prod_bseq)
3916 {
3917         struct rx_bd *rxbd;
3918         bus_addr_t paddr;
3919         int len;
3920
3921         paddr = rxr->rx_mbuf_paddr[chain_prod];
3922         len = rxr->rx_mbuf_ptr[chain_prod]->m_len;
3923
3924         /* Setup the rx_bd for the first segment. */
3925         rxbd = &rxr->rx_bd_chain[RX_PAGE(chain_prod)][RX_IDX(chain_prod)];
3926
3927         rxbd->rx_bd_haddr_lo = htole32(BCE_ADDR_LO(paddr));
3928         rxbd->rx_bd_haddr_hi = htole32(BCE_ADDR_HI(paddr));
3929         rxbd->rx_bd_len = htole32(len);
3930         rxbd->rx_bd_flags = htole32(RX_BD_FLAGS_START);
3931         *prod_bseq += len;
3932
3933         rxbd->rx_bd_flags |= htole32(RX_BD_FLAGS_END);
3934 }
3935
3936 /****************************************************************************/
3937 /* Initialize the TX context memory.                                        */
3938 /*                                                                          */
3939 /* Returns:                                                                 */
3940 /*   Nothing                                                                */
3941 /****************************************************************************/
3942 static void
3943 bce_init_tx_context(struct bce_tx_ring *txr)
3944 {
3945         uint32_t val;
3946
3947         /* Initialize the context ID for an L2 TX chain. */
3948         if (BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5709 ||
3949             BCE_CHIP_NUM(txr->sc) == BCE_CHIP_NUM_5716) {
3950                 /* Set the CID type to support an L2 connection. */
3951                 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3952                 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3953                     BCE_L2CTX_TX_TYPE_XI, val);
3954                 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3955                 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3956                     BCE_L2CTX_TX_CMD_TYPE_XI, val);
3957
3958                 /* Point the hardware to the first page in the chain. */
3959                 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3960                 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3961                     BCE_L2CTX_TX_TBDR_BHADDR_HI_XI, val);
3962                 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3963                 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3964                     BCE_L2CTX_TX_TBDR_BHADDR_LO_XI, val);
3965         } else {
3966                 /* Set the CID type to support an L2 connection. */
3967                 val = BCE_L2CTX_TX_TYPE_TYPE_L2 | BCE_L2CTX_TX_TYPE_SIZE_L2;
3968                 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3969                     BCE_L2CTX_TX_TYPE, val);
3970                 val = BCE_L2CTX_TX_CMD_TYPE_TYPE_L2 | (8 << 16);
3971                 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3972                     BCE_L2CTX_TX_CMD_TYPE, val);
3973
3974                 /* Point the hardware to the first page in the chain. */
3975                 val = BCE_ADDR_HI(txr->tx_bd_chain_paddr[0]);
3976                 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3977                     BCE_L2CTX_TX_TBDR_BHADDR_HI, val);
3978                 val = BCE_ADDR_LO(txr->tx_bd_chain_paddr[0]);
3979                 CTX_WR(txr->sc, GET_CID_ADDR(txr->tx_cid),
3980                     BCE_L2CTX_TX_TBDR_BHADDR_LO, val);
3981         }
3982 }
3983
3984 /****************************************************************************/
3985 /* Allocate memory and initialize the TX data structures.                   */
3986 /*                                                                          */
3987 /* Returns:                                                                 */
3988 /*   0 for success, positive value for failure.                             */
3989 /****************************************************************************/
3990 static int
3991 bce_init_tx_chain(struct bce_tx_ring *txr)
3992 {
3993         struct tx_bd *txbd;
3994         int i, rc = 0;
3995
3996         /* Set the initial TX producer/consumer indices. */
3997         txr->tx_prod = 0;
3998         txr->tx_cons = 0;
3999         txr->tx_prod_bseq = 0;
4000         txr->used_tx_bd = 0;
4001         txr->max_tx_bd = USABLE_TX_BD(txr);
4002
4003         /*
4004          * The NetXtreme II supports a linked-list structre called
4005          * a Buffer Descriptor Chain (or BD chain).  A BD chain
4006          * consists of a series of 1 or more chain pages, each of which
4007          * consists of a fixed number of BD entries.
4008          * The last BD entry on each page is a pointer to the next page
4009          * in the chain, and the last pointer in the BD chain
4010          * points back to the beginning of the chain.
4011          */
4012
4013         /* Set the TX next pointer chain entries. */
4014         for (i = 0; i < txr->tx_pages; i++) {
4015                 int j;
4016
4017                 txbd = &txr->tx_bd_chain[i][USABLE_TX_BD_PER_PAGE];
4018
4019                 /* Check if we've reached the last page. */
4020                 if (i == (txr->tx_pages - 1))
4021                         j = 0;
4022                 else
4023                         j = i + 1;
4024
4025                 txbd->tx_bd_haddr_hi =
4026                     htole32(BCE_ADDR_HI(txr->tx_bd_chain_paddr[j]));
4027                 txbd->tx_bd_haddr_lo =
4028                     htole32(BCE_ADDR_LO(txr->tx_bd_chain_paddr[j]));
4029         }
4030         bce_init_tx_context(txr);
4031
4032         return(rc);
4033 }
4034
4035 /****************************************************************************/
4036 /* Free memory and clear the TX data structures.                            */
4037 /*                                                                          */
4038 /* Returns:                                                                 */
4039 /*   Nothing.                                                               */
4040 /****************************************************************************/
4041 static void
4042 bce_free_tx_chain(struct bce_tx_ring *txr)
4043 {
4044         int i;
4045
4046         /* Unmap, unload, and free any mbufs still in the TX mbuf chain. */
4047         for (i = 0; i < TOTAL_TX_BD(txr); i++) {
4048                 if (txr->tx_mbuf_ptr[i] != NULL) {
4049                         bus_dmamap_unload(txr->tx_mbuf_tag,
4050                             txr->tx_mbuf_map[i]);
4051                         m_freem(txr->tx_mbuf_ptr[i]);
4052                         txr->tx_mbuf_ptr[i] = NULL;
4053                 }
4054         }
4055
4056         /* Clear each TX chain page. */
4057         for (i = 0; i < txr->tx_pages; i++)
4058                 bzero(txr->tx_bd_chain[i], BCE_TX_CHAIN_PAGE_SZ);
4059         txr->used_tx_bd = 0;
4060 }
4061
4062 /****************************************************************************/
4063 /* Initialize the RX context memory.                                        */
4064 /*                                                                          */
4065 /* Returns:                                                                 */
4066 /*   Nothing                                                                */
4067 /****************************************************************************/
4068 static void
4069 bce_init_rx_context(struct bce_rx_ring *rxr)
4070 {
4071         uint32_t val;
4072
4073         /* Initialize the context ID for an L2 RX chain. */
4074         val = BCE_L2CTX_RX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4075             BCE_L2CTX_RX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4076
4077         /*
4078          * Set the level for generating pause frames
4079          * when the number of available rx_bd's gets
4080          * too low (the low watermark) and the level
4081          * when pause frames can be stopped (the high
4082          * watermark).
4083          */
4084         if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4085             BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4086                 uint32_t lo_water, hi_water;
4087
4088                 lo_water = BCE_L2CTX_RX_LO_WATER_MARK_DEFAULT;
4089                 hi_water = USABLE_RX_BD(rxr) / 4;
4090
4091                 lo_water /= BCE_L2CTX_RX_LO_WATER_MARK_SCALE;
4092                 hi_water /= BCE_L2CTX_RX_HI_WATER_MARK_SCALE;
4093
4094                 if (hi_water > 0xf)
4095                         hi_water = 0xf;
4096                 else if (hi_water == 0)
4097                         lo_water = 0;
4098                 val |= lo_water |
4099                     (hi_water << BCE_L2CTX_RX_HI_WATER_MARK_SHIFT);
4100         }
4101
4102         CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4103             BCE_L2CTX_RX_CTX_TYPE, val);
4104
4105         /* Setup the MQ BIN mapping for l2_ctx_host_bseq. */
4106         if (BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5709 ||
4107             BCE_CHIP_NUM(rxr->sc) == BCE_CHIP_NUM_5716) {
4108                 val = REG_RD(rxr->sc, BCE_MQ_MAP_L2_5);
4109                 REG_WR(rxr->sc, BCE_MQ_MAP_L2_5, val | BCE_MQ_MAP_L2_5_ARM);
4110         }
4111
4112         /* Point the hardware to the first page in the chain. */
4113         val = BCE_ADDR_HI(rxr->rx_bd_chain_paddr[0]);
4114         CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4115             BCE_L2CTX_RX_NX_BDHADDR_HI, val);
4116         val = BCE_ADDR_LO(rxr->rx_bd_chain_paddr[0]);
4117         CTX_WR(rxr->sc, GET_CID_ADDR(rxr->rx_cid),
4118             BCE_L2CTX_RX_NX_BDHADDR_LO, val);
4119 }
4120
4121 /****************************************************************************/
4122 /* Allocate memory and initialize the RX data structures.                   */
4123 /*                                                                          */
4124 /* Returns:                                                                 */
4125 /*   0 for success, positive value for failure.                             */
4126 /****************************************************************************/
4127 static int
4128 bce_init_rx_chain(struct bce_rx_ring *rxr)
4129 {
4130         struct rx_bd *rxbd;
4131         int i, rc = 0;
4132         uint16_t prod, chain_prod;
4133         uint32_t prod_bseq;
4134
4135         /* Initialize the RX producer and consumer indices. */
4136         rxr->rx_prod = 0;
4137         rxr->rx_cons = 0;
4138         rxr->rx_prod_bseq = 0;
4139         rxr->free_rx_bd = USABLE_RX_BD(rxr);
4140         rxr->max_rx_bd = USABLE_RX_BD(rxr);
4141
4142         /* Clear cache status index */
4143         rxr->last_status_idx = 0;
4144
4145         /* Initialize the RX next pointer chain entries. */
4146         for (i = 0; i < rxr->rx_pages; i++) {
4147                 int j;
4148
4149                 rxbd = &rxr->rx_bd_chain[i][USABLE_RX_BD_PER_PAGE];
4150
4151                 /* Check if we've reached the last page. */
4152                 if (i == (rxr->rx_pages - 1))
4153                         j = 0;
4154                 else
4155                         j = i + 1;
4156
4157                 /* Setup the chain page pointers. */
4158                 rxbd->rx_bd_haddr_hi =
4159                     htole32(BCE_ADDR_HI(rxr->rx_bd_chain_paddr[j]));
4160                 rxbd->rx_bd_haddr_lo =
4161                     htole32(BCE_ADDR_LO(rxr->rx_bd_chain_paddr[j]));
4162         }
4163
4164         /* Allocate mbuf clusters for the rx_bd chain. */
4165         prod = prod_bseq = 0;
4166         while (prod < TOTAL_RX_BD(rxr)) {
4167                 chain_prod = RX_CHAIN_IDX(rxr, prod);
4168                 if (bce_newbuf_std(rxr, &prod, &chain_prod, &prod_bseq, 1)) {
4169                         if_printf(&rxr->sc->arpcom.ac_if,
4170                             "Error filling RX chain: rx_bd[0x%04X]!\n",
4171                             chain_prod);
4172                         rc = ENOBUFS;
4173                         break;
4174                 }
4175                 prod = NEXT_RX_BD(prod);
4176         }
4177
4178         /* Save the RX chain producer index. */
4179         rxr->rx_prod = prod;
4180         rxr->rx_prod_bseq = prod_bseq;
4181
4182         /* Tell the chip about the waiting rx_bd's. */
4183         REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
4184             rxr->rx_prod);
4185         REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
4186             rxr->rx_prod_bseq);
4187
4188         bce_init_rx_context(rxr);
4189
4190         return(rc);
4191 }
4192
4193 /****************************************************************************/
4194 /* Free memory and clear the RX data structures.                            */
4195 /*                                                                          */
4196 /* Returns:                                                                 */
4197 /*   Nothing.                                                               */
4198 /****************************************************************************/
4199 static void
4200 bce_free_rx_chain(struct bce_rx_ring *rxr)
4201 {
4202         int i;
4203
4204         /* Free any mbufs still in the RX mbuf chain. */
4205         for (i = 0; i < TOTAL_RX_BD(rxr); i++) {
4206                 if (rxr->rx_mbuf_ptr[i] != NULL) {
4207                         bus_dmamap_unload(rxr->rx_mbuf_tag,
4208                             rxr->rx_mbuf_map[i]);
4209                         m_freem(rxr->rx_mbuf_ptr[i]);
4210                         rxr->rx_mbuf_ptr[i] = NULL;
4211                 }
4212         }
4213
4214         /* Clear each RX chain page. */
4215         for (i = 0; i < rxr->rx_pages; i++)
4216                 bzero(rxr->rx_bd_chain[i], BCE_RX_CHAIN_PAGE_SZ);
4217 }
4218
4219 /****************************************************************************/
4220 /* Set media options.                                                       */
4221 /*                                                                          */
4222 /* Returns:                                                                 */
4223 /*   0 for success, positive value for failure.                             */
4224 /****************************************************************************/
4225 static int
4226 bce_ifmedia_upd(struct ifnet *ifp)
4227 {
4228         struct bce_softc *sc = ifp->if_softc;
4229         struct mii_data *mii = device_get_softc(sc->bce_miibus);
4230         int error = 0;
4231
4232         /*
4233          * 'mii' will be NULL, when this function is called on following
4234          * code path: bce_attach() -> bce_mgmt_init()
4235          */
4236         if (mii != NULL) {
4237                 /* Make sure the MII bus has been enumerated. */
4238                 sc->bce_link = 0;
4239                 if (mii->mii_instance) {
4240                         struct mii_softc *miisc;
4241
4242                         LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
4243                                 mii_phy_reset(miisc);
4244                 }
4245                 error = mii_mediachg(mii);
4246         }
4247         return error;
4248 }
4249
4250 /****************************************************************************/
4251 /* Reports current media status.                                            */
4252 /*                                                                          */
4253 /* Returns:                                                                 */
4254 /*   Nothing.                                                               */
4255 /****************************************************************************/
4256 static void
4257 bce_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
4258 {
4259         struct bce_softc *sc = ifp->if_softc;
4260         struct mii_data *mii = device_get_softc(sc->bce_miibus);
4261
4262         mii_pollstat(mii);
4263         ifmr->ifm_active = mii->mii_media_active;
4264         ifmr->ifm_status = mii->mii_media_status;
4265 }
4266
4267 /****************************************************************************/
4268 /* Handles PHY generated interrupt events.                                  */
4269 /*                                                                          */
4270 /* Returns:                                                                 */
4271 /*   Nothing.                                                               */
4272 /****************************************************************************/
4273 static void
4274 bce_phy_intr(struct bce_softc *sc)
4275 {
4276         uint32_t new_link_state, old_link_state;
4277         struct ifnet *ifp = &sc->arpcom.ac_if;
4278
4279         ASSERT_SERIALIZED(&sc->main_serialize);
4280
4281         new_link_state = sc->status_block->status_attn_bits &
4282                          STATUS_ATTN_BITS_LINK_STATE;
4283         old_link_state = sc->status_block->status_attn_bits_ack &
4284                          STATUS_ATTN_BITS_LINK_STATE;
4285
4286         /* Handle any changes if the link state has changed. */
4287         if (new_link_state != old_link_state) { /* XXX redundant? */
4288                 /* Update the status_attn_bits_ack field in the status block. */
4289                 if (new_link_state) {
4290                         REG_WR(sc, BCE_PCICFG_STATUS_BIT_SET_CMD,
4291                                STATUS_ATTN_BITS_LINK_STATE);
4292                         if (bootverbose)
4293                                 if_printf(ifp, "Link is now UP.\n");
4294                 } else {
4295                         REG_WR(sc, BCE_PCICFG_STATUS_BIT_CLEAR_CMD,
4296                                STATUS_ATTN_BITS_LINK_STATE);
4297                         if (bootverbose)
4298                                 if_printf(ifp, "Link is now DOWN.\n");
4299                 }
4300
4301                 /*
4302                  * Assume link is down and allow tick routine to
4303                  * update the state based on the actual media state.
4304                  */
4305                 sc->bce_link = 0;
4306                 callout_stop(&sc->bce_tick_callout);
4307                 bce_tick_serialized(sc);
4308         }
4309
4310         /* Acknowledge the link change interrupt. */
4311         REG_WR(sc, BCE_EMAC_STATUS, BCE_EMAC_STATUS_LINK_CHANGE);
4312 }
4313
4314 /****************************************************************************/
4315 /* Reads the receive consumer value from the status block (skipping over    */
4316 /* chain page pointer if necessary).                                        */
4317 /*                                                                          */
4318 /* Returns:                                                                 */
4319 /*   hw_cons                                                                */
4320 /****************************************************************************/
4321 static __inline uint16_t
4322 bce_get_hw_rx_cons(struct bce_rx_ring *rxr)
4323 {
4324         uint16_t hw_cons = *rxr->rx_hw_cons;
4325
4326         if ((hw_cons & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE)
4327                 hw_cons++;
4328         return hw_cons;
4329 }
4330
/****************************************************************************/
/* Handles received frame interrupt events.                                 */
/*                                                                          */
/* Scans the RX chain from the driver's consumer index up to the hardware   */
/* consumer index, strips the controller-prepended l2_fhdr from each frame, */
/* performs checksum/VLAN post-processing and passes good frames up the     */
/* stack.  Error frames have their rx_bd recycled in place.                 */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_rx_intr(struct bce_rx_ring *rxr, int count, uint16_t hw_cons)
{
	struct ifnet *ifp = &rxr->sc->arpcom.ac_if;
	uint16_t sw_cons, sw_chain_cons, sw_prod, sw_chain_prod;
	uint32_t sw_prod_bseq;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	/* Get working copies of the driver's view of the RX indices. */
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;
	sw_prod_bseq = rxr->rx_prod_bseq;

	/* Scan through the receive chain as long as there is work to do. */
	while (sw_cons != hw_cons) {
		struct mbuf *m = NULL;
		struct l2_fhdr *l2fhdr = NULL;
		unsigned int len;
		uint32_t status = 0;

#ifdef IFPOLL_ENABLE
		/*
		 * Honor the polling budget; a negative count means
		 * "no limit" (interrupt path).
		 */
		if (count >= 0 && count-- == 0)
			break;
#endif

		/*
		 * Convert the producer/consumer indices
		 * to an actual rx_bd index.
		 */
		sw_chain_cons = RX_CHAIN_IDX(rxr, sw_cons);
		sw_chain_prod = RX_CHAIN_IDX(rxr, sw_prod);

		/* This rx_bd is being given back to the driver. */
		rxr->free_rx_bd++;

		/* The mbuf is stored with the last rx_bd entry of a packet. */
		if (rxr->rx_mbuf_ptr[sw_chain_cons] != NULL) {
			/*
			 * Producer and consumer should be in lock-step
			 * here; if not, drop the frame and recycle the
			 * descriptor rather than corrupt the chain.
			 */
			if (sw_chain_cons != sw_chain_prod) {
				if_printf(ifp, "RX cons(%d) != prod(%d), "
				    "drop!\n", sw_chain_cons, sw_chain_prod);
				IFNET_STAT_INC(ifp, ierrors, 1);

				bce_setup_rxdesc_std(rxr, sw_chain_cons,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/* Unmap the mbuf from DMA space. */
			bus_dmamap_sync(rxr->rx_mbuf_tag,
			    rxr->rx_mbuf_map[sw_chain_cons],
			    BUS_DMASYNC_POSTREAD);

			/* Save the mbuf from the driver's chain. */
			m = rxr->rx_mbuf_ptr[sw_chain_cons];

			/*
			 * Frames received on the NetXtreme II are prepended
			 * with an l2_fhdr structure which provides status
			 * information about the received frame (including
			 * VLAN tags and checksum info).  The frames are also
			 * automatically adjusted to align the IP header
			 * (i.e. two null bytes are inserted before the
			 * Ethernet header).  As a result the data DMA'd by
			 * the controller into the mbuf is as follows:
			 *
			 * +---------+-----+---------------------+-----+
			 * | l2_fhdr | pad | packet data         | FCS |
			 * +---------+-----+---------------------+-----+
			 *
			 * The l2_fhdr needs to be checked and skipped and the
			 * FCS needs to be stripped before sending the packet
			 * up the stack.
			 */
			l2fhdr = mtod(m, struct l2_fhdr *);

			len = l2fhdr->l2_fhdr_pkt_len;
			status = l2fhdr->l2_fhdr_status;

			/* Strip the trailing Ethernet FCS. */
			len -= ETHER_CRC_LEN;

			/* Check the received frame for errors. */
			if (status & (L2_FHDR_ERRORS_BAD_CRC |
				      L2_FHDR_ERRORS_PHY_DECODE |
				      L2_FHDR_ERRORS_ALIGNMENT |
				      L2_FHDR_ERRORS_TOO_SHORT |
				      L2_FHDR_ERRORS_GIANT_FRAME)) {
				IFNET_STAT_INC(ifp, ierrors, 1);

				/* Reuse the mbuf for a new frame. */
				bce_setup_rxdesc_std(rxr, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Get a new mbuf for the rx_bd.   If no new
			 * mbufs are available then reuse the current mbuf,
			 * log an ierror on the interface, and generate
			 * an error in the system log.
			 */
			if (bce_newbuf_std(rxr, &sw_prod, &sw_chain_prod,
			    &sw_prod_bseq, 0)) {
				IFNET_STAT_INC(ifp, ierrors, 1);

				/* Try and reuse the existing mbuf. */
				bce_setup_rxdesc_std(rxr, sw_chain_prod,
				    &sw_prod_bseq);
				m = NULL;
				goto bce_rx_int_next_rx;
			}

			/*
			 * Skip over the l2_fhdr and alignment pad when
			 * passing the data up the stack.
			 */
			m_adj(m, sizeof(struct l2_fhdr) + ETHER_ALIGN);

			m->m_pkthdr.len = m->m_len = len;
			m->m_pkthdr.rcvif = ifp;

			/* Validate the checksum if offload enabled. */
			if (ifp->if_capenable & IFCAP_RXCSUM) {
				/* Check for an IP datagram. */
				if (status & L2_FHDR_STATUS_IP_DATAGRAM) {
					m->m_pkthdr.csum_flags |=
						CSUM_IP_CHECKED;

					/* Check if the IP checksum is valid. */
					if ((l2fhdr->l2_fhdr_ip_xsum ^
					     0xffff) == 0) {
						m->m_pkthdr.csum_flags |=
							CSUM_IP_VALID;
					}
				}

				/* Check for a valid TCP/UDP frame. */
				if (status & (L2_FHDR_STATUS_TCP_SEGMENT |
					      L2_FHDR_STATUS_UDP_DATAGRAM)) {

					/* Check for a good TCP/UDP checksum. */
					if ((status &
					     (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0) {
						m->m_pkthdr.csum_data =
						l2fhdr->l2_fhdr_tcp_udp_xsum;
						m->m_pkthdr.csum_flags |=
							CSUM_DATA_VALID |
							CSUM_PSEUDO_HDR;
					}
				}
			}

			IFNET_STAT_INC(ifp, ipackets, 1);
bce_rx_int_next_rx:
			sw_prod = NEXT_RX_BD(sw_prod);
		}

		sw_cons = NEXT_RX_BD(sw_cons);

		/* If we have a packet, pass it up the stack */
		if (m) {
			/* Transfer any hardware-extracted VLAN tag. */
			if (status & L2_FHDR_STATUS_L2_VLAN_TAG) {
				m->m_flags |= M_VLANTAG;
				m->m_pkthdr.ether_vlantag =
					l2fhdr->l2_fhdr_vlan_tag;
			}
			ifp->if_input(ifp, m);
#ifdef BCE_RSS_DEBUG
			rxr->rx_pkts++;
#endif
		}
	}

	/* Publish the updated indices back to the ring state. */
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;
	rxr->rx_prod_bseq = sw_prod_bseq;

	/* Tell the hardware about the newly refilled descriptors. */
	REG_WR16(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BDIDX,
	    rxr->rx_prod);
	REG_WR(rxr->sc, MB_GET_CID_ADDR(rxr->rx_cid) + BCE_L2MQ_RX_HOST_BSEQ,
	    rxr->rx_prod_bseq);
}
4521
4522 /****************************************************************************/
4523 /* Reads the transmit consumer value from the status block (skipping over   */
4524 /* chain page pointer if necessary).                                        */
4525 /*                                                                          */
4526 /* Returns:                                                                 */
4527 /*   hw_cons                                                                */
4528 /****************************************************************************/
4529 static __inline uint16_t
4530 bce_get_hw_tx_cons(struct bce_tx_ring *txr)
4531 {
4532         uint16_t hw_cons = *txr->tx_hw_cons;
4533
4534         if ((hw_cons & USABLE_TX_BD_PER_PAGE) == USABLE_TX_BD_PER_PAGE)
4535                 hw_cons++;
4536         return hw_cons;
4537 }
4538
4539 /****************************************************************************/
4540 /* Handles transmit completion interrupt events.                            */
4541 /*                                                                          */
4542 /* Returns:                                                                 */
4543 /*   Nothing.                                                               */
4544 /****************************************************************************/
4545 static void
4546 bce_tx_intr(struct bce_tx_ring *txr, uint16_t hw_tx_cons)
4547 {
4548         struct ifnet *ifp = &txr->sc->arpcom.ac_if;
4549         uint16_t sw_tx_cons, sw_tx_chain_cons;
4550
4551         ASSERT_SERIALIZED(&txr->tx_serialize);
4552
4553         /* Get the hardware's view of the TX consumer index. */
4554         sw_tx_cons = txr->tx_cons;
4555
4556         /* Cycle through any completed TX chain page entries. */
4557         while (sw_tx_cons != hw_tx_cons) {
4558                 sw_tx_chain_cons = TX_CHAIN_IDX(txr, sw_tx_cons);
4559
4560                 /*
4561                  * Free the associated mbuf. Remember
4562                  * that only the last tx_bd of a packet
4563                  * has an mbuf pointer and DMA map.
4564                  */
4565                 if (txr->tx_mbuf_ptr[sw_tx_chain_cons] != NULL) {
4566                         /* Unmap the mbuf. */
4567                         bus_dmamap_unload(txr->tx_mbuf_tag,
4568                             txr->tx_mbuf_map[sw_tx_chain_cons]);
4569
4570                         /* Free the mbuf. */
4571                         m_freem(txr->tx_mbuf_ptr[sw_tx_chain_cons]);
4572                         txr->tx_mbuf_ptr[sw_tx_chain_cons] = NULL;
4573
4574                         IFNET_STAT_INC(ifp, opackets, 1);
4575 #ifdef BCE_TSS_DEBUG
4576                         txr->tx_pkts++;
4577 #endif
4578                 }
4579
4580                 txr->used_tx_bd--;
4581                 sw_tx_cons = NEXT_TX_BD(sw_tx_cons);
4582         }
4583
4584         if (txr->used_tx_bd == 0) {
4585                 /* Clear the TX timeout timer. */
4586                 txr->tx_watchdog.wd_timer = 0;
4587         }
4588
4589         /* Clear the tx hardware queue full flag. */
4590         if (txr->max_tx_bd - txr->used_tx_bd >= BCE_TX_SPARE_SPACE)
4591                 ifsq_clr_oactive(txr->ifsq);
4592         txr->tx_cons = sw_tx_cons;
4593 }
4594
4595 /****************************************************************************/
4596 /* Disables interrupt generation.                                           */
4597 /*                                                                          */
4598 /* Returns:                                                                 */
4599 /*   Nothing.                                                               */
4600 /****************************************************************************/
4601 static void
4602 bce_disable_intr(struct bce_softc *sc)
4603 {
4604         int i;
4605
4606         for (i = 0; i < sc->rx_ring_cnt; ++i) {
4607                 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
4608                     (sc->rx_rings[i].idx << 24) |
4609                     BCE_PCICFG_INT_ACK_CMD_MASK_INT);
4610         }
4611         REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);
4612
4613         callout_stop(&sc->bce_ckmsi_callout);
4614         sc->bce_msi_maylose = FALSE;
4615         sc->bce_check_rx_cons = 0;
4616         sc->bce_check_tx_cons = 0;
4617         sc->bce_check_status_idx = 0xffff;
4618
4619         for (i = 0; i < sc->rx_ring_cnt; ++i)
4620                 lwkt_serialize_handler_disable(sc->bce_msix[i].msix_serialize);
4621 }
4622
4623 /****************************************************************************/
4624 /* Enables interrupt generation.                                            */
4625 /*                                                                          */
4626 /* Returns:                                                                 */
4627 /*   Nothing.                                                               */
4628 /****************************************************************************/
4629 static void
4630 bce_enable_intr(struct bce_softc *sc)
4631 {
4632         int i;
4633
4634         for (i = 0; i < sc->rx_ring_cnt; ++i)
4635                 lwkt_serialize_handler_enable(sc->bce_msix[i].msix_serialize);
4636
4637         for (i = 0; i < sc->rx_ring_cnt; ++i) {
4638                 struct bce_rx_ring *rxr = &sc->rx_rings[i];
4639
4640                 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4641                        BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4642                        BCE_PCICFG_INT_ACK_CMD_MASK_INT |
4643                        rxr->last_status_idx);
4644                 REG_WR(sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4645                        BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
4646                        rxr->last_status_idx);
4647         }
4648         REG_WR(sc, BCE_HC_COMMAND, sc->hc_command | BCE_HC_COMMAND_COAL_NOW);
4649
4650         if (sc->bce_flags & BCE_CHECK_MSI_FLAG) {
4651                 sc->bce_msi_maylose = FALSE;
4652                 sc->bce_check_rx_cons = 0;
4653                 sc->bce_check_tx_cons = 0;
4654                 sc->bce_check_status_idx = 0xffff;
4655
4656                 if (bootverbose)
4657                         if_printf(&sc->arpcom.ac_if, "check msi\n");
4658
4659                 callout_reset_bycpu(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
4660                     bce_check_msi, sc, sc->bce_msix[0].msix_cpuid);
4661         }
4662 }
4663
4664 /****************************************************************************/
4665 /* Reenables interrupt generation during interrupt handling.                */
4666 /*                                                                          */
4667 /* Returns:                                                                 */
4668 /*   Nothing.                                                               */
4669 /****************************************************************************/
4670 static void
4671 bce_reenable_intr(struct bce_rx_ring *rxr)
4672 {
4673         REG_WR(rxr->sc, BCE_PCICFG_INT_ACK_CMD, (rxr->idx << 24) |
4674                BCE_PCICFG_INT_ACK_CMD_INDEX_VALID | rxr->last_status_idx);
4675 }
4676
/****************************************************************************/
/* Handles controller initialization.                                       */
/*                                                                          */
/* Resets and reprograms the controller, initializes the RX/TX rings,       */
/* selects polling vs. interrupt operation and marks the interface          */
/* running.  Any failure along the way tears the device back down via       */
/* bce_stop().                                                              */
/*                                                                          */
/* Returns:                                                                 */
/*   Nothing.                                                               */
/****************************************************************************/
static void
bce_init(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t ether_mtu;
	int error, i;
	boolean_t polling;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/* Check if the driver is still running and bail out if it is. */
	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Quiesce the hardware before reprogramming it. */
	bce_stop(sc);

	error = bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
	if (error) {
		if_printf(ifp, "Controller reset failed!\n");
		goto back;
	}

	error = bce_chipinit(sc);
	if (error) {
		if_printf(ifp, "Controller initialization failed!\n");
		goto back;
	}

	error = bce_blockinit(sc);
	if (error) {
		if_printf(ifp, "Block initialization failed!\n");
		goto back;
	}

	/* Load our MAC address. */
	bcopy(IF_LLADDR(ifp), sc->eaddr, ETHER_ADDR_LEN);
	bce_set_mac_addr(sc);

	/* Calculate and program the Ethernet MTU size. */
	ether_mtu = ETHER_HDR_LEN + EVL_ENCAPLEN + ifp->if_mtu + ETHER_CRC_LEN;

	/*
	 * Program the mtu, enabling jumbo frame
	 * support if necessary.  Also set the mbuf
	 * allocation count for RX frames.
	 */
	if (ether_mtu > ETHER_MAX_LEN + EVL_ENCAPLEN) {
#ifdef notyet
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE,
		       min(ether_mtu, BCE_MAX_JUMBO_ETHER_MTU) |
		       BCE_EMAC_RX_MTU_SIZE_JUMBO_ENA);
#else
		/* Jumbo RX buffers are not allocated by this driver yet. */
		panic("jumbo buffer is not supported yet");
#endif
	} else {
		REG_WR(sc, BCE_EMAC_RX_MTU_SIZE, ether_mtu);
	}

	/* Program appropriate promiscuous/multicast filtering. */
	bce_set_rx_mode(sc);

	/*
	 * Init RX buffer descriptor chain.  RSS is first disabled
	 * and only re-enabled below if multiple RX rings are in use.
	 */
	REG_WR(sc, BCE_RLUP_RSS_CONFIG, 0);
	bce_reg_wr_ind(sc, BCE_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < sc->rx_ring_cnt; ++i)
		bce_init_rx_chain(&sc->rx_rings[i]);	/* XXX return value */

	if (sc->rx_ring_cnt > 1)
		bce_init_rss(sc);

	/*
	 * Init TX buffer descriptor chain.
	 */
	REG_WR(sc, BCE_TSCH_TSS_CFG, 0);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		bce_init_tx_chain(&sc->tx_rings[i]);

	/* Enable TSS when more than one TX ring is configured. */
	if (sc->tx_ring_cnt > 1) {
		REG_WR(sc, BCE_TSCH_TSS_CFG,
		    ((sc->tx_ring_cnt - 1) << 24) | (TX_TSS_CID << 7));
	}

	polling = FALSE;
#ifdef IFPOLL_ENABLE
	if (ifp->if_flags & IFF_NPOLLING)
		polling = TRUE;
#endif

	if (polling) {
		/* Disable interrupts if we are polling. */
		bce_disable_intr(sc);

		/* Change coalesce parameters */
		bce_npoll_coal_change(sc);
	} else {
		/* Enable host interrupts. */
		bce_enable_intr(sc);
	}
	bce_set_timer_cpuid(sc, polling);

	/* (Re)negotiate the link. */
	bce_ifmedia_upd(ifp);

	/* Mark the interface up and restart the TX queues/watchdogs. */
	ifp->if_flags |= IFF_RUNNING;
	for (i = 0; i < sc->tx_ring_cnt; ++i) {
		ifsq_clr_oactive(sc->tx_rings[i].ifsq);
		ifsq_watchdog_start(&sc->tx_rings[i].tx_watchdog);
	}

	/* Kick off the periodic tick on the chosen CPU. */
	callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
	    sc->bce_timer_cpuid);
back:
	if (error)
		bce_stop(sc);
}
4802
4803 /****************************************************************************/
4804 /* Initialize the controller just enough so that any management firmware    */
4805 /* running on the device will continue to operate corectly.                 */
4806 /*                                                                          */
4807 /* Returns:                                                                 */
4808 /*   Nothing.                                                               */
4809 /****************************************************************************/
4810 static void
4811 bce_mgmt_init(struct bce_softc *sc)
4812 {
4813         struct ifnet *ifp = &sc->arpcom.ac_if;
4814
4815         /* Bail out if management firmware is not running. */
4816         if (!(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
4817                 return;
4818
4819         /* Enable all critical blocks in the MAC. */
4820         if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709 ||
4821             BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5716) {
4822                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS,
4823                     BCE_MISC_ENABLE_DEFAULT_XI);
4824         } else {
4825                 REG_WR(sc, BCE_MISC_ENABLE_SET_BITS, BCE_MISC_ENABLE_DEFAULT);
4826         }
4827         REG_RD(sc, BCE_MISC_ENABLE_SET_BITS);
4828         DELAY(20);
4829
4830         bce_ifmedia_upd(ifp);
4831 }
4832
4833 /****************************************************************************/
4834 /* Encapsultes an mbuf cluster into the tx_bd chain structure and makes the */
4835 /* memory visible to the controller.                                        */
4836 /*                                                                          */
4837 /* Returns:                                                                 */
4838 /*   0 for success, positive value for failure.                             */
4839 /****************************************************************************/
4840 static int
4841 bce_encap(struct bce_tx_ring *txr, struct mbuf **m_head, int *nsegs_used)
4842 {
4843         bus_dma_segment_t segs[BCE_MAX_SEGMENTS];
4844         bus_dmamap_t map, tmp_map;
4845         struct mbuf *m0 = *m_head;
4846         struct tx_bd *txbd = NULL;
4847         uint16_t vlan_tag = 0, flags = 0, mss = 0;
4848         uint16_t chain_prod, chain_prod_start, prod;
4849         uint32_t prod_bseq;
4850         int i, error, maxsegs, nsegs;
4851
4852         /* Transfer any checksum offload flags to the bd. */
4853         if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
4854                 error = bce_tso_setup(txr, m_head, &flags, &mss);
4855                 if (error)
4856                         return ENOBUFS;
4857                 m0 = *m_head;
4858         } else if (m0->m_pkthdr.csum_flags & BCE_CSUM_FEATURES) {
4859                 if (m0->m_pkthdr.csum_flags & CSUM_IP)
4860                         flags |= TX_BD_FLAGS_IP_CKSUM;
4861                 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
4862                         flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4863         }
4864
4865         /* Transfer any VLAN tags to the bd. */
4866         if (m0->m_flags & M_VLANTAG) {
4867                 flags |= TX_BD_FLAGS_VLAN_TAG;
4868                 vlan_tag = m0->m_pkthdr.ether_vlantag;
4869         }
4870
4871         prod = txr->tx_prod;
4872         chain_prod_start = chain_prod = TX_CHAIN_IDX(txr, prod);
4873
4874         /* Map the mbuf into DMAable memory. */
4875         map = txr->tx_mbuf_map[chain_prod_start];
4876
4877         maxsegs = txr->max_tx_bd - txr->used_tx_bd;
4878         KASSERT(maxsegs >= BCE_TX_SPARE_SPACE,
4879                 ("not enough segments %d", maxsegs));
4880         if (maxsegs > BCE_MAX_SEGMENTS)
4881                 maxsegs = BCE_MAX_SEGMENTS;
4882
4883         /* Map the mbuf into our DMA address space. */
4884         error = bus_dmamap_load_mbuf_defrag(txr->tx_mbuf_tag, map, m_head,
4885                         segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
4886         if (error)
4887                 goto back;
4888         bus_dmamap_sync(txr->tx_mbuf_tag, map, BUS_DMASYNC_PREWRITE);
4889
4890         *nsegs_used += nsegs;
4891
4892         /* Reset m0 */
4893         m0 = *m_head;
4894
4895         /* prod points to an empty tx_bd at this point. */
4896         prod_bseq  = txr->tx_prod_bseq;
4897
4898         /*
4899          * Cycle through each mbuf segment that makes up
4900          * the outgoing frame, gathering the mapping info
4901          * for that segment and creating a tx_bd to for
4902          * the mbuf.
4903          */
4904         for (i = 0; i < nsegs; i++) {
4905                 chain_prod = TX_CHAIN_IDX(txr, prod);
4906                 txbd =
4907                 &txr->tx_bd_chain[TX_PAGE(chain_prod)][TX_IDX(chain_prod)];
4908
4909                 txbd->tx_bd_haddr_lo = htole32(BCE_ADDR_LO(segs[i].ds_addr));
4910                 txbd->tx_bd_haddr_hi = htole32(BCE_ADDR_HI(segs[i].ds_addr));
4911                 txbd->tx_bd_mss_nbytes = htole32(mss << 16) |
4912                     htole16(segs[i].ds_len);
4913                 txbd->tx_bd_vlan_tag = htole16(vlan_tag);
4914                 txbd->tx_bd_flags = htole16(flags);
4915
4916                 prod_bseq += segs[i].ds_len;
4917                 if (i == 0)
4918                         txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_START);
4919                 prod = NEXT_TX_BD(prod);
4920         }
4921
4922         /* Set the END flag on the last TX buffer descriptor. */
4923         txbd->tx_bd_flags |= htole16(TX_BD_FLAGS_END);
4924
4925         /*
4926          * Ensure that the mbuf pointer for this transmission
4927          * is placed at the array index of the last
4928          * descriptor in this chain.  This is done
4929          * because a single map is used for all 
4930          * segments of the mbuf and we don't want to
4931          * unload the map before all of the segments
4932          * have been freed.
4933          */
4934         txr->tx_mbuf_ptr[chain_prod] = m0;
4935
4936         tmp_map = txr->tx_mbuf_map[chain_prod];
4937         txr->tx_mbuf_map[chain_prod] = map;
4938         txr->tx_mbuf_map[chain_prod_start] = tmp_map;
4939
4940         txr->used_tx_bd += nsegs;
4941
4942         /* prod points to the next free tx_bd at this point. */
4943         txr->tx_prod = prod;
4944         txr->tx_prod_bseq = prod_bseq;
4945 back:
4946         if (error) {
4947                 m_freem(*m_head);
4948                 *m_head = NULL;
4949         }
4950         return error;
4951 }
4952
4953 static void
4954 bce_xmit(struct bce_tx_ring *txr)
4955 {
4956         /* Start the transmit. */
4957         REG_WR16(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BIDX,
4958             txr->tx_prod);
4959         REG_WR(txr->sc, MB_GET_CID_ADDR(txr->tx_cid) + BCE_L2CTX_TX_HOST_BSEQ,
4960             txr->tx_prod_bseq);
4961 }
4962
4963 /****************************************************************************/
4964 /* Main transmit routine when called from another routine with a lock.      */
4965 /*                                                                          */
4966 /* Returns:                                                                 */
4967 /*   Nothing.                                                               */
4968 /****************************************************************************/
4969 static void
4970 bce_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
4971 {
4972         struct bce_softc *sc = ifp->if_softc;
4973         struct bce_tx_ring *txr = ifsq_get_priv(ifsq);
4974         int count = 0;
4975
4976         KKASSERT(txr->ifsq == ifsq);
4977         ASSERT_SERIALIZED(&txr->tx_serialize);
4978
4979         /* If there's no link or the transmit queue is empty then just exit. */
4980         if (!sc->bce_link) {
4981                 ifsq_purge(ifsq);
4982                 return;
4983         }
4984
4985         if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq))
4986                 return;
4987
4988         for (;;) {
4989                 struct mbuf *m_head;
4990
4991                 /*
4992                  * We keep BCE_TX_SPARE_SPACE entries, so bce_encap() is
4993                  * unlikely to fail.
4994                  */
4995                 if (txr->max_tx_bd - txr->used_tx_bd < BCE_TX_SPARE_SPACE) {
4996                         ifsq_set_oactive(ifsq);
4997                         break;
4998                 }
4999
5000                 /* Check for any frames to send. */
5001                 m_head = ifsq_dequeue(ifsq, NULL);
5002                 if (m_head == NULL)
5003                         break;
5004
5005                 /*
5006                  * Pack the data into the transmit ring. If we
5007                  * don't have room, place the mbuf back at the
5008                  * head of the queue and set the OACTIVE flag
5009                  * to wait for the NIC to drain the chain.
5010                  */
5011                 if (bce_encap(txr, &m_head, &count)) {
5012                         IFNET_STAT_INC(ifp, oerrors, 1);
5013                         if (txr->used_tx_bd == 0) {
5014                                 continue;
5015                         } else {
5016                                 ifsq_set_oactive(ifsq);
5017                                 break;
5018                         }
5019                 }
5020
5021                 if (count >= txr->tx_wreg) {
5022                         bce_xmit(txr);
5023                         count = 0;
5024                 }
5025
5026                 /* Send a copy of the frame to any BPF listeners. */
5027                 ETHER_BPF_MTAP(ifp, m_head);
5028
5029                 /* Set the tx timeout. */
5030                 txr->tx_watchdog.wd_timer = BCE_TX_TIMEOUT;
5031         }
5032         if (count > 0)
5033                 bce_xmit(txr);
5034 }
5035
5036 /****************************************************************************/
5037 /* Handles any IOCTL calls from the operating system.                       */
5038 /*                                                                          */
5039 /* Returns:                                                                 */
5040 /*   0 for success, positive value for failure.                             */
5041 /****************************************************************************/
5042 static int
5043 bce_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
5044 {
5045         struct bce_softc *sc = ifp->if_softc;
5046         struct ifreq *ifr = (struct ifreq *)data;
5047         struct mii_data *mii;
5048         int mask, error = 0;
5049
5050         ASSERT_IFNET_SERIALIZED_ALL(ifp);
5051
5052         switch(command) {
5053         case SIOCSIFMTU:
5054                 /* Check that the MTU setting is supported. */
5055                 if (ifr->ifr_mtu < BCE_MIN_MTU ||
5056 #ifdef notyet
5057                     ifr->ifr_mtu > BCE_MAX_JUMBO_MTU
5058 #else
5059                     ifr->ifr_mtu > ETHERMTU
5060 #endif
5061                    ) {
5062                         error = EINVAL;
5063                         break;
5064                 }
5065
5066                 ifp->if_mtu = ifr->ifr_mtu;
5067                 ifp->if_flags &= ~IFF_RUNNING;  /* Force reinitialize */
5068                 bce_init(sc);
5069                 break;
5070
5071         case SIOCSIFFLAGS:
5072                 if (ifp->if_flags & IFF_UP) {
5073                         if (ifp->if_flags & IFF_RUNNING) {
5074                                 mask = ifp->if_flags ^ sc->bce_if_flags;
5075
5076                                 if (mask & (IFF_PROMISC | IFF_ALLMULTI))
5077                                         bce_set_rx_mode(sc);
5078                         } else {
5079                                 bce_init(sc);
5080                         }
5081                 } else if (ifp->if_flags & IFF_RUNNING) {
5082                         bce_stop(sc);
5083
5084                         /* If MFW is running, restart the controller a bit. */
5085                         if (sc->bce_flags & BCE_MFW_ENABLE_FLAG) {
5086                                 bce_reset(sc, BCE_DRV_MSG_CODE_RESET);
5087                                 bce_chipinit(sc);
5088                                 bce_mgmt_init(sc);
5089                         }
5090                 }
5091                 sc->bce_if_flags = ifp->if_flags;
5092                 break;
5093
5094         case SIOCADDMULTI:
5095         case SIOCDELMULTI:
5096                 if (ifp->if_flags & IFF_RUNNING)
5097                         bce_set_rx_mode(sc);
5098                 break;
5099
5100         case SIOCSIFMEDIA:
5101         case SIOCGIFMEDIA:
5102                 mii = device_get_softc(sc->bce_miibus);
5103                 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
5104                 break;
5105
5106         case SIOCSIFCAP:
5107                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
5108                 if (mask & IFCAP_HWCSUM) {
5109                         ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
5110                         if (ifp->if_capenable & IFCAP_TXCSUM)
5111                                 ifp->if_hwassist |= BCE_CSUM_FEATURES;
5112                         else
5113                                 ifp->if_hwassist &= ~BCE_CSUM_FEATURES;
5114                 }
5115                 if (mask & IFCAP_TSO) {
5116                         ifp->if_capenable ^= IFCAP_TSO;
5117                         if (ifp->if_capenable & IFCAP_TSO)
5118                                 ifp->if_hwassist |= CSUM_TSO;
5119                         else
5120                                 ifp->if_hwassist &= ~CSUM_TSO;
5121                 }
5122                 break;
5123
5124         default:
5125                 error = ether_ioctl(ifp, command, data);
5126                 break;
5127         }
5128         return error;
5129 }
5130
5131 /****************************************************************************/
5132 /* Transmit timeout handler.                                                */
5133 /*                                                                          */
5134 /* Returns:                                                                 */
5135 /*   Nothing.                                                               */
5136 /****************************************************************************/
5137 static void
5138 bce_watchdog(struct ifaltq_subque *ifsq)
5139 {
5140         struct ifnet *ifp = ifsq_get_ifp(ifsq);
5141         struct bce_softc *sc = ifp->if_softc;
5142         int i;
5143
5144         ASSERT_IFNET_SERIALIZED_ALL(ifp);
5145
5146         /*
5147          * If we are in this routine because of pause frames, then
5148          * don't reset the hardware.
5149          */
5150         if (REG_RD(sc, BCE_EMAC_TX_STATUS) & BCE_EMAC_TX_STATUS_XOFFED) 
5151                 return;
5152
5153         if_printf(ifp, "Watchdog timeout occurred, resetting!\n");
5154
5155         ifp->if_flags &= ~IFF_RUNNING;  /* Force reinitialize */
5156         bce_init(sc);
5157
5158         IFNET_STAT_INC(ifp, oerrors, 1);
5159
5160         for (i = 0; i < sc->tx_ring_cnt; ++i)
5161                 ifsq_devstart_sched(sc->tx_rings[i].ifsq);
5162 }
5163
5164 #ifdef IFPOLL_ENABLE
5165
/*
 * Polling-mode status handler: mirrors the attention-bit processing of
 * bce_intr() (link changes and fatal attentions) without touching the
 * RX/TX rings.
 */
static void
bce_npoll_status(struct ifnet *ifp)
{
	struct bce_softc *sc = ifp->if_softc;
	struct status_block *sblk = sc->status_block;
	uint32_t status_attn_bits;

	ASSERT_SERIALIZED(&sc->main_serialize);

	status_attn_bits = sblk->status_attn_bits;

	/* Was it a link change interrupt? */
	if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
		bce_phy_intr(sc);

		/*
		 * Clear any transient status updates during link state change.
		 */
		REG_WR(sc, BCE_HC_COMMAND,
		    sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(sc, BCE_HC_COMMAND);	/* flush the write */
	}

	/*
	 * If any other attention is asserted then the chip is toast.
	 * Reinitialize the whole controller; the main serializer is
	 * already held, so only the others are (de)acquired here.
	 */
	if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
	     (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
		if_printf(ifp, "Fatal attention detected: 0x%08X\n",
		    sblk->status_attn_bits);
		bce_serialize_skipmain(sc);
		bce_init(sc);
		bce_deserialize_skipmain(sc);
	}
}
5202
/*
 * Polling-mode RX handler for one RX ring: processes up to 'count'
 * completed RX frames.
 */
static void
bce_npoll_rx(struct ifnet *ifp, void *arg, int count)
{
	struct bce_rx_ring *rxr = arg;
	uint16_t hw_rx_cons;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	/*
	 * Save the status block index value for use when enabling
	 * the interrupt.
	 */
	rxr->last_status_idx = *rxr->hw_status_idx;

	/* Make sure status index is extracted before RX/TX cons */
	cpu_lfence();

	hw_rx_cons = bce_get_hw_rx_cons(rxr);

	/* Check for any completed RX frames. */
	if (hw_rx_cons != rxr->rx_cons)
		bce_rx_intr(rxr, count, hw_rx_cons);
}
5226
5227 static void
5228 bce_npoll_rx_pack(struct ifnet *ifp, void *arg, int count)
5229 {
5230         struct bce_rx_ring *rxr = arg;
5231
5232         KASSERT(rxr->idx == 0, ("not the first RX ring, but %d", rxr->idx));
5233         bce_npoll_rx(ifp, rxr, count);
5234
5235         KASSERT(rxr->sc->rx_ring_cnt != rxr->sc->rx_ring_cnt2,
5236             ("RX ring count %d, count2 %d", rxr->sc->rx_ring_cnt,
5237              rxr->sc->rx_ring_cnt2));
5238
5239         /* Last ring carries packets whose masked hash is 0 */
5240         rxr = &rxr->sc->rx_rings[rxr->sc->rx_ring_cnt - 1];
5241
5242         lwkt_serialize_enter(&rxr->rx_serialize);
5243         bce_npoll_rx(ifp, rxr, count);
5244         lwkt_serialize_exit(&rxr->rx_serialize);
5245 }
5246
5247 static void
5248 bce_npoll_tx(struct ifnet *ifp, void *arg, int count __unused)
5249 {
5250         struct bce_tx_ring *txr = arg;
5251         uint16_t hw_tx_cons;
5252
5253         ASSERT_SERIALIZED(&txr->tx_serialize);
5254
5255         hw_tx_cons = bce_get_hw_tx_cons(txr);
5256
5257         /* Check for any completed TX frames. */
5258         if (hw_tx_cons != txr->tx_cons) {
5259                 bce_tx_intr(txr, hw_tx_cons);
5260                 if (!ifsq_is_empty(txr->ifsq))
5261                         ifsq_devstart(txr->ifsq);
5262         }
5263 }
5264
/*
 * Register or deregister the driver with the ifpoll framework.
 * info != NULL enables polling (interrupts are masked); info == NULL
 * returns to interrupt-driven operation.
 */
static void
bce_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct bce_softc *sc = ifp->if_softc;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info != NULL) {
		/* Status (attention bits) polled under the main serializer. */
		info->ifpi_status.status_func = bce_npoll_status;
		info->ifpi_status.serializer = &sc->main_serialize;

		/* Spread the TX rings over the CPUs starting at npoll_ofs. */
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct bce_tx_ring *txr = &sc->tx_rings[i];
			int idx = i + sc->npoll_ofs;

			KKASSERT(idx < ncpus2);
			info->ifpi_tx[idx].poll_func = bce_npoll_tx;
			info->ifpi_tx[idx].arg = txr;
			info->ifpi_tx[idx].serializer = &txr->tx_serialize;
			ifsq_set_cpuid(txr->ifsq, idx);
		}

		/* Likewise for the RX rings. */
		for (i = 0; i < sc->rx_ring_cnt2; ++i) {
			struct bce_rx_ring *rxr = &sc->rx_rings[i];
			int idx = i + sc->npoll_ofs;

			KKASSERT(idx < ncpus2);
			if (i == 0 && sc->rx_ring_cnt2 != sc->rx_ring_cnt) {
				/*
				 * If RSS is enabled, the packets whose
				 * masked hash are 0 are queued to the
				 * last RX ring; piggyback the last RX
				 * ring's processing in the first RX
				 * polling handler. (see also: comment
				 * in bce_setup_ring_cnt())
				 */
				if (bootverbose) {
					if_printf(ifp, "npoll pack last "
					    "RX ring on cpu%d\n", idx);
				}
				info->ifpi_rx[idx].poll_func =
				    bce_npoll_rx_pack;
			} else {
				info->ifpi_rx[idx].poll_func = bce_npoll_rx;
			}
			info->ifpi_rx[idx].arg = rxr;
			info->ifpi_rx[idx].serializer = &rxr->rx_serialize;
		}

		if (ifp->if_flags & IFF_RUNNING) {
			/* Mask interrupts; polling takes over from here. */
			bce_set_timer_cpuid(sc, TRUE);
			bce_disable_intr(sc);
			bce_npoll_coal_change(sc);
		}
	} else {
		/* Move the subqueues back to the MSI-X interrupt CPUs. */
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			ifsq_set_cpuid(sc->tx_rings[i].ifsq,
			    sc->bce_msix[i].msix_cpuid);
		}

		if (ifp->if_flags & IFF_RUNNING) {
			/* Unmask interrupts and restore coalescing. */
			bce_set_timer_cpuid(sc, FALSE);
			bce_enable_intr(sc);

			sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
			    BCE_COALMASK_RX_BDS_INT;
			bce_coal_change(sc);
		}
	}
}
5336
5337 #endif  /* IFPOLL_ENABLE */
5338
5339 /*
5340  * Interrupt handler.
5341  */
5342 /****************************************************************************/
5343 /* Main interrupt entry point.  Verifies that the controller generated the  */
5344 /* interrupt and then calls a separate routine for handle the various       */
5345 /* interrupt causes (PHY, TX, RX).                                          */
5346 /*                                                                          */
5347 /* Returns:                                                                 */
5348 /*   0 for success, positive value for failure.                             */
5349 /****************************************************************************/
5350 static void
5351 bce_intr(struct bce_softc *sc)
5352 {
5353         struct ifnet *ifp = &sc->arpcom.ac_if;
5354         struct status_block *sblk;
5355         uint16_t hw_rx_cons, hw_tx_cons;
5356         uint32_t status_attn_bits;
5357         struct bce_tx_ring *txr = &sc->tx_rings[0];
5358         struct bce_rx_ring *rxr = &sc->rx_rings[0];
5359
5360         ASSERT_SERIALIZED(&sc->main_serialize);
5361
5362         sblk = sc->status_block;
5363
5364         /*
5365          * Save the status block index value for use during
5366          * the next interrupt.
5367          */
5368         rxr->last_status_idx = *rxr->hw_status_idx;
5369
5370         /* Make sure status index is extracted before RX/TX cons */
5371         cpu_lfence();
5372
5373         /* Check if the hardware has finished any work. */
5374         hw_rx_cons = bce_get_hw_rx_cons(rxr);
5375         hw_tx_cons = bce_get_hw_tx_cons(txr);
5376
5377         status_attn_bits = sblk->status_attn_bits;
5378
5379         /* Was it a link change interrupt? */
5380         if ((status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
5381             (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
5382                 bce_phy_intr(sc);
5383
5384                 /*
5385                  * Clear any transient status updates during link state
5386                  * change.
5387                  */
5388                 REG_WR(sc, BCE_HC_COMMAND,
5389                     sc->hc_command | BCE_HC_COMMAND_COAL_NOW_WO_INT);
5390                 REG_RD(sc, BCE_HC_COMMAND);
5391         }
5392
5393         /*
5394          * If any other attention is asserted then
5395          * the chip is toast.
5396          */
5397         if ((status_attn_bits & ~STATUS_ATTN_BITS_LINK_STATE) !=
5398             (sblk->status_attn_bits_ack & ~STATUS_ATTN_BITS_LINK_STATE)) {
5399                 if_printf(ifp, "Fatal attention detected: 0x%08X\n",
5400                           sblk->status_attn_bits);
5401                 bce_serialize_skipmain(sc);
5402                 bce_init(sc);
5403                 bce_deserialize_skipmain(sc);
5404                 return;
5405         }
5406
5407         /* Check for any completed RX frames. */
5408         lwkt_serialize_enter(&rxr->rx_serialize);
5409         if (hw_rx_cons != rxr->rx_cons)
5410                 bce_rx_intr(rxr, -1, hw_rx_cons);
5411         lwkt_serialize_exit(&rxr->rx_serialize);
5412
5413         /* Check for any completed TX frames. */
5414         lwkt_serialize_enter(&txr->tx_serialize);
5415         if (hw_tx_cons != txr->tx_cons) {
5416                 bce_tx_intr(txr, hw_tx_cons);
5417                 if (!ifsq_is_empty(txr->ifsq))
5418                         ifsq_devstart(txr->ifsq);
5419         }
5420         lwkt_serialize_exit(&txr->tx_serialize);
5421 }
5422
/*
 * Legacy (INTx) interrupt handler.  The interrupt line may be shared,
 * so first check whether this device actually asserted it.
 */
static void
bce_intr_legacy(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct bce_rx_ring *rxr = &sc->rx_rings[0];
	struct status_block *sblk;

	sblk = sc->status_block;

	/*
	 * If the hardware status block index matches the last value
	 * read by the driver and we haven't asserted our interrupt
	 * then there's nothing to do.
	 */
	if (sblk->status_idx == rxr->last_status_idx &&
	    (REG_RD(sc, BCE_PCICFG_MISC_STATUS) &
	     BCE_PCICFG_MISC_STATUS_INTA_VALUE))
		return;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	/*
	 * Read back to deassert IRQ immediately to avoid too
	 * many spurious interrupts.
	 */
	REG_RD(sc, BCE_PCICFG_INT_ACK_CMD);

	bce_intr(sc);

	/* Re-enable interrupts, reporting the last seen status index. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT | rxr->last_status_idx);
	bce_reenable_intr(rxr);
}
5461
/*
 * MSI interrupt handler.  Unlike INTx, no shared-line check is needed;
 * just ack, service, and re-enable.
 */
static void
bce_intr_msi(void *xsc)
{
	struct bce_softc *sc = xsc;

	/* Ack the interrupt and stop others from occurring. */
	REG_WR(sc, BCE_PCICFG_INT_ACK_CMD,
	       BCE_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BCE_PCICFG_INT_ACK_CMD_MASK_INT);

	bce_intr(sc);

	/* Re-enable interrupts */
	bce_reenable_intr(&sc->rx_rings[0]);
}
5477
/*
 * One-shot MSI interrupt handler: the hardware masks further interrupts
 * itself, so no explicit ack write is required before servicing.
 */
static void
bce_intr_msi_oneshot(void *xsc)
{
	struct bce_softc *sc = xsc;

	bce_intr(sc);

	/* Re-enable interrupts */
	bce_reenable_intr(&sc->rx_rings[0]);
}
5488
/*
 * MSI-X interrupt handler for a combined RX/TX ring pair.  Runs with
 * the RX ring's serializer held; the paired TX ring shares the index.
 */
static void
bce_intr_msix_rxtx(void *xrxr)
{
	struct bce_rx_ring *rxr = xrxr;
	struct bce_tx_ring *txr;
	uint16_t hw_rx_cons, hw_tx_cons;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	KKASSERT(rxr->idx < rxr->sc->tx_ring_cnt);
	txr = &rxr->sc->tx_rings[rxr->idx];

	/*
	 * Save the status block index value for use during
	 * the next interrupt.
	 */
	rxr->last_status_idx = *rxr->hw_status_idx;

	/* Make sure status index is extracted before RX/TX cons */
	cpu_lfence();

	/* Check if the hardware has finished any work. */
	hw_rx_cons = bce_get_hw_rx_cons(rxr);
	if (hw_rx_cons != rxr->rx_cons)
		bce_rx_intr(rxr, -1, hw_rx_cons);

	/* Check for any completed TX frames. */
	hw_tx_cons = bce_get_hw_tx_cons(txr);
	lwkt_serialize_enter(&txr->tx_serialize);
	if (hw_tx_cons != txr->tx_cons) {
		bce_tx_intr(txr, hw_tx_cons);
		if (!ifsq_is_empty(txr->ifsq))
			ifsq_devstart(txr->ifsq);
	}
	lwkt_serialize_exit(&txr->tx_serialize);

	/* Re-enable interrupts */
	bce_reenable_intr(rxr);
}
5528
/*
 * MSI-X interrupt handler for an RX-only ring.
 */
static void
bce_intr_msix_rx(void *xrxr)
{
	struct bce_rx_ring *rxr = xrxr;
	uint16_t hw_rx_cons;

	ASSERT_SERIALIZED(&rxr->rx_serialize);

	/*
	 * Save the status block index value for use during
	 * the next interrupt.
	 */
	rxr->last_status_idx = *rxr->hw_status_idx;

	/* Make sure status index is extracted before RX cons */
	cpu_lfence();

	/* Check if the hardware has finished any work. */
	hw_rx_cons = bce_get_hw_rx_cons(rxr);
	if (hw_rx_cons != rxr->rx_cons)
		bce_rx_intr(rxr, -1, hw_rx_cons);

	/* Re-enable interrupts */
	bce_reenable_intr(rxr);
}
5554
5555 /****************************************************************************/
5556 /* Programs the various packet receive modes (broadcast and multicast).     */
5557 /*                                                                          */
5558 /* Returns:                                                                 */
5559 /*   Nothing.                                                               */
5560 /****************************************************************************/
5561 static void
5562 bce_set_rx_mode(struct bce_softc *sc)
5563 {
5564         struct ifnet *ifp = &sc->arpcom.ac_if;
5565         struct ifmultiaddr *ifma;
5566         uint32_t hashes[NUM_MC_HASH_REGISTERS] = { 0, 0, 0, 0, 0, 0, 0, 0 };
5567         uint32_t rx_mode, sort_mode;
5568         int h, i;
5569
5570         ASSERT_IFNET_SERIALIZED_ALL(ifp);
5571
5572         /* Initialize receive mode default settings. */
5573         rx_mode = sc->rx_mode &
5574                   ~(BCE_EMAC_RX_MODE_PROMISCUOUS |
5575                     BCE_EMAC_RX_MODE_KEEP_VLAN_TAG);
5576         sort_mode = 1 | BCE_RPM_SORT_USER0_BC_EN;
5577
5578         /*
5579          * ASF/IPMI/UMP firmware requires that VLAN tag stripping
5580          * be enbled.
5581          */
5582         if (!(BCE_IF_CAPABILITIES & IFCAP_VLAN_HWTAGGING) &&
5583             !(sc->bce_flags & BCE_MFW_ENABLE_FLAG))
5584                 rx_mode |= BCE_EMAC_RX_MODE_KEEP_VLAN_TAG;
5585
5586         /*
5587          * Check for promiscuous, all multicast, or selected
5588          * multicast address filtering.
5589          */
5590         if (ifp->if_flags & IFF_PROMISC) {
5591                 /* Enable promiscuous mode. */
5592                 rx_mode |= BCE_EMAC_RX_MODE_PROMISCUOUS;
5593                 sort_mode |= BCE_RPM_SORT_USER0_PROM_EN;
5594         } else if (ifp->if_flags & IFF_ALLMULTI) {
5595                 /* Enable all multicast addresses. */
5596                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5597                         REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5598                                0xffffffff);
5599                 }
5600                 sort_mode |= BCE_RPM_SORT_USER0_MC_EN;
5601         } else {
5602                 /* Accept one or more multicast(s). */
5603                 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
5604                         if (ifma->ifma_addr->sa_family != AF_LINK)
5605                                 continue;
5606                         h = ether_crc32_le(
5607                             LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
5608                             ETHER_ADDR_LEN) & 0xFF;
5609                         hashes[(h & 0xE0) >> 5] |= 1 << (h & 0x1F);
5610                 }
5611
5612                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
5613                         REG_WR(sc, BCE_EMAC_MULTICAST_HASH0 + (i * 4),
5614                                hashes[i]);
5615                 }
5616                 sort_mode |= BCE_RPM_SORT_USER0_MC_HSH_EN;
5617         }
5618
5619         /* Only make changes if the recive mode has actually changed. */
5620         if (rx_mode != sc->rx_mode) {
5621                 sc->rx_mode = rx_mode;
5622                 REG_WR(sc, BCE_EMAC_RX_MODE, rx_mode);
5623         }
5624
5625         /* Disable and clear the exisitng sort before enabling a new sort. */
5626         REG_WR(sc, BCE_RPM_SORT_USER0, 0x0);
5627         REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode);
5628         REG_WR(sc, BCE_RPM_SORT_USER0, sort_mode | BCE_RPM_SORT_USER0_ENA);
5629 }
5630
5631 /****************************************************************************/
5632 /* Called periodically to updates statistics from the controllers           */
5633 /* statistics block.                                                        */
5634 /*                                                                          */
5635 /* Returns:                                                                 */
5636 /*   Nothing.                                                               */
5637 /****************************************************************************/
static void
bce_stats_update(struct bce_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct statistics_block *stats = sc->stats_block;

	/* Caller must hold the main serializer; we read the shared stats block. */
	ASSERT_SERIALIZED(&sc->main_serialize);

	/*
	 * Certain controllers don't report carrier sense errors correctly.
	 * See errata E11_5708CA0_1165.
	 */
	if (!(BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5706) &&
	    !(BCE_CHIP_ID(sc) == BCE_CHIP_ID_5708_A0)) {
		IFNET_STAT_INC(ifp, oerrors,
			(u_long)stats->stat_Dot3StatsCarrierSenseErrors);
	}

	/*
	 * Update the sysctl statistics from the hardware statistics.
	 *
	 * The 64-bit octet/packet counters are exported by the chip as
	 * hi/lo 32-bit register pairs; reassemble each pair here.
	 */
	sc->stat_IfHCInOctets =
		((uint64_t)stats->stat_IfHCInOctets_hi << 32) +
		 (uint64_t)stats->stat_IfHCInOctets_lo;

	sc->stat_IfHCInBadOctets =
		((uint64_t)stats->stat_IfHCInBadOctets_hi << 32) +
		 (uint64_t)stats->stat_IfHCInBadOctets_lo;

	sc->stat_IfHCOutOctets =
		((uint64_t)stats->stat_IfHCOutOctets_hi << 32) +
		 (uint64_t)stats->stat_IfHCOutOctets_lo;

	sc->stat_IfHCOutBadOctets =
		((uint64_t)stats->stat_IfHCOutBadOctets_hi << 32) +
		 (uint64_t)stats->stat_IfHCOutBadOctets_lo;

	sc->stat_IfHCInUcastPkts =
		((uint64_t)stats->stat_IfHCInUcastPkts_hi << 32) +
		 (uint64_t)stats->stat_IfHCInUcastPkts_lo;

	sc->stat_IfHCInMulticastPkts =
		((uint64_t)stats->stat_IfHCInMulticastPkts_hi << 32) +
		 (uint64_t)stats->stat_IfHCInMulticastPkts_lo;

	sc->stat_IfHCInBroadcastPkts =
		((uint64_t)stats->stat_IfHCInBroadcastPkts_hi << 32) +
		 (uint64_t)stats->stat_IfHCInBroadcastPkts_lo;

	sc->stat_IfHCOutUcastPkts =
		((uint64_t)stats->stat_IfHCOutUcastPkts_hi << 32) +
		 (uint64_t)stats->stat_IfHCOutUcastPkts_lo;

	sc->stat_IfHCOutMulticastPkts =
		((uint64_t)stats->stat_IfHCOutMulticastPkts_hi << 32) +
		 (uint64_t)stats->stat_IfHCOutMulticastPkts_lo;

	sc->stat_IfHCOutBroadcastPkts =
		((uint64_t)stats->stat_IfHCOutBroadcastPkts_hi << 32) +
		 (uint64_t)stats->stat_IfHCOutBroadcastPkts_lo;

	/* The remaining counters are plain 32-bit copies. */
	sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors =
		stats->stat_emac_tx_stat_dot3statsinternalmactransmiterrors;

	sc->stat_Dot3StatsCarrierSenseErrors =
		stats->stat_Dot3StatsCarrierSenseErrors;

	sc->stat_Dot3StatsFCSErrors =
		stats->stat_Dot3StatsFCSErrors;

	sc->stat_Dot3StatsAlignmentErrors =
		stats->stat_Dot3StatsAlignmentErrors;

	sc->stat_Dot3StatsSingleCollisionFrames =
		stats->stat_Dot3StatsSingleCollisionFrames;

	sc->stat_Dot3StatsMultipleCollisionFrames =
		stats->stat_Dot3StatsMultipleCollisionFrames;

	sc->stat_Dot3StatsDeferredTransmissions =
		stats->stat_Dot3StatsDeferredTransmissions;

	sc->stat_Dot3StatsExcessiveCollisions =
		stats->stat_Dot3StatsExcessiveCollisions;

	sc->stat_Dot3StatsLateCollisions =
		stats->stat_Dot3StatsLateCollisions;

	sc->stat_EtherStatsCollisions =
		stats->stat_EtherStatsCollisions;

	sc->stat_EtherStatsFragments =
		stats->stat_EtherStatsFragments;

	sc->stat_EtherStatsJabbers =
		stats->stat_EtherStatsJabbers;

	sc->stat_EtherStatsUndersizePkts =
		stats->stat_EtherStatsUndersizePkts;

	sc->stat_EtherStatsOverrsizePkts =
		stats->stat_EtherStatsOverrsizePkts;

	sc->stat_EtherStatsPktsRx64Octets =
		stats->stat_EtherStatsPktsRx64Octets;

	sc->stat_EtherStatsPktsRx65Octetsto127Octets =
		stats->stat_EtherStatsPktsRx65Octetsto127Octets;

	sc->stat_EtherStatsPktsRx128Octetsto255Octets =
		stats->stat_EtherStatsPktsRx128Octetsto255Octets;

	sc->stat_EtherStatsPktsRx256Octetsto511Octets =
		stats->stat_EtherStatsPktsRx256Octetsto511Octets;

	sc->stat_EtherStatsPktsRx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsRx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsRx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsRx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsRx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsRx1523Octetsto9022Octets;

	sc->stat_EtherStatsPktsTx64Octets =
		stats->stat_EtherStatsPktsTx64Octets;

	sc->stat_EtherStatsPktsTx65Octetsto127Octets =
		stats->stat_EtherStatsPktsTx65Octetsto127Octets;

	sc->stat_EtherStatsPktsTx128Octetsto255Octets =
		stats->stat_EtherStatsPktsTx128Octetsto255Octets;

	sc->stat_EtherStatsPktsTx256Octetsto511Octets =
		stats->stat_EtherStatsPktsTx256Octetsto511Octets;

	sc->stat_EtherStatsPktsTx512Octetsto1023Octets =
		stats->stat_EtherStatsPktsTx512Octetsto1023Octets;

	sc->stat_EtherStatsPktsTx1024Octetsto1522Octets =
		stats->stat_EtherStatsPktsTx1024Octetsto1522Octets;

	sc->stat_EtherStatsPktsTx1523Octetsto9022Octets =
		stats->stat_EtherStatsPktsTx1523Octetsto9022Octets;

	sc->stat_XonPauseFramesReceived =
		stats->stat_XonPauseFramesReceived;

	sc->stat_XoffPauseFramesReceived =
		stats->stat_XoffPauseFramesReceived;

	sc->stat_OutXonSent =
		stats->stat_OutXonSent;

	sc->stat_OutXoffSent =
		stats->stat_OutXoffSent;

	sc->stat_FlowControlDone =
		stats->stat_FlowControlDone;

	sc->stat_MacControlFramesReceived =
		stats->stat_MacControlFramesReceived;

	sc->stat_XoffStateEntered =
		stats->stat_XoffStateEntered;

	sc->stat_IfInFramesL2FilterDiscards =
		stats->stat_IfInFramesL2FilterDiscards;

	sc->stat_IfInRuleCheckerDiscards =
		stats->stat_IfInRuleCheckerDiscards;

	sc->stat_IfInFTQDiscards =
		stats->stat_IfInFTQDiscards;

	sc->stat_IfInMBUFDiscards =
		stats->stat_IfInMBUFDiscards;

	sc->stat_IfInRuleCheckerP4Hit =
		stats->stat_IfInRuleCheckerP4Hit;

	sc->stat_CatchupInRuleCheckerDiscards =
		stats->stat_CatchupInRuleCheckerDiscards;

	sc->stat_CatchupInFTQDiscards =
		stats->stat_CatchupInFTQDiscards;

	sc->stat_CatchupInMBUFDiscards =
		stats->stat_CatchupInMBUFDiscards;

	sc->stat_CatchupInRuleCheckerP4Hit =
		stats->stat_CatchupInRuleCheckerP4Hit;

	/*
	 * NOTE(review): 0x120084 appears to be a COM processor scratch
	 * counter of frames dropped for lack of RX buffers (it is folded
	 * into ierrors below) -- confirm against the NetXtreme II
	 * firmware documentation.
	 */
	sc->com_no_buffers = REG_RD_IND(sc, 0x120084);

	/*
	 * Update the interface statistics from the
	 * hardware statistics.
	 */
	IFNET_STAT_SET(ifp, collisions, (u_long)sc->stat_EtherStatsCollisions);

	IFNET_STAT_SET(ifp, ierrors, (u_long)sc->stat_EtherStatsUndersizePkts +
	    (u_long)sc->stat_EtherStatsOverrsizePkts +
	    (u_long)sc->stat_IfInMBUFDiscards +
	    (u_long)sc->stat_Dot3StatsAlignmentErrors +
	    (u_long)sc->stat_Dot3StatsFCSErrors +
	    (u_long)sc->stat_IfInRuleCheckerDiscards +
	    (u_long)sc->stat_IfInFTQDiscards +
	    (u_long)sc->com_no_buffers);

	IFNET_STAT_SET(ifp, oerrors,
	    (u_long)sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
	    (u_long)sc->stat_Dot3StatsExcessiveCollisions +
	    (u_long)sc->stat_Dot3StatsLateCollisions);
}
5853
5854 /****************************************************************************/
5855 /* Periodic function to notify the bootcode that the driver is still        */
5856 /* present.                                                                 */
5857 /*                                                                          */
5858 /* Returns:                                                                 */
5859 /*   Nothing.                                                               */
5860 /****************************************************************************/
5861 static void
5862 bce_pulse(void *xsc)
5863 {
5864         struct bce_softc *sc = xsc;
5865         struct ifnet *ifp = &sc->arpcom.ac_if;
5866         uint32_t msg;
5867
5868         lwkt_serialize_enter(&sc->main_serialize);
5869
5870         /* Tell the firmware that the driver is still running. */
5871         msg = (uint32_t)++sc->bce_fw_drv_pulse_wr_seq;
5872         bce_shmem_wr(sc, BCE_DRV_PULSE_MB, msg);
5873
5874         /* Update the bootcode condition. */
5875         sc->bc_state = bce_shmem_rd(sc, BCE_BC_STATE_CONDITION);
5876
5877         /* Report whether the bootcode still knows the driver is running. */
5878         if (!sc->bce_drv_cardiac_arrest) {
5879                 if (!(sc->bc_state & BCE_CONDITION_DRV_PRESENT)) {
5880                         sc->bce_drv_cardiac_arrest = 1;
5881                         if_printf(ifp, "Bootcode lost the driver pulse! "
5882                             "(bc_state = 0x%08X)\n", sc->bc_state);
5883                 }
5884         } else {
5885                 /*
5886                  * Not supported by all bootcode versions.
5887                  * (v5.0.11+ and v5.2.1+)  Older bootcode
5888                  * will require the driver to reset the
5889                  * controller to clear this condition.
5890                  */
5891                 if (sc->bc_state & BCE_CONDITION_DRV_PRESENT) {
5892                         sc->bce_drv_cardiac_arrest = 0;
5893                         if_printf(ifp, "Bootcode found the driver pulse! "
5894                             "(bc_state = 0x%08X)\n", sc->bc_state);
5895                 }
5896         }
5897
5898         /* Schedule the next pulse. */
5899         callout_reset_bycpu(&sc->bce_pulse_callout, hz, bce_pulse, sc,
5900             sc->bce_timer_cpuid);
5901
5902         lwkt_serialize_exit(&sc->main_serialize);
5903 }
5904
5905 /****************************************************************************/
5906 /* Periodic function to check whether MSI is lost                           */
5907 /*                                                                          */
5908 /* Returns:                                                                 */
5909 /*   Nothing.                                                               */
5910 /****************************************************************************/
static void
bce_check_msi(void *xsc)
{
	struct bce_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct status_block *sblk = sc->status_block;
	struct bce_tx_ring *txr = &sc->tx_rings[0];
	struct bce_rx_ring *rxr = &sc->rx_rings[0];

	lwkt_serialize_enter(&sc->main_serialize);

	/* This callout runs on the CPU that services the MSI vector. */
	KKASSERT(mycpuid == sc->bce_msix[0].msix_cpuid);

	/* Stop checking if the interface is down or in polling mode. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_NPOLLING)) != IFF_RUNNING) {
		lwkt_serialize_exit(&sc->main_serialize);
		return;
	}

	/*
	 * Work is pending (RX/TX consumer indices behind the hardware,
	 * or an unacknowledged link state attention), yet the snapshot
	 * taken on the previous check has not advanced -- no interrupt
	 * has been serviced in between, so the MSI may have been lost.
	 */
	if (bce_get_hw_rx_cons(rxr) != rxr->rx_cons ||
	    bce_get_hw_tx_cons(txr) != txr->tx_cons ||
	    (sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE)) {
		if (sc->bce_check_rx_cons == rxr->rx_cons &&
		    sc->bce_check_tx_cons == txr->tx_cons &&
		    sc->bce_check_status_idx == rxr->last_status_idx) {
			uint32_t msi_ctrl;

			/*
			 * Require two consecutive stalled checks before
			 * declaring the MSI lost; the goto skips the
			 * snapshot reset below so the stall persists.
			 */
			if (!sc->bce_msi_maylose) {
				sc->bce_msi_maylose = TRUE;
				goto done;
			}

			msi_ctrl = REG_RD(sc, BCE_PCICFG_MSI_CONTROL);
			if (msi_ctrl & BCE_PCICFG_MSI_CONTROL_ENABLE) {
				if (bootverbose)
					if_printf(ifp, "lost MSI\n");

				/* Toggle MSI enable to resynchronize it. */
				REG_WR(sc, BCE_PCICFG_MSI_CONTROL,
				    msi_ctrl & ~BCE_PCICFG_MSI_CONTROL_ENABLE);
				REG_WR(sc, BCE_PCICFG_MSI_CONTROL, msi_ctrl);

				/* Run the handler the lost MSI would have. */
				bce_intr_msi(sc);
			} else if (bootverbose) {
				if_printf(ifp, "MSI may be lost\n");
			}
		}
	}
	/* Record the current state as the snapshot for the next check. */
	sc->bce_msi_maylose = FALSE;
	sc->bce_check_rx_cons = rxr->rx_cons;
	sc->bce_check_tx_cons = txr->tx_cons;
	sc->bce_check_status_idx = rxr->last_status_idx;

done:
	callout_reset(&sc->bce_ckmsi_callout, BCE_MSI_CKINTVL,
	    bce_check_msi, sc);
	lwkt_serialize_exit(&sc->main_serialize);
}
5968
5969 /****************************************************************************/
5970 /* Periodic function to perform maintenance tasks.                          */
5971 /*                                                                          */
5972 /* Returns:                                                                 */
5973 /*   Nothing.                                                               */
5974 /****************************************************************************/
5975 static void
5976 bce_tick_serialized(struct bce_softc *sc)
5977 {
5978         struct mii_data *mii;
5979
5980         ASSERT_SERIALIZED(&sc->main_serialize);
5981
5982         /* Update the statistics from the hardware statistics block. */
5983         bce_stats_update(sc);
5984
5985         /* Schedule the next tick. */
5986         callout_reset_bycpu(&sc->bce_tick_callout, hz, bce_tick, sc,
5987             sc->bce_timer_cpuid);
5988
5989         /* If link is up already up then we're done. */
5990         if (sc->bce_link)
5991                 return;
5992
5993         mii = device_get_softc(sc->bce_miibus);
5994         mii_tick(mii);
5995
5996         /* Check if the link has come up. */
5997         if ((mii->mii_media_status & IFM_ACTIVE) &&
5998             IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
5999                 int i;
6000
6001                 sc->bce_link++;
6002                 /* Now that link is up, handle any outstanding TX traffic. */
6003                 for (i = 0; i < sc->tx_ring_cnt; ++i)
6004                         ifsq_devstart_sched(sc->tx_rings[i].ifsq);
6005         }
6006 }
6007
6008 static void
6009 bce_tick(void *xsc)
6010 {
6011         struct bce_softc *sc = xsc;
6012
6013         lwkt_serialize_enter(&sc->main_serialize);
6014         bce_tick_serialized(sc);
6015         lwkt_serialize_exit(&sc->main_serialize);
6016 }
6017
6018 /****************************************************************************/
6019 /* Adds any sysctl parameters for tuning or debugging purposes.             */
6020 /*                                                                          */
6021 /* Returns:                                                                 */
6022 /*   0 for success, positive value for failure.                             */
6023 /****************************************************************************/
6024 static void
6025 bce_add_sysctls(struct bce_softc *sc)
6026 {
6027         struct sysctl_ctx_list *ctx;
6028         struct sysctl_oid_list *children;
6029 #if defined(BCE_TSS_DEBUG) || defined(BCE_RSS_DEBUG)
6030         char node[32];
6031         int i;
6032 #endif
6033
6034         sysctl_ctx_init(&sc->bce_sysctl_ctx);
6035         sc->bce_sysctl_tree = SYSCTL_ADD_NODE(&sc->bce_sysctl_ctx,
6036                                               SYSCTL_STATIC_CHILDREN(_hw),
6037                                               OID_AUTO,
6038                                               device_get_nameunit(sc->bce_dev),
6039                                               CTLFLAG_RD, 0, "");
6040         if (sc->bce_sysctl_tree == NULL) {
6041                 device_printf(sc->bce_dev, "can't add sysctl node\n");
6042                 return;
6043         }
6044
6045         ctx = &sc->bce_sysctl_ctx;
6046         children = SYSCTL_CHILDREN(sc->bce_sysctl_tree);
6047
6048         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds_int",
6049                         CTLTYPE_INT | CTLFLAG_RW,
6050                         sc, 0, bce_sysctl_tx_bds_int, "I",
6051                         "Send max coalesced BD count during interrupt");
6052         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_bds",
6053                         CTLTYPE_INT | CTLFLAG_RW,
6054                         sc, 0, bce_sysctl_tx_bds, "I",
6055                         "Send max coalesced BD count");
6056         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks_int",
6057                         CTLTYPE_INT | CTLFLAG_RW,
6058                         sc, 0, bce_sysctl_tx_ticks_int, "I",
6059                         "Send coalescing ticks during interrupt");
6060         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_ticks",
6061                         CTLTYPE_INT | CTLFLAG_RW,
6062                         sc, 0, bce_sysctl_tx_ticks, "I",
6063                         "Send coalescing ticks");
6064
6065         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds_int",
6066                         CTLTYPE_INT | CTLFLAG_RW,
6067                         sc, 0, bce_sysctl_rx_bds_int, "I",
6068                         "Receive max coalesced BD count during interrupt");
6069         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_bds",
6070                         CTLTYPE_INT | CTLFLAG_RW,
6071                         sc, 0, bce_sysctl_rx_bds, "I",
6072                         "Receive max coalesced BD count");
6073         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks_int",
6074                         CTLTYPE_INT | CTLFLAG_RW,
6075                         sc, 0, bce_sysctl_rx_ticks_int, "I",
6076                         "Receive coalescing ticks during interrupt");
6077         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_ticks",
6078                         CTLTYPE_INT | CTLFLAG_RW,
6079                         sc, 0, bce_sysctl_rx_ticks, "I",
6080                         "Receive coalescing ticks");
6081
6082         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_rings",
6083                 CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
6084         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_pages",
6085                 CTLFLAG_RD, &sc->rx_rings[0].rx_pages, 0, "# of RX pages");
6086
6087         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_rings",
6088                 CTLFLAG_RD, &sc->tx_ring_cnt, 0, "# of TX rings");
6089         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pages",
6090                 CTLFLAG_RD, &sc->tx_rings[0].tx_pages, 0, "# of TX pages");
6091
6092         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wreg",
6093                 CTLFLAG_RW, &sc->tx_rings[0].tx_wreg, 0,
6094                 "# segments before write to hardware registers");
6095
6096 #ifdef IFPOLL_ENABLE
6097         SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "npoll_offset",
6098             CTLTYPE_INT|CTLFLAG_RW, sc, 0, bce_sysctl_npoll_offset,
6099             "I", "NPOLLING cpu offset");
6100 #endif
6101
6102 #ifdef BCE_RSS_DEBUG
6103         SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rss_debug",
6104             CTLFLAG_RW, &sc->rss_debug, 0, "RSS debug level");
6105         for (i = 0; i < sc->rx_ring_cnt; ++i) {
6106                 ksnprintf(node, sizeof(node), "rx%d_pkt", i);
6107                 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6108                     CTLFLAG_RW, &sc->rx_rings[i].rx_pkts,
6109                     "RXed packets");
6110         }
6111 #endif
6112
6113 #ifdef BCE_TSS_DEBUG
6114         for (i = 0; i < sc->tx_ring_cnt; ++i) {
6115                 ksnprintf(node, sizeof(node), "tx%d_pkt", i);
6116                 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, node,
6117                     CTLFLAG_RW, &sc->tx_rings[i].tx_pkts,
6118                     "TXed packets");
6119         }
6120 #endif
6121
6122         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
6123                 "stat_IfHCInOctets",
6124                 CTLFLAG_RD, &sc->stat_IfHCInOctets,
6125                 "Bytes received");
6126
6127         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
6128                 "stat_IfHCInBadOctets",
6129                 CTLFLAG_RD, &sc->stat_IfHCInBadOctets,
6130                 "Bad bytes received");
6131
6132         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
6133                 "stat_IfHCOutOctets",
6134                 CTLFLAG_RD, &sc->stat_IfHCOutOctets,
6135                 "Bytes sent");
6136
6137         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
6138                 "stat_IfHCOutBadOctets",
6139                 CTLFLAG_RD, &sc->stat_IfHCOutBadOctets,
6140                 "Bad bytes sent");
6141
6142         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
6143                 "stat_IfHCInUcastPkts",
6144                 CTLFLAG_RD, &sc->stat_IfHCInUcastPkts,
6145                 "Unicast packets received");
6146
6147         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
6148                 "stat_IfHCInMulticastPkts",
6149                 CTLFLAG_RD, &sc->stat_IfHCInMulticastPkts,
6150                 "Multicast packets received");
6151
6152         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
6153                 "stat_IfHCInBroadcastPkts",
6154                 CTLFLAG_RD, &sc->stat_IfHCInBroadcastPkts,
6155                 "Broadcast packets received");
6156
6157         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
6158                 "stat_IfHCOutUcastPkts",
6159                 CTLFLAG_RD, &sc->stat_IfHCOutUcastPkts,
6160                 "Unicast packets sent");
6161
6162         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
6163                 "stat_IfHCOutMulticastPkts",
6164                 CTLFLAG_RD, &sc->stat_IfHCOutMulticastPkts,
6165                 "Multicast packets sent");
6166
6167         SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, 
6168                 "stat_IfHCOutBroadcastPkts",
6169                 CTLFLAG_RD, &sc->stat_IfHCOutBroadcastPkts,
6170                 "Broadcast packets sent");
6171
6172         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6173                 "stat_emac_tx_stat_dot3statsinternalmactransmiterrors",
6174                 CTLFLAG_RD, &sc->stat_emac_tx_stat_dot3statsinternalmactransmiterrors,
6175                 0, "Internal MAC transmit errors");
6176
6177         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6178                 "stat_Dot3StatsCarrierSenseErrors",
6179                 CTLFLAG_RD, &sc->stat_Dot3StatsCarrierSenseErrors,
6180                 0, "Carrier sense errors");
6181
6182         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6183                 "stat_Dot3StatsFCSErrors",
6184                 CTLFLAG_RD, &sc->stat_Dot3StatsFCSErrors,
6185                 0, "Frame check sequence errors");
6186
6187         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6188                 "stat_Dot3StatsAlignmentErrors",
6189                 CTLFLAG_RD, &sc->stat_Dot3StatsAlignmentErrors,
6190                 0, "Alignment errors");
6191
6192         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6193                 "stat_Dot3StatsSingleCollisionFrames",
6194                 CTLFLAG_RD, &sc->stat_Dot3StatsSingleCollisionFrames,
6195                 0, "Single Collision Frames");
6196
6197         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6198                 "stat_Dot3StatsMultipleCollisionFrames",
6199                 CTLFLAG_RD, &sc->stat_Dot3StatsMultipleCollisionFrames,
6200                 0, "Multiple Collision Frames");
6201
6202         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6203                 "stat_Dot3StatsDeferredTransmissions",
6204                 CTLFLAG_RD, &sc->stat_Dot3StatsDeferredTransmissions,
6205                 0, "Deferred Transmissions");
6206
6207         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6208                 "stat_Dot3StatsExcessiveCollisions",
6209                 CTLFLAG_RD, &sc->stat_Dot3StatsExcessiveCollisions,
6210                 0, "Excessive Collisions");
6211
6212         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6213                 "stat_Dot3StatsLateCollisions",
6214                 CTLFLAG_RD, &sc->stat_Dot3StatsLateCollisions,
6215                 0, "Late Collisions");
6216
6217         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6218                 "stat_EtherStatsCollisions",
6219                 CTLFLAG_RD, &sc->stat_EtherStatsCollisions,
6220                 0, "Collisions");
6221
6222         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6223                 "stat_EtherStatsFragments",
6224                 CTLFLAG_RD, &sc->stat_EtherStatsFragments,
6225                 0, "Fragments");
6226
6227         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6228                 "stat_EtherStatsJabbers",
6229                 CTLFLAG_RD, &sc->stat_EtherStatsJabbers,
6230                 0, "Jabbers");
6231
6232         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6233                 "stat_EtherStatsUndersizePkts",
6234                 CTLFLAG_RD, &sc->stat_EtherStatsUndersizePkts,
6235                 0, "Undersize packets");
6236
6237         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6238                 "stat_EtherStatsOverrsizePkts",
6239                 CTLFLAG_RD, &sc->stat_EtherStatsOverrsizePkts,
6240                 0, "stat_EtherStatsOverrsizePkts");
6241
6242         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6243                 "stat_EtherStatsPktsRx64Octets",
6244                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx64Octets,
6245                 0, "Bytes received in 64 byte packets");
6246
6247         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6248                 "stat_EtherStatsPktsRx65Octetsto127Octets",
6249                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx65Octetsto127Octets,
6250                 0, "Bytes received in 65 to 127 byte packets");
6251
6252         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6253                 "stat_EtherStatsPktsRx128Octetsto255Octets",
6254                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx128Octetsto255Octets,
6255                 0, "Bytes received in 128 to 255 byte packets");
6256
6257         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6258                 "stat_EtherStatsPktsRx256Octetsto511Octets",
6259                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx256Octetsto511Octets,
6260                 0, "Bytes received in 256 to 511 byte packets");
6261
6262         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6263                 "stat_EtherStatsPktsRx512Octetsto1023Octets",
6264                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx512Octetsto1023Octets,
6265                 0, "Bytes received in 512 to 1023 byte packets");
6266
6267         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6268                 "stat_EtherStatsPktsRx1024Octetsto1522Octets",
6269                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1024Octetsto1522Octets,
6270                 0, "Bytes received in 1024 t0 1522 byte packets");
6271
6272         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6273                 "stat_EtherStatsPktsRx1523Octetsto9022Octets",
6274                 CTLFLAG_RD, &sc->stat_EtherStatsPktsRx1523Octetsto9022Octets,
6275                 0, "Bytes received in 1523 to 9022 byte packets");
6276
6277         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6278                 "stat_EtherStatsPktsTx64Octets",
6279                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx64Octets,
6280                 0, "Bytes sent in 64 byte packets");
6281
6282         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6283                 "stat_EtherStatsPktsTx65Octetsto127Octets",
6284                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx65Octetsto127Octets,
6285                 0, "Bytes sent in 65 to 127 byte packets");
6286
6287         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6288                 "stat_EtherStatsPktsTx128Octetsto255Octets",
6289                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx128Octetsto255Octets,
6290                 0, "Bytes sent in 128 to 255 byte packets");
6291
6292         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6293                 "stat_EtherStatsPktsTx256Octetsto511Octets",
6294                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx256Octetsto511Octets,
6295                 0, "Bytes sent in 256 to 511 byte packets");
6296
6297         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6298                 "stat_EtherStatsPktsTx512Octetsto1023Octets",
6299                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx512Octetsto1023Octets,
6300                 0, "Bytes sent in 512 to 1023 byte packets");
6301
6302         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6303                 "stat_EtherStatsPktsTx1024Octetsto1522Octets",
6304                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1024Octetsto1522Octets,
6305                 0, "Bytes sent in 1024 to 1522 byte packets");
6306
6307         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6308                 "stat_EtherStatsPktsTx1523Octetsto9022Octets",
6309                 CTLFLAG_RD, &sc->stat_EtherStatsPktsTx1523Octetsto9022Octets,
6310                 0, "Bytes sent in 1523 to 9022 byte packets");
6311
6312         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6313                 "stat_XonPauseFramesReceived",
6314                 CTLFLAG_RD, &sc->stat_XonPauseFramesReceived,
6315                 0, "XON pause frames receved");
6316
6317         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6318                 "stat_XoffPauseFramesReceived",
6319                 CTLFLAG_RD, &sc->stat_XoffPauseFramesReceived,
6320                 0, "XOFF pause frames received");
6321
6322         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6323                 "stat_OutXonSent",
6324                 CTLFLAG_RD, &sc->stat_OutXonSent,
6325                 0, "XON pause frames sent");
6326
6327         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6328                 "stat_OutXoffSent",
6329                 CTLFLAG_RD, &sc->stat_OutXoffSent,
6330                 0, "XOFF pause frames sent");
6331
6332         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6333                 "stat_FlowControlDone",
6334                 CTLFLAG_RD, &sc->stat_FlowControlDone,
6335                 0, "Flow control done");
6336
6337         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6338                 "stat_MacControlFramesReceived",
6339                 CTLFLAG_RD, &sc->stat_MacControlFramesReceived,
6340                 0, "MAC control frames received");
6341
6342         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6343                 "stat_XoffStateEntered",
6344                 CTLFLAG_RD, &sc->stat_XoffStateEntered,
6345                 0, "XOFF state entered");
6346
6347         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6348                 "stat_IfInFramesL2FilterDiscards",
6349                 CTLFLAG_RD, &sc->stat_IfInFramesL2FilterDiscards,
6350                 0, "Received L2 packets discarded");
6351
6352         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6353                 "stat_IfInRuleCheckerDiscards",
6354                 CTLFLAG_RD, &sc->stat_IfInRuleCheckerDiscards,
6355                 0, "Received packets discarded by rule");
6356
6357         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6358                 "stat_IfInFTQDiscards",
6359                 CTLFLAG_RD, &sc->stat_IfInFTQDiscards,
6360                 0, "Received packet FTQ discards");
6361
6362         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6363                 "stat_IfInMBUFDiscards",
6364                 CTLFLAG_RD, &sc->stat_IfInMBUFDiscards,
6365                 0, "Received packets discarded due to lack of controller buffer memory");
6366
6367         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6368                 "stat_IfInRuleCheckerP4Hit",
6369                 CTLFLAG_RD, &sc->stat_IfInRuleCheckerP4Hit,
6370                 0, "Received packets rule checker hits");
6371
6372         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6373                 "stat_CatchupInRuleCheckerDiscards",
6374                 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerDiscards,
6375                 0, "Received packets discarded in Catchup path");
6376
6377         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6378                 "stat_CatchupInFTQDiscards",
6379                 CTLFLAG_RD, &sc->stat_CatchupInFTQDiscards,
6380                 0, "Received packets discarded in FTQ in Catchup path");
6381
6382         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6383                 "stat_CatchupInMBUFDiscards",
6384                 CTLFLAG_RD, &sc->stat_CatchupInMBUFDiscards,
6385                 0, "Received packets discarded in controller buffer memory in Catchup path");
6386
6387         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6388                 "stat_CatchupInRuleCheckerP4Hit",
6389                 CTLFLAG_RD, &sc->stat_CatchupInRuleCheckerP4Hit,
6390                 0, "Received packets rule checker hits in Catchup path");
6391
6392         SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 
6393                 "com_no_buffers",
6394                 CTLFLAG_RD, &sc->com_no_buffers,
6395                 0, "Valid packets received but no RX buffers available");
6396 }
6397
6398 static int
6399 bce_sysctl_tx_bds_int(SYSCTL_HANDLER_ARGS)
6400 {
6401         struct bce_softc *sc = arg1;
6402
6403         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6404                         &sc->bce_tx_quick_cons_trip_int,
6405                         BCE_COALMASK_TX_BDS_INT);
6406 }
6407
6408 static int
6409 bce_sysctl_tx_bds(SYSCTL_HANDLER_ARGS)
6410 {
6411         struct bce_softc *sc = arg1;
6412
6413         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6414                         &sc->bce_tx_quick_cons_trip,
6415                         BCE_COALMASK_TX_BDS);
6416 }
6417
6418 static int
6419 bce_sysctl_tx_ticks_int(SYSCTL_HANDLER_ARGS)
6420 {
6421         struct bce_softc *sc = arg1;
6422
6423         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6424                         &sc->bce_tx_ticks_int,
6425                         BCE_COALMASK_TX_TICKS_INT);
6426 }
6427
6428 static int
6429 bce_sysctl_tx_ticks(SYSCTL_HANDLER_ARGS)
6430 {
6431         struct bce_softc *sc = arg1;
6432
6433         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6434                         &sc->bce_tx_ticks,
6435                         BCE_COALMASK_TX_TICKS);
6436 }
6437
6438 static int
6439 bce_sysctl_rx_bds_int(SYSCTL_HANDLER_ARGS)
6440 {
6441         struct bce_softc *sc = arg1;
6442
6443         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6444                         &sc->bce_rx_quick_cons_trip_int,
6445                         BCE_COALMASK_RX_BDS_INT);
6446 }
6447
6448 static int
6449 bce_sysctl_rx_bds(SYSCTL_HANDLER_ARGS)
6450 {
6451         struct bce_softc *sc = arg1;
6452
6453         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6454                         &sc->bce_rx_quick_cons_trip,
6455                         BCE_COALMASK_RX_BDS);
6456 }
6457
6458 static int
6459 bce_sysctl_rx_ticks_int(SYSCTL_HANDLER_ARGS)
6460 {
6461         struct bce_softc *sc = arg1;
6462
6463         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6464                         &sc->bce_rx_ticks_int,
6465                         BCE_COALMASK_RX_TICKS_INT);
6466 }
6467
6468 static int
6469 bce_sysctl_rx_ticks(SYSCTL_HANDLER_ARGS)
6470 {
6471         struct bce_softc *sc = arg1;
6472
6473         return bce_sysctl_coal_change(oidp, arg1, arg2, req,
6474                         &sc->bce_rx_ticks,
6475                         BCE_COALMASK_RX_TICKS);
6476 }
6477
6478 static int
6479 bce_sysctl_coal_change(SYSCTL_HANDLER_ARGS, uint32_t *coal,
6480     uint32_t coalchg_mask)
6481 {
6482         struct bce_softc *sc = arg1;
6483         struct ifnet *ifp = &sc->arpcom.ac_if;
6484         int error = 0, v;
6485
6486         ifnet_serialize_all(ifp);
6487
6488         v = *coal;
6489         error = sysctl_handle_int(oidp, &v, 0, req);
6490         if (!error && req->newptr != NULL) {
6491                 if (v < 0) {
6492                         error = EINVAL;
6493                 } else {
6494                         *coal = v;
6495                         sc->bce_coalchg_mask |= coalchg_mask;
6496
6497                         /* Commit changes */
6498                         bce_coal_change(sc);
6499                 }
6500         }
6501
6502         ifnet_deserialize_all(ifp);
6503         return error;
6504 }
6505
6506 static void
6507 bce_coal_change(struct bce_softc *sc)
6508 {
6509         struct ifnet *ifp = &sc->arpcom.ac_if;
6510         int i;
6511
6512         ASSERT_SERIALIZED(&sc->main_serialize);
6513
6514         if ((ifp->if_flags & IFF_RUNNING) == 0) {
6515                 sc->bce_coalchg_mask = 0;
6516                 return;
6517         }
6518
6519         if (sc->bce_coalchg_mask &
6520             (BCE_COALMASK_TX_BDS | BCE_COALMASK_TX_BDS_INT)) {
6521                 REG_WR(sc, BCE_HC_TX_QUICK_CONS_TRIP,
6522                        (sc->bce_tx_quick_cons_trip_int << 16) |
6523                        sc->bce_tx_quick_cons_trip);
6524                 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6525                         uint32_t base;
6526
6527                         base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6528                             BCE_HC_SB_CONFIG_1;
6529                         REG_WR(sc, base + BCE_HC_TX_QUICK_CONS_TRIP_OFF,
6530                             (sc->bce_tx_quick_cons_trip_int << 16) |
6531                             sc->bce_tx_quick_cons_trip);
6532                 }
6533                 if (bootverbose) {
6534                         if_printf(ifp, "tx_bds %u, tx_bds_int %u\n",
6535                                   sc->bce_tx_quick_cons_trip,
6536                                   sc->bce_tx_quick_cons_trip_int);
6537                 }
6538         }
6539
6540         if (sc->bce_coalchg_mask &
6541             (BCE_COALMASK_TX_TICKS | BCE_COALMASK_TX_TICKS_INT)) {
6542                 REG_WR(sc, BCE_HC_TX_TICKS,
6543                        (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6544                 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6545                         uint32_t base;
6546
6547                         base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6548                             BCE_HC_SB_CONFIG_1;
6549                         REG_WR(sc, base + BCE_HC_TX_TICKS_OFF,
6550                             (sc->bce_tx_ticks_int << 16) | sc->bce_tx_ticks);
6551                 }
6552                 if (bootverbose) {
6553                         if_printf(ifp, "tx_ticks %u, tx_ticks_int %u\n",
6554                                   sc->bce_tx_ticks, sc->bce_tx_ticks_int);
6555                 }
6556         }
6557
6558         if (sc->bce_coalchg_mask &
6559             (BCE_COALMASK_RX_BDS | BCE_COALMASK_RX_BDS_INT)) {
6560                 REG_WR(sc, BCE_HC_RX_QUICK_CONS_TRIP,
6561                        (sc->bce_rx_quick_cons_trip_int << 16) |
6562                        sc->bce_rx_quick_cons_trip);
6563                 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6564                         uint32_t base;
6565
6566                         base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6567                             BCE_HC_SB_CONFIG_1;
6568                         REG_WR(sc, base + BCE_HC_RX_QUICK_CONS_TRIP_OFF,
6569                             (sc->bce_rx_quick_cons_trip_int << 16) |
6570                             sc->bce_rx_quick_cons_trip);
6571                 }
6572                 if (bootverbose) {
6573                         if_printf(ifp, "rx_bds %u, rx_bds_int %u\n",
6574                                   sc->bce_rx_quick_cons_trip,
6575                                   sc->bce_rx_quick_cons_trip_int);
6576                 }
6577         }
6578
6579         if (sc->bce_coalchg_mask &
6580             (BCE_COALMASK_RX_TICKS | BCE_COALMASK_RX_TICKS_INT)) {
6581                 REG_WR(sc, BCE_HC_RX_TICKS,
6582                        (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6583                 for (i = 1; i < sc->rx_ring_cnt; ++i) {
6584                         uint32_t base;
6585
6586                         base = ((i - 1) * BCE_HC_SB_CONFIG_SIZE) +
6587                             BCE_HC_SB_CONFIG_1;
6588                         REG_WR(sc, base + BCE_HC_RX_TICKS_OFF,
6589                             (sc->bce_rx_ticks_int << 16) | sc->bce_rx_ticks);
6590                 }
6591                 if (bootverbose) {
6592                         if_printf(ifp, "rx_ticks %u, rx_ticks_int %u\n",
6593                                   sc->bce_rx_ticks, sc->bce_rx_ticks_int);
6594                 }
6595         }
6596
6597         sc->bce_coalchg_mask = 0;
6598 }
6599
6600 static int
6601 bce_tso_setup(struct bce_tx_ring *txr, struct mbuf **mp,
6602     uint16_t *flags0, uint16_t *mss0)
6603 {
6604         struct mbuf *m;
6605         uint16_t flags;
6606         int thoff, iphlen, hoff;
6607
6608         m = *mp;
6609         KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
6610
6611         hoff = m->m_pkthdr.csum_lhlen;
6612         iphlen = m->m_pkthdr.csum_iphlen;
6613         thoff = m->m_pkthdr.csum_thlen;
6614
6615         KASSERT(hoff >= sizeof(struct ether_header),
6616             ("invalid ether header len %d", hoff));
6617         KASSERT(iphlen >= sizeof(struct ip),
6618             ("invalid ip header len %d", iphlen));
6619         KASSERT(thoff >= sizeof(struct tcphdr),
6620             ("invalid tcp header len %d", thoff));
6621
6622         if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
6623                 m = m_pullup(m, hoff + iphlen + thoff);
6624                 if (m == NULL) {
6625                         *mp = NULL;
6626                         return ENOBUFS;
6627                 }
6628                 *mp = m;
6629         }
6630
6631         /* Set the LSO flag in the TX BD */
6632         flags = TX_BD_FLAGS_SW_LSO;
6633
6634         /* Set the length of IP + TCP options (in 32 bit words) */
6635         flags |= (((iphlen + thoff -
6636             sizeof(struct ip) - sizeof(struct tcphdr)) >> 2) << 8);
6637
6638         *mss0 = htole16(m->m_pkthdr.tso_segsz);
6639         *flags0 = flags;
6640
6641         return 0;
6642 }
6643
/*
 * Allocate and populate the serializer array used by the ifnet
 * (de)serialize methods.  Layout is: main serializer first, then
 * one serializer per RX ring, then one per TX ring; the starting
 * indices of the RX and TX groups are recorded in sc->rx_serialize
 * and sc->tx_serialize.  The array order is relied upon by
 * ifnet_serialize_array_*() and must not be changed.
 */
static void
bce_setup_serialize(struct bce_softc *sc)
{
	int i, j;

	/*
	 * Allocate serializer array
	 */

	/* Main + TX + RX */
	sc->serialize_cnt = 1 + sc->tx_ring_cnt + sc->rx_ring_cnt;

	sc->serializes =
	    kmalloc(sc->serialize_cnt * sizeof(struct lwkt_serialize *),
	        M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Setup serializers
	 *
	 * NOTE: Order is critical
	 */

	/* Slot 0: the main serializer. */
	i = 0;
	KKASSERT(i < sc->serialize_cnt);
	sc->serializes[i++] = &sc->main_serialize;

	/* Next: one slot per RX ring, starting at sc->rx_serialize. */
	sc->rx_serialize = i;
	for (j = 0; j < sc->rx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->rx_rings[j].rx_serialize;
	}

	/* Last: one slot per TX ring, starting at sc->tx_serialize. */
	sc->tx_serialize = i;
	for (j = 0; j < sc->tx_ring_cnt; ++j) {
		KKASSERT(i < sc->serialize_cnt);
		sc->serializes[i++] = &sc->tx_rings[j].tx_serialize;
	}

	KKASSERT(i == sc->serialize_cnt);
}
6684
6685 static void
6686 bce_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
6687 {
6688         struct bce_softc *sc = ifp->if_softc;
6689
6690         ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt,
6691             sc->tx_serialize, sc->rx_serialize, slz);
6692 }
6693
6694 static void
6695 bce_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6696 {
6697         struct bce_softc *sc = ifp->if_softc;
6698
6699         ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt,
6700             sc->tx_serialize, sc->rx_serialize, slz);
6701 }
6702
6703 static int
6704 bce_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
6705 {
6706         struct bce_softc *sc = ifp->if_softc;
6707
6708         return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt,
6709             sc->tx_serialize, sc->rx_serialize, slz);
6710 }
6711
6712 #ifdef INVARIANTS
6713
6714 static void
6715 bce_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
6716     boolean_t serialized)
6717 {
6718         struct bce_softc *sc = ifp->if_softc;
6719
6720         ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt,
6721             sc->tx_serialize, sc->rx_serialize, slz, serialized);
6722 }
6723
6724 #endif  /* INVARIANTS */
6725
/*
 * Enter every serializer in the array except the leading main
 * serializer (sc->serializes[0]).
 */
static void
bce_serialize_skipmain(struct bce_softc *sc)
{
	lwkt_serialize_array_enter(sc->serializes, sc->serialize_cnt, 1);
}
6731
/*
 * Exit every serializer in the array except the leading main
 * serializer (sc->serializes[0]).
 */
static void
bce_deserialize_skipmain(struct bce_softc *sc)
{
	lwkt_serialize_array_exit(sc->serializes, sc->serialize_cnt, 1);
}
6737
6738 #ifdef IFPOLL_ENABLE
6739
6740 static int
6741 bce_sysctl_npoll_offset(SYSCTL_HANDLER_ARGS)
6742 {
6743         struct bce_softc *sc = (void *)arg1;
6744         struct ifnet *ifp = &sc->arpcom.ac_if;
6745         int error, off;
6746
6747         off = sc->npoll_ofs;
6748         error = sysctl_handle_int(oidp, &off, 0, req);
6749         if (error || req->newptr == NULL)
6750                 return error;
6751         if (off < 0)
6752                 return EINVAL;
6753
6754         ifnet_serialize_all(ifp);
6755         if (off >= ncpus2 || off % sc->rx_ring_cnt2 != 0) {
6756                 error = EINVAL;
6757         } else {
6758                 error = 0;
6759                 sc->npoll_ofs = off;
6760         }
6761         ifnet_deserialize_all(ifp);
6762
6763         return error;
6764 }
6765
6766 #endif  /* IFPOLL_ENABLE */
6767
6768 static void
6769 bce_set_timer_cpuid(struct bce_softc *sc, boolean_t polling)
6770 {
6771         if (polling)
6772                 sc->bce_timer_cpuid = 0; /* XXX */
6773         else
6774                 sc->bce_timer_cpuid = sc->bce_msix[0].msix_cpuid;
6775 }
6776
/*
 * Allocate the device interrupt.  MSI-X is tried first; if it is
 * not usable, fall back to a single MSI or legacy INTx vector via
 * pci_alloc_1intr().  Returns 0 on success, ENXIO if no interrupt
 * resource could be mapped.
 */
static int
bce_alloc_intr(struct bce_softc *sc)
{
	u_int irq_flags;

	bce_try_alloc_msix(sc);
	if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
		return 0;

	/* MSI-X did not work out; allocate one MSI or legacy vector. */
	sc->bce_irq_type = pci_alloc_1intr(sc->bce_dev, bce_msi_enable,
	    &sc->bce_irq_rid, &irq_flags);

	sc->bce_res_irq = bus_alloc_resource_any(sc->bce_dev, SYS_RES_IRQ,
	    &sc->bce_irq_rid, irq_flags);
	if (sc->bce_res_irq == NULL) {
		device_printf(sc->bce_dev, "PCI map interrupt failed\n");
		return ENXIO;
	}
	return 0;
}
6797
/*
 * Try to put the device into MSI-X mode.  On success
 * sc->bce_irq_type is set to PCI_INTR_TYPE_MSIX; on any failure all
 * MSI-X resources acquired so far are released (goto-cleanup via
 * 'back') and the caller falls back to MSI/legacy allocation.
 */
static void
bce_try_alloc_msix(struct bce_softc *sc)
{
	struct bce_msix_data *msix;
	int offset, i, error;
	boolean_t setup = FALSE;

	/* MSI-X is only used with multiple RX rings. */
	if (sc->rx_ring_cnt == 1)
		return;

	/*
	 * Determine the cpu offset of the first vector; it may be
	 * overridden by the "msix.offset" tunable, but must stay below
	 * ncpus2 and be a multiple of the RSS RX ring count.
	 */
	if (sc->rx_ring_cnt2 == ncpus2) {
		offset = 0;
	} else {
		int offset_def =
		    (sc->rx_ring_cnt2 * device_get_unit(sc->bce_dev)) % ncpus2;

		offset = device_getenv_int(sc->bce_dev,
		    "msix.offset", offset_def);
		if (offset >= ncpus2 || offset % sc->rx_ring_cnt2 != 0) {
			device_printf(sc->bce_dev,
			    "invalid msix.offset %d, use %d\n",
			    offset, offset_def);
			offset = offset_def;
		}
	}

	/* Vector 0 is the "combo" vector, using the main serializer. */
	msix = &sc->bce_msix[0];
	msix->msix_serialize = &sc->main_serialize;
	msix->msix_func = bce_intr_msi_oneshot;
	msix->msix_arg = sc;
	KKASSERT(offset < ncpus2);
	msix->msix_cpuid = offset;
	ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s combo",
	    device_get_nameunit(sc->bce_dev));

	/*
	 * Remaining vectors serve the extra RX rings; a vector also
	 * handles a TX ring while i < tx_ring_cnt ("rxtx"), else RX only.
	 */
	for (i = 1; i < sc->rx_ring_cnt; ++i) {
		struct bce_rx_ring *rxr = &sc->rx_rings[i];

		msix = &sc->bce_msix[i];

		msix->msix_serialize = &rxr->rx_serialize;
		msix->msix_arg = rxr;
		msix->msix_cpuid = offset + (i % sc->rx_ring_cnt2);
		KKASSERT(msix->msix_cpuid < ncpus2);

		if (i < sc->tx_ring_cnt) {
			msix->msix_func = bce_intr_msix_rxtx;
			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
			    "%s rxtx%d", device_get_nameunit(sc->bce_dev), i);
		} else {
			msix->msix_func = bce_intr_msix_rx;
			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
			    "%s rx%d", device_get_nameunit(sc->bce_dev), i);
		}
	}

	/*
	 * Setup MSI-X table
	 */
	bce_setup_msix_table(sc);
	REG_WR(sc, BCE_PCI_MSIX_CONTROL, BCE_MSIX_MAX - 1);
	REG_WR(sc, BCE_PCI_MSIX_TBL_OFF_BIR, BCE_PCI_GRC_WINDOW2_BASE);
	REG_WR(sc, BCE_PCI_MSIX_PBA_OFF_BIT, BCE_PCI_GRC_WINDOW3_BASE);
	/* Flush */
	REG_RD(sc, BCE_PCI_MSIX_CONTROL);

	error = pci_setup_msix(sc->bce_dev);
	if (error) {
		device_printf(sc->bce_dev, "Setup MSI-X failed\n");
		goto back;
	}
	setup = TRUE;

	/* Allocate each vector on its designated cpu. */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		msix = &sc->bce_msix[i];

		error = pci_alloc_msix_vector(sc->bce_dev, i, &msix->msix_rid,
		    msix->msix_cpuid);
		if (error) {
			device_printf(sc->bce_dev,
			    "Unable to allocate MSI-X %d on cpu%d\n",
			    i, msix->msix_cpuid);
			goto back;
		}

		msix->msix_res = bus_alloc_resource_any(sc->bce_dev,
		    SYS_RES_IRQ, &msix->msix_rid, RF_ACTIVE);
		if (msix->msix_res == NULL) {
			device_printf(sc->bce_dev,
			    "Unable to allocate MSI-X %d resource\n", i);
			error = ENOMEM;
			goto back;
		}
	}

	pci_enable_msix(sc->bce_dev);
	sc->bce_irq_type = PCI_INTR_TYPE_MSIX;
back:
	if (error)
		bce_free_msix(sc, setup);
}
6899
/*
 * Decide how many RX/TX rings to use.  Multiple rings are enabled
 * only on 5709-class chips with MSI-X available and more than one
 * cpu; otherwise the defaults (one RX ring, one TX ring) stand.
 */
static void
bce_setup_ring_cnt(struct bce_softc *sc)
{
	int msix_enable, ring_max, msix_cnt2, msix_cnt, i;

	/* Defaults: single RX/TX ring, no RSS. */
	sc->rx_ring_cnt = 1;
	sc->rx_ring_cnt2 = 1;
	sc->tx_ring_cnt = 1;

	if (BCE_CHIP_NUM(sc) != BCE_CHIP_NUM_5709)
		return;

	/* "msix.enable" tunable can disable MSI-X (and thus RSS). */
	msix_enable = device_getenv_int(sc->bce_dev, "msix.enable",
	    bce_msix_enable);
	if (!msix_enable)
		return;

	if (ncpus2 == 1)
		return;

	msix_cnt = pci_msix_count(sc->bce_dev);
	if (msix_cnt <= 1)
		return;

	/* msix_cnt2 = largest power of 2 that is <= msix_cnt. */
	i = 0;
	while ((1 << (i + 1)) <= msix_cnt)
		++i;
	msix_cnt2 = 1 << i;

	/*
	 * One extra RX ring will be needed (see below), so make sure
	 * that there are enough MSI-X vectors.
	 */
	if (msix_cnt == msix_cnt2) {
		/*
		 * XXX
		 * This probably will not happen; 5709/5716
		 * come with 9 MSI-X vectors.
		 */
		msix_cnt2 >>= 1;
		if (msix_cnt2 <= 1) {
			device_printf(sc->bce_dev,
			    "MSI-X count %d could not be used\n", msix_cnt);
			return;
		}
		device_printf(sc->bce_dev, "MSI-X count %d is power of 2\n",
		    msix_cnt);
	}

	/*
	 * Setup RX ring count
	 */
	ring_max = BCE_RX_RING_MAX;
	if (ring_max > msix_cnt2)
		ring_max = msix_cnt2;
	sc->rx_ring_cnt2 = device_getenv_int(sc->bce_dev, "rx_rings",
	    bce_rx_rings);
	sc->rx_ring_cnt2 = if_ring_count2(sc->rx_ring_cnt2, ring_max);

	/*
	 * One extra RX ring is allocated, since the first RX ring
	 * could not be used for RSS hashed packets whose masked
	 * hash is 0.  The first RX ring is only used for packets
	 * whose RSS hash could not be calculated, e.g. ARP packets.
	 * This extra RX ring will be used for packets whose masked
	 * hash is 0.  The effective RX ring count involved in RSS
	 * is still sc->rx_ring_cnt2.
	 */
	KKASSERT(sc->rx_ring_cnt2 + 1 <= msix_cnt);
	sc->rx_ring_cnt = sc->rx_ring_cnt2 + 1;

	/*
	 * Setup TX ring count
	 *
	 * NOTE:
	 * TX ring count must be less than the effective RSS RX ring
	 * count, since we use RX ring software data struct to save
	 * status index and various other MSI-X related stuffs.
	 */
	ring_max = BCE_TX_RING_MAX;
	if (ring_max > msix_cnt2)
		ring_max = msix_cnt2;
	if (ring_max > sc->rx_ring_cnt2)
		ring_max = sc->rx_ring_cnt2;
	sc->tx_ring_cnt = device_getenv_int(sc->bce_dev, "tx_rings",
	    bce_tx_rings);
	sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, ring_max);
}
6988
6989 static void
6990 bce_free_msix(struct bce_softc *sc, boolean_t setup)
6991 {
6992         int i;
6993
6994         KKASSERT(sc->rx_ring_cnt > 1);
6995
6996         for (i = 0; i < sc->rx_ring_cnt; ++i) {
6997                 struct bce_msix_data *msix = &sc->bce_msix[i];
6998
6999                 if (msix->msix_res != NULL) {
7000                         bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
7001                             msix->msix_rid, msix->msix_res);
7002                 }
7003                 if (msix->msix_rid >= 0)
7004                         pci_release_msix_vector(sc->bce_dev, msix->msix_rid);
7005         }
7006         if (setup)
7007                 pci_teardown_msix(sc->bce_dev);
7008 }
7009
7010 static void
7011 bce_free_intr(struct bce_softc *sc)
7012 {
7013         if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX) {
7014                 if (sc->bce_res_irq != NULL) {
7015                         bus_release_resource(sc->bce_dev, SYS_RES_IRQ,
7016                             sc->bce_irq_rid, sc->bce_res_irq);
7017                 }
7018                 if (sc->bce_irq_type == PCI_INTR_TYPE_MSI)
7019                         pci_release_msi(sc->bce_dev);
7020         } else {
7021                 bce_free_msix(sc, TRUE);
7022         }
7023 }
7024
/*
 * Program separate GRC windows for MSI-X: window 2 is pointed at
 * the MSI-X table address and window 3 at the MSI-X PBA address.
 */
static void
bce_setup_msix_table(struct bce_softc *sc)
{
	REG_WR(sc, BCE_PCI_GRC_WINDOW_ADDR, BCE_PCI_GRC_WINDOW_ADDR_SEP_WIN);
	REG_WR(sc, BCE_PCI_GRC_WINDOW2_ADDR, BCE_MSIX_TABLE_ADDR);
	REG_WR(sc, BCE_PCI_GRC_WINDOW3_ADDR, BCE_MSIX_PBA_ADDR);
}
7032
7033 static int
7034 bce_setup_intr(struct bce_softc *sc)
7035 {
7036         void (*irq_handle)(void *);
7037         int error;
7038
7039         if (sc->bce_irq_type == PCI_INTR_TYPE_MSIX)
7040                 return bce_setup_msix(sc);
7041
7042         if (sc->bce_irq_type == PCI_INTR_TYPE_LEGACY) {
7043                 irq_handle = bce_intr_legacy;
7044         } else if (sc->bce_irq_type == PCI_INTR_TYPE_MSI) {
7045                 if (BCE_CHIP_NUM(sc) == BCE_CHIP_NUM_5709) {
7046                         irq_handle = bce_intr_msi_oneshot;
7047                         sc->bce_flags |= BCE_ONESHOT_MSI_FLAG;
7048                 } else {
7049                         irq_handle = bce_intr_msi;
7050                         sc->bce_flags |= BCE_CHECK_MSI_FLAG;
7051                 }
7052         } else {
7053                 panic("%s: unsupported intr type %d",
7054                     device_get_nameunit(sc->bce_dev), sc->bce_irq_type);
7055         }
7056
7057         error = bus_setup_intr(sc->bce_dev, sc->bce_res_irq, INTR_MPSAFE,
7058             irq_handle, sc, &sc->bce_intrhand, &sc->main_serialize);
7059         if (error != 0) {
7060                 device_printf(sc->bce_dev, "Failed to setup IRQ!\n");
7061                 return error;
7062         }
7063         sc->bce_msix[0].msix_cpuid = rman_get_cpuid(sc->bce_res_irq);
7064         sc->bce_msix[0].msix_serialize = &sc->main_serialize;
7065
7066         return 0;
7067 }
7068
7069 static void
7070 bce_teardown_intr(struct bce_softc *sc)
7071 {
7072         if (sc->bce_irq_type != PCI_INTR_TYPE_MSIX)
7073                 bus_teardown_intr(sc->bce_dev, sc->bce_res_irq, sc->bce_intrhand);
7074         else
7075                 bce_teardown_msix(sc, sc->rx_ring_cnt);
7076 }
7077
7078 static int
7079 bce_setup_msix(struct bce_softc *sc)
7080 {
7081         int i;
7082
7083         for (i = 0; i < sc->rx_ring_cnt; ++i) {
7084                 struct bce_msix_data *msix = &sc->bce_msix[i];
7085                 int error;
7086
7087                 error = bus_setup_intr_descr(sc->bce_dev, msix->msix_res,
7088                     INTR_MPSAFE, msix->msix_func, msix->msix_arg,
7089                     &msix->msix_handle, msix->msix_serialize, msix->msix_desc);
7090                 if (error) {
7091                         device_printf(sc->bce_dev, "could not set up %s "
7092                             "interrupt handler.\n", msix->msix_desc);
7093                         bce_teardown_msix(sc, i);
7094                         return error;
7095                 }
7096         }
7097         return 0;
7098 }
7099
7100 static void
7101 bce_teardown_msix(struct bce_softc *sc, int msix_cnt)
7102 {
7103         int i;
7104
7105         for (i = 0; i < msix_cnt; ++i) {
7106                 struct bce_msix_data *msix = &sc->bce_msix[i];
7107
7108                 bus_teardown_intr(sc->bce_dev, msix->msix_res,
7109                     msix->msix_handle);
7110         }
7111 }
7112
/*
 * Program the chip's RSS (Toeplitz) hash key and the redirect
 * table, then enable RSS for all IPv4 packet types.  Requires the
 * multi-ring layout set up by bce_setup_ring_cnt() (at least the
 * non-RSS ring plus two RSS rings).
 */
static void
bce_init_rss(struct bce_softc *sc)
{
	uint8_t key[BCE_RLUP_RSS_KEY_CNT * BCE_RLUP_RSS_KEY_SIZE];
	uint32_t tbl = 0;
	int i;

	KKASSERT(sc->rx_ring_cnt > 2);

	/*
	 * Configure RSS keys
	 */
	toeplitz_get_key(key, sizeof(key));
	for (i = 0; i < BCE_RLUP_RSS_KEY_CNT; ++i) {
		uint32_t rss_key;

		rss_key = BCE_RLUP_RSS_KEYVAL(key, i);
		BCE_RSS_DPRINTF(sc, 1, "rss_key%d 0x%08x\n", i, rss_key);

		REG_WR(sc, BCE_RLUP_RSS_KEY(i), rss_key);
	}

	/*
	 * Configure the redirect table
	 *
	 * NOTE:
	 * - The "queue ID" in redirect table is the software RX ring's
	 *   index _minus_ one.
	 * - The last RX ring, whose "queue ID" is (sc->rx_ring_cnt - 2)
	 *   will be used for packets whose masked hash is 0.
	 *   (see also: comment in bce_setup_ring_cnt())
	 *
	 * The redirect table is configured in following fashion, except
	 * for the masked hash 0, which is noted above:
	 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)]
	 */
	/* Each 32-bit data word packs eight 4-bit queue IDs. */
	for (i = 0; i < BCE_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
		int shift = (i % 8) << 2, qid;

		/* Map table slot to queue ID per the NOTE above. */
		qid = i % sc->rx_ring_cnt2;
		if (qid > 0)
			--qid;
		else
			qid = sc->rx_ring_cnt - 2;
		KKASSERT(qid < (sc->rx_ring_cnt - 1));

		tbl |= qid << shift;
		/* Flush a full word of eight entries to the chip. */
		if (i % 8 == 7) {
			BCE_RSS_DPRINTF(sc, 1, "tbl 0x%08x\n", tbl);
			REG_WR(sc, BCE_RLUP_RSS_DATA, tbl);
			REG_WR(sc, BCE_RLUP_RSS_COMMAND, (i >> 3) |
			    BCE_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
			    BCE_RLUP_RSS_COMMAND_WRITE |
			    BCE_RLUP_RSS_COMMAND_HASH_MASK);
			tbl = 0;
		}
	}
	REG_WR(sc, BCE_RLUP_RSS_CONFIG,
	    BCE_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI);
}
7173
7174 static void
7175 bce_npoll_coal_change(struct bce_softc *sc)
7176 {
7177         uint32_t old_rx_cons, old_tx_cons;
7178
7179         old_rx_cons = sc->bce_rx_quick_cons_trip_int;
7180         old_tx_cons = sc->bce_tx_quick_cons_trip_int;
7181         sc->bce_rx_quick_cons_trip_int = 1;
7182         sc->bce_tx_quick_cons_trip_int = 1;
7183
7184         sc->bce_coalchg_mask |= BCE_COALMASK_TX_BDS_INT |
7185             BCE_COALMASK_RX_BDS_INT;
7186         bce_coal_change(sc);
7187
7188         sc->bce_rx_quick_cons_trip_int = old_rx_cons;
7189         sc->bce_tx_quick_cons_trip_int = old_tx_cons;
7190 }