/******************************************************************************
 *
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

/*
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */

/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/in_cksum.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_mskreg.h"

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
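
/*
 * Note: MSK_CSUM_FEATURES is the set of Tx checksum offloads advertised
 * through if_hwassist; Rx checksum offload is deliberately not used (see
 * the comment in msk_attach() below).
 */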

/*
 * Devices supported by this driver.
 */
static const struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	  "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	  "SK-9Exx Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	  "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	  "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	  "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	  "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	  "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	  "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	  "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	  "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	  "Marvell Yukon 88E8035 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	  "Marvell Yukon 88E8036 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	  "Marvell Yukon 88E8038 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	  "Marvell Yukon 88E8039 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040,
	  "Marvell Yukon 88E8040 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8040T,
	  "Marvell Yukon 88E8040T Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8042,
	  "Marvell Yukon 88E8042 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8048,
	  "Marvell Yukon 88E8048 Fast Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	  "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	  "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	  "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	  "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	  "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4365,
	  "Marvell Yukon 88E8070 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	  "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436B,
	  "Marvell Yukon 88E8071 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436C,
	  "Marvell Yukon 88E8072 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436D,
	  "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4370,
	  "Marvell Yukon 88E8075 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4380,
	  "Marvell Yukon 88E8057 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4381,
	  "Marvell Yukon 88E8059 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	  "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	  "D-Link 560T Gigabit Ethernet" },
	{ 0, 0, NULL }
};

/* Chip model names, indexed by msk_hw_id - CHIP_ID_YUKON_XL. */
static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon EX",
	"Yukon EC",
	"Yukon FE",
	"Yukon FE+",
	"Yukon Supreme",
	"Yukon Ultra 2",
	"Yukon Unknown",
	"Yukon Optima",
};

static int	mskc_probe(device_t);
static int	mskc_attach(device_t);
static int	mskc_detach(device_t);
static int	mskc_shutdown(device_t);
static int	mskc_suspend(device_t);
static int	mskc_resume(device_t);
static void	mskc_intr(void *);

static void	mskc_reset(struct msk_softc *);
static void	mskc_set_imtimer(struct msk_softc *);
static void	mskc_intr_hwerr(struct msk_softc *);
static int	mskc_handle_events(struct msk_softc *);
static void	mskc_phy_power(struct msk_softc *, int);
static int	mskc_setup_rambuffer(struct msk_softc *);
static int	mskc_status_dma_alloc(struct msk_softc *);
static void	mskc_status_dma_free(struct msk_softc *);
static int	mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS);
static int	mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);

static int	msk_probe(device_t);
static int	msk_attach(device_t);
static int	msk_detach(device_t);
static int	msk_miibus_readreg(device_t, int, int);
static int	msk_miibus_writereg(device_t, int, int, int);
static void	msk_miibus_statchg(device_t);

static void	msk_init(void *);
static int	msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	msk_start(struct ifnet *);
static void	msk_watchdog(struct ifnet *);
static int	msk_mediachange(struct ifnet *);
static void	msk_mediastatus(struct ifnet *, struct ifmediareq *);

static void	msk_tick(void *);
static void	msk_intr_phy(struct msk_if_softc *);
static void	msk_intr_gmac(struct msk_if_softc *);
static __inline void
		msk_rxput(struct msk_if_softc *);
static void	msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void	msk_rxeof(struct msk_if_softc *, uint32_t, int,
			  struct mbuf_chain *);
static void	msk_txeof(struct msk_if_softc *, int);
static void	msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void	msk_set_rambuffer(struct msk_if_softc *);
static void	msk_stop(struct msk_if_softc *);

static int	msk_txrx_dma_alloc(struct msk_if_softc *);
static void	msk_txrx_dma_free(struct msk_if_softc *);
static int	msk_init_rx_ring(struct msk_if_softc *);
static void	msk_init_tx_ring(struct msk_if_softc *);
static __inline void
		msk_discard_rxbuf(struct msk_if_softc *, int);
static int	msk_newbuf(struct msk_if_softc *, int, int);
static int	msk_encap(struct msk_if_softc *, struct mbuf **);

#ifdef MSK_JUMBO
static int	msk_init_jumbo_rx_ring(struct msk_if_softc *);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int	msk_jumbo_newbuf(struct msk_if_softc *, int);
static void	msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void	*msk_jalloc(struct msk_if_softc *);
static void	msk_jfree(void *, void *);
#endif

static int	msk_phy_readreg(struct msk_if_softc *, int, int);
static int	msk_phy_writereg(struct msk_if_softc *, int, int, int);

static void	msk_rxfilter(struct msk_if_softc *);
static void	msk_setvlan(struct msk_if_softc *, struct ifnet *);
static void	msk_set_tx_stfwd(struct msk_if_softc *);

static int	msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
				  void **, bus_addr_t *, bus_dmamap_t *);
static void	msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
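
/*
 * The driver is split in two: the mskc_* methods attach to the PCI device
 * and manage shared resources (register window, interrupt, status ring),
 * while one msk_* child device is created per GMAC port and owns the
 * per-port ifnet, descriptor rings and PHY.
 */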
static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	{ NULL, NULL }
};

static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
static devclass_t msk_devclass;

DECLARE_DUMMY_MODULE(if_msk);
DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, NULL, NULL);
DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);

static int	mskc_intr_rate = 0;
static int	mskc_process_limit = MSK_PROC_DEFAULT;

TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate);
TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit);
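
/*
 * Both knobs are also exported per device as read/write sysctls
 * (hw.mskcN.intr_rate and hw.mskcN.process_limit), registered in
 * mskc_attach() with mskc_sysctl_intr_rate()/mskc_sysctl_proc_limit()
 * as their handlers.
 */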

static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
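
	/*
	 * The SMI (MDIO) access is asynchronous: the write above kicks off
	 * the read, then we poll GM_SMI_CTRL until the controller raises
	 * GM_SMI_CT_RD_VAL to signal that GM_SMI_DATA holds the value.
	 */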
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}

static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}

static void
msk_miibus_statchg(device_t dev)
{
	struct msk_if_softc *sc_if;
	struct msk_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;

	sc_if->msk_link = 0;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc_if->msk_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
				sc_if->msk_link = 1;
			break;
		}
	}

	if (sc_if->msk_link != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * state change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if ((mii->mii_media_active & IFM_GMASK) & IFM_FDX)
			gmac |= GM_GPCR_DUP_FULL;
		else
			gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
		/* Disable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		/* Disable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_OFF;
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) &&
		    ((mii->mii_media_active & IFM_GMASK) & IFM_FDX))
			gmac = GMC_PAUSE_ON;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if (gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) {
			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read again to ensure writing. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}

static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	} else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
		mchash[0] = 0xffff;
		mchash[1] = 0xffff;
	} else {
		mode |= GM_RXCR_UCF_ENA;
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
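			/*
			 * Worked example: a CRC of 0x25 selects word
			 * 0x25 >> 5 = 1 and bit 0x25 & 0x1f = 5, so
			 * mchash[1] gets bit 5 set.  The GM_MC_ADDR_H1..H4
			 * writes below then load the two 32-bit words into
			 * the 64-bit hash filter, 16 bits at a time.
			 */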
		}
		if (mchash[0] != 0 || mchash[1] != 0)
			mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}

static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}

static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod, 1) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
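
/*
 * Note: the put index above is parked one slot short of a full ring,
 * presumably because put == get reads as an empty ring to the prefetch
 * unit, so a completely full ring would be indistinguishable from empty.
 */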

#ifdef MSK_JUMBO
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
#endif

static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}
}
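
/*
 * The discard helpers below re-arm a descriptor with the mbuf it already
 * owns: writing OP_PACKET | HW_OWNER hands the existing buffer back to
 * the chip, so a dropped frame costs no new mbuf allocation.
 */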
static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}

#ifdef MSK_JUMBO
static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
#endif

static int
msk_newbuf(struct msk_if_softc *sc_if, int idx, int init)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nseg;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);
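	/*
	 * The 2-byte adjustment above puts the IP header on a 4-byte
	 * boundary.  It is skipped on RAM-buffer chips, which can hang
	 * unless the Rx buffer start stays aligned to the 8-byte FIFO
	 * word (see the rxalign logic in msk_txrx_dma_alloc()).
	 */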

	error = bus_dmamap_load_mbuf_segment(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap,
	    m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init)
			if_printf(&sc_if->arpcom.ac_if, "can't load RX mbuf\n");
		return (error);
	}

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}

	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;

	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
	rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER);

	return (0);
}

#ifdef MSK_JUMBO
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	void *buf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	buf = msk_jalloc(sc_if);
	if (buf == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* Attach the buffer to the mbuf. */
	MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
	    EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MSK_JLEN;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
#endif

/*
 * Set media options.
 */
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;
	int error;

	mii = device_get_softc(sc_if->msk_miibus);
	error = mii_mediachg(mii);

	return (error);
}

/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->msk_miibus);
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			msk_init(sc_if);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if (((ifp->if_flags ^ sc_if->msk_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					msk_rxfilter(sc_if);
			} else {
				if (sc_if->msk_detach == 0)
					msk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if);
		}
		sc_if->msk_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			msk_rxfilter(sc_if);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			msk_setvlan(sc_if, ifp);
		}

		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			/*
			 * In Yukon EC Ultra, TSO & checksum offload is not
			 * supported for jumbo frames.
			 */
			ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (mp = msk_products; mp->msk_name != NULL; ++mp) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (0);
		}
	}
	return (ENXIO);
}

static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose) {
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	}
	if (sc->msk_ramsize == 0)
		return (0);
	sc->msk_pflags |= MSK_FLAG_RAMBUF;

	/*
	 * Give the receiver 2/3 of the memory and round down to a
	 * multiple of 1024.  The Tx/Rx RAM buffer sizes of Yukon II
	 * should be multiples of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
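	/*
	 * Worked example: a 48KB SRAM gives rounddown(48 * 1024 * 2 / 3,
	 * 1024) = 32KB for the Rx queue, leaving 16KB for the Tx queue.
	 */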
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}

static void
mskc_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
			if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
				/* Deassert Low Power for 1st PHY. */
				our |= PCI_Y2_PHY1_COMA;
				if (sc->msk_num_port > 1)
					our |= PCI_Y2_PHY2_COMA;
			}
		}
		if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
		    sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
			val &= (PCI_FORCE_ASPM_REQUEST |
			    PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
			    PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
			val &= PCI_CTL_TIM_VMAIN_AV_MSK;
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
			CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
			/*
			 * Disable status race, workaround for
			 * Yukon EC Ultra & Yukon EX.
			 */
			val = CSR_READ_4(sc, B2_GP_IO);
			val |= GLB_GPIO_STAT_RACE_DIS;
			CSR_WRITE_4(sc, B2_GP_IO, val);
			CSR_READ_4(sc, B2_GP_IO);
		}
		/* Release PHY from PowerDown/COMA mode. */
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);

		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}

static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	/* Disable ASF. */
	if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
	    sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
			status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
			/* Clear AHB bridge & microcontroller reset. */
			status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
			    Y2_ASF_HCU_CCSR_CPU_RST_MODE);
			/* Clear ASF microcontroller state. */
			status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
			status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
			CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
			CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
		} else {
			CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		}
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
		/*
		 * Since we disabled ASF, S/W reset is required for
		 * Power Management.
		 */
		CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
		CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	}

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
			val |= PCI_CLS_OPT;
			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
		}
		break;
	}

	/* Set PHY power state. */
	mskc_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
		if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
		    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
			CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
			    GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
			    GMC_BYP_RETR_ON);
		}
	}

	if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR &&
	    sc->msk_hw_rev > CHIP_REV_YU_SU_B0)
		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS);
	if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
		/* Disable PCIe PHY powerdown (reg 0x80, bit 7). */
		CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id == CHIP_ID_YUKON_EC ||
	    sc->msk_hw_id == CHIP_ID_YUKON_FE) {
		/* Configure timeout values. */
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
			    RI_RST_SET);
			CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
			    RI_RST_CLR);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
			    MSK_RI_TO_53);
			CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
			    MSK_RI_TO_53);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X cards, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;

		pcix_cmd = pci_read_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev,
		    sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_pciecap != 0) {
		/* Change Max. Read Request Size to 2048 bytes. */
		if (pcie_get_max_readrq(sc->msk_dev) ==
		    PCIEM_DEVCTL_MAX_READRQ_512) {
			pcie_set_max_readrq(sc->msk_dev,
			    PCIEM_DEVCTL_MAX_READRQ_2048);
		}
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}

	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));

	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}

static int
msk_probe(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	char desc[100];

	/*
	 * Not much to do here.  We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	ksnprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (0);
}

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i, port, error;
	uint8_t eaddr[ETHER_ADDR_LEN];

	port = *(int *)device_get_ivars(dev);
	KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);

	kfree(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);

	callout_init(&sc_if->msk_tick_ch);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_ifp = ifp;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;

	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	error = msk_txrx_dma_alloc(sc_if);
	if (error)
		return (error);

	ifp->if_softc = sc_if;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = msk_init;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
	 * has a serious bug in Rx checksum offload for all Yukon II family
	 * hardware.  It seems there is a workaround to make it work sometimes.
	 * However, the workaround also has to check OP code sequences to
	 * verify whether the OP code is correct.  Sometimes it should compute
	 * the IP/TCP/UDP checksum in the driver in order to verify the
	 * correctness of the checksum computed by hardware.  If you have to
	 * compute the checksum with software to verify the hardware's
	 * checksum, why have the hardware compute the checksum?  I think
	 * there is no reason to spend time making Rx checksum offload work
	 * on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Get station address for this interface.  Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra.  The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode.  Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;

	error = mii_phy_probe(dev, &sc_if->msk_miibus,
			      msk_mediachange, msk_mediastatus);
	if (error) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	ether_ifattach(ifp, eaddr, &sc->msk_serializer);

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	return (0);
fail:
	sc->msk_if[port] = NULL;
	msk_detach(dev);
	return (error);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	int error, *port, cpuid;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	lwkt_serialize_init(&sc->msk_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->msk_process_limit = mskc_process_limit;
	sc->msk_intr_rate = mskc_intr_rate;

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, bar0, bar1;

		/* Save important PCI config data. */
		bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
		bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, PCIR_BAR(0), bar0, 4);
		pci_write_config(dev, PCIR_BAR(1), bar1, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Allocate I/O resource
	 */
#ifdef MSK_USEIOSPACE
	sc->msk_res_type = SYS_RES_IOPORT;
	sc->msk_res_rid = PCIR_BAR(1);
#else
	sc->msk_res_type = SYS_RES_MEMORY;
	sc->msk_res_rid = PCIR_BAR(0);
#endif
	sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
					     &sc->msk_res_rid, RF_ACTIVE);
	if (sc->msk_res == NULL) {
		if (sc->msk_res_type == SYS_RES_MEMORY) {
			sc->msk_res_type = SYS_RES_IOPORT;
			sc->msk_res_rid = PCIR_BAR(1);
		} else {
			sc->msk_res_type = SYS_RES_MEMORY;
			sc->msk_res_rid = PCIR_BAR(0);
		}
		sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
						     &sc->msk_res_rid,
						     RF_ACTIVE);
		if (sc->msk_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_type == SYS_RES_MEMORY ?
			    "memory" : "I/O");
			return (ENXIO);
		}
	}
	sc->msk_res_bt = rman_get_bustag(sc->msk_res);
	sc->msk_res_bh = rman_get_bushandle(sc->msk_res);

	/*
	 * Allocate IRQ
	 */
	sc->msk_irq_rid = 0;
	sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
					     &sc->msk_irq_rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->msk_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}

	/* Enable all clocks before accessing any registers. */
	CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
	    sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->msk_sysctl_ctx);
	sc->msk_sysctl_tree = SYSCTL_ADD_NODE(&sc->msk_sysctl_ctx,
					      SYSCTL_STATIC_CHILDREN(_hw),
					      OID_AUTO,
					      device_get_nameunit(dev),
					      CTLFLAG_RD, 0, "");
	if (sc->msk_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
			SYSCTL_CHILDREN(sc->msk_sysctl_tree),
			OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
			&sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
			"I", "max number of Rx events to process");
	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
			SYSCTL_CHILDREN(sc->msk_sysctl_tree),
			OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, mskc_sysctl_intr_rate,
			"I", "max number of interrupts per second");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "defrag_avoided", CTLFLAG_RW, &sc->msk_defrag_avoided,
		       0, "# of avoided m_defrag on TX path");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "leading_copied", CTLFLAG_RW, &sc->msk_leading_copied,
		       0, "# of leading copies on TX path");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "trailing_copied", CTLFLAG_RW, &sc->msk_trailing_copied,
		       0, "# of trailing copies on TX path");

	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_is_pcie(sc->msk_dev) == 0) {
		sc->msk_bustype = MSK_PEX_BUS;
		sc->msk_pciecap = pci_get_pciecap_ptr(sc->msk_dev);
	} else if (pci_is_pcix(sc->msk_dev) == 0) {
		sc->msk_bustype = MSK_PCIX_BUS;
		sc->msk_pcixcap = pci_get_pcixcap_ptr(sc->msk_dev);
	} else {
		sc->msk_bustype = MSK_PCI_BUS;
	}

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_EX:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		break;
	case CHIP_ID_YUKON_FE_P:
		sc->msk_clock = 50;	/* 50 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
			/*
			 * FE+ A0 has a status LE writeback bug, so msk(4)
			 * does not rely on the status word of received
			 * frames in msk_rxeof(), which in turn disables
			 * all hardware assistance bits reported by the
			 * status word as well as the validity of the
			 * received frame.  Just pass received frames to
			 * the upper stack with minimal test and let the
			 * upper stack handle them.
			 */
			sc->msk_pflags |= MSK_FLAG_NORXCHK;
		}
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	case CHIP_ID_YUKON_SUPR:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_UL_2:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_OPT:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	error = mskc_status_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
			      Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	error = mskc_setup_rambuffer(sc);
	if (error)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	bus_generic_attach(dev);

	error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
			       mskc_intr, sc, &sc->msk_intrhand,
			       &sc->msk_serializer);
	if (error) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}

	cpuid = ithread_cpuid(rman_get_start(sc->msk_irq));
	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	if (sc->msk_if[0] != NULL)
		sc->msk_if[0]->msk_ifp->if_cpuid = cpuid;
	if (sc->msk_if[1] != NULL)
		sc->msk_if[1]->msk_ifp->if_cpuid = cpuid;

	return (0);
fail:
	mskc_detach(dev);
	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct msk_softc *sc = sc_if->msk_softc;
		struct ifnet *ifp = &sc_if->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);

		if (sc->msk_intrhand != NULL) {
			if (sc->msk_if[MSK_PORT_A] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_A]);
			if (sc->msk_if[MSK_PORT_B] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_B]);

			bus_teardown_intr(sc->msk_dev, sc->msk_irq,
					  sc->msk_intrhand);
			sc->msk_intrhand = NULL;
		}

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc_if->msk_miibus != NULL)
		device_delete_child(dev, sc_if->msk_miibus);

	msk_txrx_dma_free(sc_if);

	return (0);
}

static int
mskc_detach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int *port, i;

	if (device_is_attached(dev)) {
		KASSERT(sc->msk_intrhand == NULL,
			("intr is not torn down yet\n"));
	}

	for (i = 0; i < sc->msk_num_port; ++i) {
		if (sc->msk_devs[i] != NULL) {
			port = device_get_ivars(sc->msk_devs[i]);
			if (port != NULL) {
				kfree(port, M_DEVBUF);
				device_set_ivars(sc->msk_devs[i], NULL);
			}
			device_delete_child(dev, sc->msk_devs[i]);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware into reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	mskc_status_dma_free(sc);

	if (sc->msk_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid,
				     sc->msk_irq);
	}
	if (sc->msk_res != NULL) {
		bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid,
				     sc->msk_res);
	}

	if (sc->msk_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->msk_sysctl_ctx);

	return (0);
}

/* Create status DMA region. */
static int
mskc_status_dma_alloc(struct msk_softc *sc)
{
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(NULL/* XXX parent */, MSK_STAT_ALIGN, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    MSK_STAT_RING_SZ,
				    BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->msk_dev,
		    "failed to create status coherent DMA memory\n");
		return (error);
	}
	sc->msk_stat_tag = dmem.dmem_tag;
	sc->msk_stat_map = dmem.dmem_map;
	sc->msk_stat_ring = dmem.dmem_addr;
	sc->msk_stat_ring_paddr = dmem.dmem_busaddr;

	return (0);
}

static void
mskc_status_dma_free(struct msk_softc *sc)
{
	/* Destroy status block. */
	if (sc->msk_stat_tag) {
		bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
		bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
				sc->msk_stat_map);
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
	}
}
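
/*
 * Note: the status ring is shared by both ports.  The chip reports
 * completed Rx/Tx work by writing status list elements here, and
 * mskc_handle_events() consumes them from msk_stat_cons on each
 * interrupt.
 */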
1887 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
1891 struct msk_rxdesc *jrxd;
1892 struct msk_jpool_entry *entry;
1897 /* Create parent DMA tag. */
1900 * It seems that Yukon II supports full 64bits DMA operations. But
1901 * it needs two descriptors(list elements) for 64bits DMA operations.
1902 * Since we don't know what DMA address mappings(32bits or 64bits)
1903 * would be used in advance for each mbufs, we limits its DMA space
1904 * to be in range of 32bits address space. Otherwise, we should check
1905 * what DMA address is used and chain another descriptor for the
1906 * 64bits DMA operation. This also means descriptor ring size is
1907 * variable. Limiting DMA address to be in 32bit address space greatly
1908 * simplyfies descriptor handling and possibly would increase
1909 * performance a bit due to efficient handling of descriptors.
1910 * Apart from harassing checksum offloading mechanisms, it seems
1911 * it's really bad idea to use a seperate descriptor for 64bit
1912 * DMA operation to save small descriptor memory. Anyway, I've
1913 * never seen these exotic scheme on ethernet interface hardware.
1915 error = bus_dma_tag_create(
1917 1, 0, /* alignment, boundary */
1918 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1919 BUS_SPACE_MAXADDR, /* highaddr */
1920 NULL, NULL, /* filter, filterarg */
1921 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1923 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1925 &sc_if->msk_cdata.msk_parent_tag);
1927 device_printf(sc_if->msk_if_dev,
1928 "failed to create parent DMA tag\n");
1932 /* Create DMA stuffs for Tx ring. */
1933 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
1934 &sc_if->msk_cdata.msk_tx_ring_tag,
1935 (void *)&sc_if->msk_rdata.msk_tx_ring,
1936 &sc_if->msk_rdata.msk_tx_ring_paddr,
1937 &sc_if->msk_cdata.msk_tx_ring_map);
1939 device_printf(sc_if->msk_if_dev,
1940 "failed to create TX ring DMA stuffs\n");
1944 /* Create DMA stuffs for Rx ring. */
1945 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ,
1946 &sc_if->msk_cdata.msk_rx_ring_tag,
1947 (void *)&sc_if->msk_rdata.msk_rx_ring,
1948 &sc_if->msk_rdata.msk_rx_ring_paddr,
1949 &sc_if->msk_cdata.msk_rx_ring_map);
1951 device_printf(sc_if->msk_if_dev,
1952 "failed to create RX ring DMA stuffs\n");
1956 /* Create tag for Tx buffers. */
1957 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1958 1, 0, /* alignment, boundary */
1959 BUS_SPACE_MAXADDR, /* lowaddr */
1960 BUS_SPACE_MAXADDR, /* highaddr */
1961 NULL, NULL, /* filter, filterarg */
1962 MSK_JUMBO_FRAMELEN, /* maxsize */
1963 MSK_MAXTXSEGS, /* nsegments */
1964 MSK_MAXSGSIZE, /* maxsegsize */
1965 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
1966 BUS_DMA_ONEBPAGE, /* flags */
1967 &sc_if->msk_cdata.msk_tx_tag);
1969 device_printf(sc_if->msk_if_dev,
1970 "failed to create Tx DMA tag\n");
1974 /* Create DMA maps for Tx buffers. */
1975 for (i = 0; i < MSK_TX_RING_CNT; i++) {
1976 struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i];
1978 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag,
1979 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1982 device_printf(sc_if->msk_if_dev,
1983 "failed to create %dth Tx dmamap\n", i);
1985 for (j = 0; j < i; ++j) {
1986 txd = &sc_if->msk_cdata.msk_txdesc[j];
1987 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
1990 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
1991 sc_if->msk_cdata.msk_tx_tag = NULL;
/*
 * Work around a hardware hang which seems to happen when the Rx
 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
 */
if (sc_if->msk_flags & MSK_FLAG_RAMBUF)
    rxalign = MSK_RX_BUF_ALIGN;

/* Create tag for Rx buffers. */
error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag, /* parent */
    rxalign, 0,			/* alignment, boundary */
    BUS_SPACE_MAXADDR,		/* lowaddr */
    BUS_SPACE_MAXADDR,		/* highaddr */
    NULL, NULL,			/* filter, filterarg */
    MCLBYTES,			/* maxsize */
    MCLBYTES,			/* maxsegsize */
    BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED |
    BUS_DMA_WAITOK,		/* flags */
    &sc_if->msk_cdata.msk_rx_tag);
device_printf(sc_if->msk_if_dev,
    "failed to create Rx DMA tag\n");

/* Create DMA maps for Rx buffers. */
error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, BUS_DMA_WAITOK,
    &sc_if->msk_cdata.msk_rx_sparemap);
device_printf(sc_if->msk_if_dev,
    "failed to create spare Rx dmamap\n");
bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
sc_if->msk_cdata.msk_rx_tag = NULL;

for (i = 0; i < MSK_RX_RING_CNT; i++) {
    struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i];

    error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag,
        BUS_DMA_WAITOK, &rxd->rx_dmamap);
    device_printf(sc_if->msk_if_dev,
        "failed to create Rx dmamap %d\n", i);
    for (j = 0; j < i; ++j) {
        rxd = &sc_if->msk_cdata.msk_rxdesc[j];
        bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
    bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
        sc_if->msk_cdata.msk_rx_sparemap);
    bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
    sc_if->msk_cdata.msk_rx_tag = NULL;

SLIST_INIT(&sc_if->msk_jfree_listhead);
SLIST_INIT(&sc_if->msk_jinuse_listhead);

/* Create tag for jumbo Rx ring. */
error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag, /* parent */
    MSK_RING_ALIGN, 0,		/* alignment, boundary */
    BUS_SPACE_MAXADDR,		/* lowaddr */
    BUS_SPACE_MAXADDR,		/* highaddr */
    NULL, NULL,			/* filter, filterarg */
    MSK_JUMBO_RX_RING_SZ,	/* maxsize */
    MSK_JUMBO_RX_RING_SZ,	/* maxsegsize */
    NULL, NULL,			/* lockfunc, lockarg */
    &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
device_printf(sc_if->msk_if_dev,
    "failed to create jumbo Rx ring DMA tag\n");

/* Allocate DMA'able memory and load the DMA map for the jumbo Rx ring. */
error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
    (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
    &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
device_printf(sc_if->msk_if_dev,
    "failed to allocate DMA'able memory for jumbo Rx ring\n");

ctx.msk_busaddr = 0;
error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
    sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
    msk_dmamap_cb, &ctx, 0);
device_printf(sc_if->msk_if_dev,
    "failed to load DMA'able memory for jumbo Rx ring\n");
sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
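/*
 * For reference, msk_dmamap_cb used above is the conventional
 * single-segment bus_dmamap_load(9) callback that captures the bus
 * address of the loaded memory.  A minimal sketch of that pattern
 * follows (the real definition lives earlier in this file; the struct
 * name is inferred from the ctx usage above):
 */
#if 0
static void
msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct msk_dmamap_arg *ctx = arg;

    if (error != 0)
        return;
    KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));
    ctx->msk_busaddr = segs[0].ds_addr;
}
#endif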
/* Create tag for jumbo buffer blocks. */
error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag, /* parent */
    PAGE_SIZE, 0,		/* alignment, boundary */
    BUS_SPACE_MAXADDR,		/* lowaddr */
    BUS_SPACE_MAXADDR,		/* highaddr */
    NULL, NULL,			/* filter, filterarg */
    MSK_JMEM,			/* maxsize */
    MSK_JMEM,			/* maxsegsize */
    NULL, NULL,			/* lockfunc, lockarg */
    &sc_if->msk_cdata.msk_jumbo_tag);
device_printf(sc_if->msk_if_dev,
    "failed to create jumbo Rx buffer block DMA tag\n");

/* Create tag for jumbo Rx buffers. */
error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag, /* parent */
    PAGE_SIZE, 0,		/* alignment, boundary */
    BUS_SPACE_MAXADDR,		/* lowaddr */
    BUS_SPACE_MAXADDR,		/* highaddr */
    NULL, NULL,			/* filter, filterarg */
    MCLBYTES * MSK_MAXRXSEGS,	/* maxsize */
    MSK_MAXRXSEGS,		/* nsegments */
    MSK_JLEN,			/* maxsegsize */
    NULL, NULL,			/* lockfunc, lockarg */
    &sc_if->msk_cdata.msk_jumbo_rx_tag);
device_printf(sc_if->msk_if_dev,
    "failed to create jumbo Rx DMA tag\n");

/* Create DMA maps for jumbo Rx buffers. */
if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
    device_printf(sc_if->msk_if_dev,
        "failed to create spare jumbo Rx dmamap\n");
for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
    jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
    jrxd->rx_dmamap = NULL;
    error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
    device_printf(sc_if->msk_if_dev,
        "failed to create jumbo Rx dmamap\n");

/* Allocate DMA'able memory and load the DMA map for the jumbo buf. */
error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
    (void **)&sc_if->msk_rdata.msk_jumbo_buf,
    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
    &sc_if->msk_cdata.msk_jumbo_map);
device_printf(sc_if->msk_if_dev,
    "failed to allocate DMA'able memory for jumbo buf\n");

ctx.msk_busaddr = 0;
error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
    sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
    MSK_JMEM, msk_dmamap_cb, &ctx, 0);
device_printf(sc_if->msk_if_dev,
    "failed to load DMA'able memory for jumbo buf\n");
sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;

/*
 * Now divide it up into 9K pieces and save the addresses
 * in an array.
 */
ptr = sc_if->msk_rdata.msk_jumbo_buf;
for (i = 0; i < MSK_JSLOTS; i++) {
    sc_if->msk_cdata.msk_jslots[i] = ptr;
    entry = malloc(sizeof(struct msk_jpool_entry),
        M_DEVBUF, M_NOWAIT);
    if (entry == NULL) {
        device_printf(sc_if->msk_if_dev,
            "no memory for jumbo buffers!\n");
    SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
msk_txrx_dma_free(struct msk_if_softc *sc_if)
struct msk_txdesc *txd;
struct msk_rxdesc *rxd;
struct msk_rxdesc *jrxd;
struct msk_jpool_entry *entry;

MSK_JLIST_LOCK(sc_if);
while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
    device_printf(sc_if->msk_if_dev,
        "asked to free buffer that is in use!\n");
    SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
    SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
    entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
    SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
    free(entry, M_DEVBUF);
MSK_JLIST_UNLOCK(sc_if);

/* Destroy the jumbo buffer block. */
if (sc_if->msk_cdata.msk_jumbo_map)
    bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
        sc_if->msk_cdata.msk_jumbo_map);
if (sc_if->msk_rdata.msk_jumbo_buf) {
    bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
        sc_if->msk_rdata.msk_jumbo_buf,
        sc_if->msk_cdata.msk_jumbo_map);
    sc_if->msk_rdata.msk_jumbo_buf = NULL;
    sc_if->msk_cdata.msk_jumbo_map = NULL;

/* Jumbo Rx ring. */
if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
    if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
        bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
            sc_if->msk_cdata.msk_jumbo_rx_ring_map);
    if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
        sc_if->msk_rdata.msk_jumbo_rx_ring)
        bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
            sc_if->msk_rdata.msk_jumbo_rx_ring,
            sc_if->msk_cdata.msk_jumbo_rx_ring_map);
    sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
    sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
    bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
    sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;

/* Jumbo Rx buffers. */
if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
    for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
        jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
        if (jrxd->rx_dmamap) {
            sc_if->msk_cdata.msk_jumbo_rx_tag,
            jrxd->rx_dmamap = NULL;
    if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
        bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
            sc_if->msk_cdata.msk_jumbo_rx_sparemap);
        sc_if->msk_cdata.msk_jumbo_rx_sparemap = NULL;
    bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
    sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;

msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
    sc_if->msk_rdata.msk_tx_ring,
    sc_if->msk_cdata.msk_tx_ring_map);
msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag,
    sc_if->msk_rdata.msk_rx_ring,
    sc_if->msk_cdata.msk_rx_ring_map);
if (sc_if->msk_cdata.msk_tx_tag) {
    for (i = 0; i < MSK_TX_RING_CNT; i++) {
        txd = &sc_if->msk_cdata.msk_txdesc[i];
        bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
    bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
    sc_if->msk_cdata.msk_tx_tag = NULL;
if (sc_if->msk_cdata.msk_rx_tag) {
    for (i = 0; i < MSK_RX_RING_CNT; i++) {
        rxd = &sc_if->msk_cdata.msk_rxdesc[i];
        bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
    bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
        sc_if->msk_cdata.msk_rx_sparemap);
    bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
    sc_if->msk_cdata.msk_rx_tag = NULL;
if (sc_if->msk_cdata.msk_parent_tag) {
    bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
    sc_if->msk_cdata.msk_parent_tag = NULL;

/*
 * Allocate a jumbo buffer.
 */
msk_jalloc(struct msk_if_softc *sc_if)
struct msk_jpool_entry *entry;

MSK_JLIST_LOCK(sc_if);
entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
if (entry == NULL) {
    MSK_JLIST_UNLOCK(sc_if);
SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
MSK_JLIST_UNLOCK(sc_if);
return (sc_if->msk_cdata.msk_jslots[entry->slot]);

/*
 * Release a jumbo buffer.
 */
msk_jfree(void *buf, void *args)
struct msk_if_softc *sc_if;
struct msk_jpool_entry *entry;

/* Extract the softc struct pointer. */
sc_if = (struct msk_if_softc *)args;
KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));

MSK_JLIST_LOCK(sc_if);
/* Calculate the slot this buffer belongs to. */
i = ((vm_offset_t)buf
    - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
KASSERT(i >= 0 && i < MSK_JSLOTS,
    ("%s: asked to free buffer that we don't manage!", __func__));
entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
MSK_JLIST_UNLOCK(sc_if);
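/*
 * Usage note: msk_jalloc() and msk_jfree() are the jumbo slot pool's
 * public pair.  A receive path takes a slot with msk_jalloc(), attaches
 * it to an mbuf as external storage with msk_jfree() registered as the
 * free routine, and the slot migrates back to msk_jfree_listhead when
 * the mbuf is released.  (The exact external-storage attach macro
 * varies across BSD versions, so none is shown here.)
 */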
msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
struct msk_txdesc *txd, *txd_last;
struct msk_tx_desc *tx_le;
bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
uint32_t control, prod, si;
uint16_t offset, tcp_offset;
int error, i, nsegs, maxsegs, defrag;

maxsegs = MSK_TX_RING_CNT - sc_if->msk_cdata.msk_tx_cnt -
    MSK_RESERVED_TX_DESC_CNT;
KASSERT(maxsegs >= MSK_SPARE_TX_DESC_CNT,
    ("not enough spare TX descriptors"));
if (maxsegs > MSK_MAXTXSEGS)
    maxsegs = MSK_MAXTXSEGS;

/*
 * Align the TX buffer on a 64-byte boundary.  This greatly improves
 * bulk data TX performance on my 88E8053 (+100Mbps) at least.
 * Try to avoid m_defrag() if the mbufs are not chained together
 * by m_next (i.e. m->m_len == m->m_pkthdr.len).
 */
#define MSK_TXBUF_ALIGN 64
#define MSK_TXBUF_MASK (MSK_TXBUF_ALIGN - 1)
if (m->m_len == m->m_pkthdr.len) {
    space = ((uintptr_t)m->m_data & MSK_TXBUF_MASK);
    if (M_WRITABLE(m)) {
        if (M_TRAILINGSPACE(m) >= space) {
            bcopy(m->m_data, m->m_data + space,
            sc_if->msk_softc->msk_trailing_copied++;
            space = MSK_TXBUF_ALIGN - space;
            if (M_LEADINGSPACE(m) >= space) {
                /* e.g. small UDP datagrams */
                msk_leading_copied++;
        /* e.g. on the forwarding path */
    m = m_defrag(*m_head, MB_DONTWAIT);
    sc_if->msk_softc->msk_defrag_avoided++;
#undef MSK_TXBUF_MASK
#undef MSK_TXBUF_ALIGN
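/*
 * For illustration, a stand-alone sketch of 64-byte payload alignment
 * (not compiled): move the payload back by the misalignment when there
 * is leading space, else forward to the next boundary when there is
 * trailing space.  The helper name is hypothetical, and the in-tree
 * code above additionally updates the copy statistics and falls back
 * to m_defrag().
 */
#if 0
static void
msk_align_txbuf_64(struct mbuf *m)
{
    int off = (uintptr_t)m->m_data & 63;    /* bytes past a boundary */

    if (off == 0 || !M_WRITABLE(m))
        return;
    if (M_LEADINGSPACE(m) >= off) {
        /* Slide the payload back onto the previous boundary. */
        bcopy(m->m_data, m->m_data - off, m->m_len);
        m->m_data -= off;
    } else if (M_TRAILINGSPACE(m) >= 64 - off) {
        /* Slide the payload forward onto the next boundary. */
        bcopy(m->m_data, m->m_data + (64 - off), m->m_len);
        m->m_data += 64 - off;
    }
}
#endif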
tcp_offset = offset = 0;
if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
    /*
     * Since the mbuf carries no protocol-specific structure
     * information, we have to inspect the protocol headers here
     * to set up TSO and checksum offload.  I don't know why
     * Marvell made such a decision in the chip design, because
     * other GigE hardware normally takes care of all these chores
     * itself.  However, the TSO performance of Yukon II is good
     * enough that it is worth implementing.
     */
    struct ether_header *eh;

    /* TODO: check for M_WRITABLE(m) */

    offset = sizeof(struct ether_header);
    m = m_pullup(m, offset);
    eh = mtod(m, struct ether_header *);
    /* Check if hardware VLAN insertion is off. */
    if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
        offset = sizeof(struct ether_vlan_header);
        m = m_pullup(m, offset);
    m = m_pullup(m, offset + sizeof(struct ip));
    ip = (struct ip *)(mtod(m, char *) + offset);
    offset += (ip->ip_hl << 2);
    tcp_offset = offset;
    /*
     * It seems that Yukon II has a Tx checksum offload bug for
     * small TCP packets that are less than 60 bytes in size
     * (e.g. TCP window probe packets, pure ACK packets).
     * The common workaround of padding the frame with zeros up
     * to the minimum Ethernet frame size did not work at all.
     * Instead of disabling checksum offload completely, we
     * resort to a software checksum routine when we encounter
     * short TCP frames.
     * Short UDP packets appear to be handled correctly by
     * Yukon II.
     */
    if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
        (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
        csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
            (ip->ip_hl << 2), offset);
        *(uint16_t *)(m->m_data + offset +
            m->m_pkthdr.csum_data) = csum;
        m->m_pkthdr.csum_flags &= ~CSUM_TCP;
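/*
 * Worked example: a pure TCP ACK is 54 bytes on the wire (14-byte
 * Ethernet + 20-byte IP + 20-byte TCP header), which is below the
 * 60-byte minimum frame size that MSK_MIN_FRAMELEN presumably
 * represents, so its checksum is computed here in software and
 * CSUM_TCP is cleared before the descriptors are built.
 */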
prod = sc_if->msk_cdata.msk_tx_prod;
txd = &sc_if->msk_cdata.msk_txdesc[prod];
map = txd->tx_dmamap;

error = bus_dmamap_load_mbuf_defrag(sc_if->msk_cdata.msk_tx_tag, map,
    m_head, txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
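/*
 * Note: bus_dmamap_load_mbuf_defrag() above is the DragonFly helper
 * that loads an mbuf chain and, if the chain needs more than maxsegs
 * DMA segments, defragments it and retries, updating *m_head in place.
 */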
/* Check if we have a VLAN tag to insert. */
if ((m->m_flags & M_VLANTAG) != 0) {
    tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
    tx_le->msk_addr = htole32(0);
    tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
        htons(m->m_pkthdr.ether_vtag));
    sc_if->msk_cdata.msk_tx_cnt++;
    MSK_INC(prod, MSK_TX_RING_CNT);
    control |= INS_VLAN;

/* Check if we have to handle checksum offload. */
if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
    tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
    tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
        & 0xffff) | ((uint32_t)tcp_offset << 16));
    tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
    control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
    if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
    sc_if->msk_cdata.msk_tx_cnt++;
    MSK_INC(prod, MSK_TX_RING_CNT);

tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
tx_le->msk_control = htole32(txsegs[0].ds_len | control |
sc_if->msk_cdata.msk_tx_cnt++;
MSK_INC(prod, MSK_TX_RING_CNT);

for (i = 1; i < nsegs; i++) {
    tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
    tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
    tx_le->msk_control = htole32(txsegs[i].ds_len | control |
        OP_BUFFER | HW_OWNER);
    sc_if->msk_cdata.msk_tx_cnt++;
    MSK_INC(prod, MSK_TX_RING_CNT);

/* Update the producer index. */
sc_if->msk_cdata.msk_tx_prod = prod;

/* Set EOP on the last descriptor. */
prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
tx_le->msk_control |= htole32(EOP);

/* Hand ownership of the first descriptor to the hardware. */
tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
tx_le->msk_control |= htole32(HW_OWNER);

txd = &sc_if->msk_cdata.msk_txdesc[prod];
map = txd_last->tx_dmamap;
txd_last->tx_dmamap = txd->tx_dmamap;
txd->tx_dmamap = map;
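/*
 * Summary of the list-element (LE) layout built above for one frame:
 * an optional OP_VLAN LE, an optional checksum-parameter LE, the first
 * data LE, then one OP_BUFFER LE per remaining DMA segment.  EOP is
 * set on the last LE, and HW_OWNER is flipped on the first LE only
 * after all the others are written, so the chip never sees a
 * half-built chain.
 */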
msk_start(struct ifnet *ifp)
struct msk_if_softc *sc_if;
struct mbuf *m_head;

sc_if = ifp->if_softc;
ASSERT_SERIALIZED(ifp->if_serializer);

if (!sc_if->msk_link) {
    ifq_purge(&ifp->if_snd);
if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)

while (!ifq_is_empty(&ifp->if_snd)) {
    if (MSK_IS_OACTIVE(sc_if)) {
        ifp->if_flags |= IFF_OACTIVE;
    m_head = ifq_dequeue(&ifp->if_snd, NULL);
    /*
     * Pack the data into the transmit ring.  If we
     * don't have room, set the OACTIVE flag and wait
     * for the NIC to drain the ring.
     */
    if (msk_encap(sc_if, &m_head) != 0) {
        if (sc_if->msk_cdata.msk_tx_cnt == 0) {
            ifp->if_flags |= IFF_OACTIVE;
    /*
     * If there's a BPF listener, bounce a copy of this frame
     * to it.
     */
    BPF_MTAP(ifp, m_head);

CSR_WRITE_2(sc_if->msk_softc,
    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
    sc_if->msk_cdata.msk_tx_prod);

/* Set a timeout in case the chip goes out to lunch. */
ifp->if_timer = MSK_TX_TIMEOUT;
msk_watchdog(struct ifnet *ifp)
struct msk_if_softc *sc_if = ifp->if_softc;

ASSERT_SERIALIZED(ifp->if_serializer);

if (sc_if->msk_link == 0) {
    if_printf(sc_if->msk_ifp, "watchdog timeout "

/*
 * Reclaim first, as there is a possibility of losing Tx completion
 * interrupts.
 */
ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
idx = CSR_READ_2(sc_if->msk_softc, ridx);
if (sc_if->msk_cdata.msk_tx_cons != idx) {
    msk_txeof(sc_if, idx);
    if (sc_if->msk_cdata.msk_tx_cnt == 0) {
        if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
        if (!ifq_is_empty(&ifp->if_snd))

if_printf(ifp, "watchdog timeout\n");
if (!ifq_is_empty(&ifp->if_snd))
mskc_shutdown(device_t dev)
struct msk_softc *sc = device_get_softc(dev);

lwkt_serialize_enter(&sc->msk_serializer);
for (i = 0; i < sc->msk_num_port; i++) {
    if (sc->msk_if[i] != NULL)
        msk_stop(sc->msk_if[i]);

/* Put the hardware into reset. */
CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
lwkt_serialize_exit(&sc->msk_serializer);

mskc_suspend(device_t dev)
struct msk_softc *sc = device_get_softc(dev);

lwkt_serialize_enter(&sc->msk_serializer);
for (i = 0; i < sc->msk_num_port; i++) {
    if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
        ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0))
        msk_stop(sc->msk_if[i]);

/* Disable all interrupts. */
CSR_WRITE_4(sc, B0_IMSK, 0);
CSR_READ_4(sc, B0_IMSK);
CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
CSR_READ_4(sc, B0_HWE_IMSK);
mskc_phy_power(sc, MSK_PHY_POWERDOWN);

/* Put the hardware into reset. */
CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
sc->msk_suspended = 1;
lwkt_serialize_exit(&sc->msk_serializer);

mskc_resume(device_t dev)
struct msk_softc *sc = device_get_softc(dev);

lwkt_serialize_enter(&sc->msk_serializer);
/* Enable all clocks before accessing any registers. */
CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
for (i = 0; i < sc->msk_num_port; i++) {
    if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
        ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
        msk_init(sc->msk_if[i]);
sc->msk_suspended = 0;
lwkt_serialize_exit(&sc->msk_serializer);
msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len,
    struct mbuf_chain *chain)
struct msk_rxdesc *rxd;

ifp = sc_if->msk_ifp;
cons = sc_if->msk_cdata.msk_rx_cons;
rxlen = status >> 16;
if ((status & GMR_FS_VLAN) != 0 &&
    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
    rxlen -= EVL_ENCAPLEN;
if (sc_if->msk_flags & MSK_FLAG_NORXCHK) {
    /*
     * For controllers that return a bogus status code, just
     * do a minimal check and let the upper stack handle
     * this frame.
     */
    if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
        msk_discard_rxbuf(sc_if, cons);
} else if (len > sc_if->msk_framesize ||
    ((status & GMR_FS_ANY_ERR) != 0) ||
    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
    /* Don't count flow-control packets as errors. */
    if ((status & GMR_FS_GOOD_FC) == 0)
    msk_discard_rxbuf(sc_if, cons);

rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
if (msk_newbuf(sc_if, cons, 0) != 0) {
    /* Reuse the old buffer. */
    msk_discard_rxbuf(sc_if, cons);
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = len;

/* Check for VLAN-tagged packets. */
if ((status & GMR_FS_VLAN) != 0 &&
    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
    m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
    m->m_flags |= M_VLANTAG;

ether_input_chain(ifp, m, NULL, chain);
MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
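/*
 * Note that the Rx consumer and producer indices advance in lockstep:
 * every received frame either hands its filled buffer up the stack and
 * gets a fresh one (msk_newbuf()), or is dropped and its buffer reused
 * (msk_discard_rxbuf()), so the same ring slot is immediately made
 * available to the chip again.
 */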
msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
struct msk_rxdesc *jrxd;

ifp = sc_if->msk_ifp;
MSK_IF_LOCK_ASSERT(sc_if);
cons = sc_if->msk_cdata.msk_rx_cons;
rxlen = status >> 16;
if ((status & GMR_FS_VLAN) != 0 &&
    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
    rxlen -= ETHER_VLAN_ENCAP_LEN;
if (len > sc_if->msk_framesize ||
    ((status & GMR_FS_ANY_ERR) != 0) ||
    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
    /* Don't count flow-control packets as errors. */
    if ((status & GMR_FS_GOOD_FC) == 0)
    msk_discard_jumbo_rxbuf(sc_if, cons);

jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
if (msk_jumbo_newbuf(sc_if, cons) != 0) {
    /* Reuse the old buffer. */
    msk_discard_jumbo_rxbuf(sc_if, cons);
m->m_pkthdr.rcvif = ifp;
m->m_pkthdr.len = m->m_len = len;

/* Check for VLAN-tagged packets. */
if ((status & GMR_FS_VLAN) != 0 &&
    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
    m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
    m->m_flags |= M_VLANTAG;
MSK_IF_UNLOCK(sc_if);
(*ifp->if_input)(ifp, m);
MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
msk_txeof(struct msk_if_softc *sc_if, int idx)
struct msk_txdesc *txd;
struct msk_tx_desc *cur_tx;

ifp = sc_if->msk_ifp;
/*
 * Go through our Tx ring and free mbufs for those
 * frames that have been sent.
 */
cons = sc_if->msk_cdata.msk_tx_cons;
for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
    if (sc_if->msk_cdata.msk_tx_cnt <= 0)
    cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
    control = le32toh(cur_tx->msk_control);
    sc_if->msk_cdata.msk_tx_cnt--;
    if ((control & EOP) == 0)
    txd = &sc_if->msk_cdata.msk_txdesc[cons];
    bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
    KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",

sc_if->msk_cdata.msk_tx_cons = cons;
if (!MSK_IS_OACTIVE(sc_if))
    ifp->if_flags &= ~IFF_OACTIVE;
if (sc_if->msk_cdata.msk_tx_cnt == 0)
/* No need to sync the LEs, as we didn't update them. */

msk_tick(void *xsc_if)
struct msk_if_softc *sc_if = xsc_if;
struct ifnet *ifp = &sc_if->arpcom.ac_if;
struct mii_data *mii;

lwkt_serialize_enter(ifp->if_serializer);
mii = device_get_softc(sc_if->msk_miibus);
if (!sc_if->msk_link)
    msk_miibus_statchg(sc_if->msk_if_dev);
callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
lwkt_serialize_exit(ifp->if_serializer);
msk_intr_phy(struct msk_if_softc *sc_if)

msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
/* Handle FIFO underrun/overflow? */
if (status & PHY_M_IS_FIFO_ERROR) {
    device_printf(sc_if->msk_if_dev,
        "PHY FIFO underrun/overflow.\n");

msk_intr_gmac(struct msk_if_softc *sc_if)
struct msk_softc *sc;

sc = sc_if->msk_softc;
status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

/* GMAC Rx FIFO overrun. */
if ((status & GM_IS_RX_FF_OR) != 0) {
    CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),

/* GMAC Tx FIFO underrun. */
if ((status & GM_IS_TX_FF_UR) != 0) {
    CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
    device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
    /*
     * In case of a Tx underrun we may need to flush/reset the
     * Tx MAC, but that would also require resynchronization
     * with the status LEs.  Reinitializing the status LEs would
     * affect the other port in a dual-MAC configuration, so it
     * should be avoided as much as possible.  Due to the lack
     * of documentation this is all a vague guess; it needs more
     * investigation.
     */
msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
struct msk_softc *sc;

sc = sc_if->msk_softc;
if ((status & Y2_IS_PAR_RD1) != 0) {
    device_printf(sc_if->msk_if_dev,
        "RAM buffer read parity error\n");
    CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
if ((status & Y2_IS_PAR_WR1) != 0) {
    device_printf(sc_if->msk_if_dev,
        "RAM buffer write parity error\n");
    CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
if ((status & Y2_IS_PAR_MAC1) != 0) {
    device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
    CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
if ((status & Y2_IS_PAR_RX1) != 0) {
    device_printf(sc_if->msk_if_dev, "Rx parity error\n");
    CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
    device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
    CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
mskc_intr_hwerr(struct msk_softc *sc)
uint32_t tlphead[4];

status = CSR_READ_4(sc, B0_HWE_ISRC);
/* Time stamp timer overflow. */
if ((status & Y2_IS_TIST_OV) != 0)
    CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
if ((status & Y2_IS_PCI_NEXP) != 0) {
    /*
     * A PCI Express error occurred which is not described in the
     * PEX spec.
     * This error is also mapped either to the Master Abort
     * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
     * can only be cleared there.
     */
    device_printf(sc->msk_dev,
        "PCI Express protocol violation error\n");

if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
    if ((status & Y2_IS_MST_ERR) != 0)
        device_printf(sc->msk_dev,
            "unexpected IRQ Master error\n");
        device_printf(sc->msk_dev,
            "unexpected IRQ Status error\n");
    /* Reset all bits in the PCI status register. */
    v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
    CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
    pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
        PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
        PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
    CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

/* Check for a PCI Express uncorrectable error. */
if ((status & Y2_IS_PCI_EXP) != 0) {
    /*
     * On the PCI Express bus, bridges are called root complexes
     * (RC).  PCI Express errors are recognized by the root
     * complex too, which requests the system to handle the
     * problem.  After an error occurrence it may be that no
     * access to the adapter can be performed any longer.
     */
    v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
    if ((v32 & PEX_UNSUP_REQ) != 0) {
        /* Ignore unsupported request errors. */
        device_printf(sc->msk_dev,
            "Uncorrectable PCI Express error\n");
    if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
        /* Get the TLP header from the Log Registers. */
        for (i = 0; i < 4; i++)
            tlphead[i] = CSR_PCI_READ_4(sc,
                PEX_HEADER_LOG + i * 4);
        /* Check for a vendor-defined broadcast message. */
        if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
            sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
            CSR_WRITE_4(sc, B0_HWE_IMSK,
                sc->msk_intrhwemask);
            CSR_READ_4(sc, B0_HWE_IMSK);
    /* Clear the interrupt. */
    CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
    CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
    CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
    msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
    msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
static __inline void
msk_rxput(struct msk_if_softc *sc_if)
struct msk_softc *sc;

sc = sc_if->msk_softc;
if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
    BUS_DMASYNC_PREWRITE);
CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);

mskc_handle_events(struct msk_softc *sc)
struct msk_if_softc *sc_if;
struct msk_stat_desc *sd;
uint32_t control, status;
int cons, idx, len, port, rxprog;
struct mbuf_chain chain[MAXCPU];

idx = CSR_READ_2(sc, STAT_PUT_IDX);
if (idx == sc->msk_stat_cons)

ether_input_chain_init(chain);
rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;

for (cons = sc->msk_stat_cons; cons != idx;) {
    sd = &sc->msk_stat_ring[cons];
    control = le32toh(sd->msk_control);
    if ((control & HW_OWNER) == 0)
    /*
     * Marvell's FreeBSD driver updates the status LE after
     * clearing HW_OWNER.  However, we don't have a way to sync
     * a single LE with the bus_dma(9) API; bus_dma(9) only
     * provides a way to sync an entire DMA map.  So don't sync
     * the LE until we have a better way to sync LEs.
     */
    control &= ~HW_OWNER;
    sd->msk_control = htole32(control);
    status = le32toh(sd->msk_status);
    len = control & STLE_LEN_MASK;
    port = (control >> 16) & 0x01;
    sc_if = sc->msk_if[port];
    if (sc_if == NULL) {
        device_printf(sc->msk_dev, "invalid port opcode "
            "0x%08x\n", control & STLE_OP_MASK);

    switch (control & STLE_OP_MASK) {
        sc_if->msk_vtag = ntohs(len);
        sc_if->msk_vtag = ntohs(len);
        if ((sc_if->msk_ifp->if_flags & IFF_RUNNING) == 0)
        if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
            msk_jumbo_rxeof(sc_if, status, len);
            msk_rxeof(sc_if, status, len, chain);
        /*
         * Because there is no way to sync a single Rx LE, we
         * put the DMA sync operation off until the end of
         * event processing.
         */
        /* Update the prefetch unit if we've passed the watermark. */
        if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
        if (sc->msk_if[MSK_PORT_A] != NULL) {
            msk_txeof(sc->msk_if[MSK_PORT_A],
                status & STLE_TXA1_MSKL);
        if (sc->msk_if[MSK_PORT_B] != NULL) {
            msk_txeof(sc->msk_if[MSK_PORT_B],
                ((status & STLE_TXA2_MSKL) >>
                ((len & STLE_TXA2_MSKH) <<
        device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
            control & STLE_OP_MASK);
    MSK_INC(cons, MSK_STAT_RING_CNT);
    if (rxprog > sc->msk_process_limit)

ether_input_dispatch(chain);

sc->msk_stat_cons = cons;
/* XXX We should sync the status LEs here.  See the notes above. */

if (rxput[MSK_PORT_A] > 0)
    msk_rxput(sc->msk_if[MSK_PORT_A]);
if (rxput[MSK_PORT_B] > 0)
    msk_rxput(sc->msk_if[MSK_PORT_B]);

return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
/* Legacy interrupt handler for shared interrupt. */
mskc_intr(void *xsc)
struct msk_softc *sc;
struct msk_if_softc *sc_if0, *sc_if1;
struct ifnet *ifp0, *ifp1;

ASSERT_SERIALIZED(&sc->msk_serializer);

/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
    (status & sc->msk_intrmask) == 0) {
    CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);

sc_if0 = sc->msk_if[MSK_PORT_A];
sc_if1 = sc->msk_if[MSK_PORT_B];
ifp0 = sc_if0->msk_ifp;
ifp1 = sc_if1->msk_ifp;

if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
    msk_intr_phy(sc_if0);
if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
    msk_intr_phy(sc_if1);
if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
    msk_intr_gmac(sc_if0);
if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
    msk_intr_gmac(sc_if1);
if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
    device_printf(sc->msk_dev, "Rx descriptor error\n");
    sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
    CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
    CSR_READ_4(sc, B0_IMSK);
if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
    device_printf(sc->msk_dev, "Tx descriptor error\n");
    sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
    CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
    CSR_READ_4(sc, B0_IMSK);
if ((status & Y2_IS_HW_ERR) != 0)
    mskc_intr_hwerr(sc);

while (mskc_handle_events(sc) != 0)
if ((status & Y2_IS_STAT_BMU) != 0)
    CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);

/* Reenable interrupts. */
CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);

if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 &&
    !ifq_is_empty(&ifp0->if_snd))
if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 &&
    !ifq_is_empty(&ifp1->if_snd))
msk_set_tx_stfwd(struct msk_if_softc *sc_if)
struct msk_softc *sc = sc_if->msk_softc;
struct ifnet *ifp = sc_if->msk_ifp;

if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
    sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
    sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
    CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
if (ifp->if_mtu > ETHERMTU) {
    /* Set Tx GMAC FIFO Almost Empty Threshold. */
        MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
        MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
    /* Disable Store & Forward mode for Tx. */
    CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
    CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
struct msk_if_softc *sc_if = xsc;
struct msk_softc *sc = sc_if->msk_softc;
struct ifnet *ifp = sc_if->msk_ifp;
struct mii_data *mii;
uint16_t eaddr[ETHER_ADDR_LEN / 2];

ASSERT_SERIALIZED(ifp->if_serializer);

mii = device_get_softc(sc_if->msk_miibus);

/* Cancel pending I/O and free all Rx/Tx buffers. */
sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
    /*
     * In Yukon EC Ultra, TSO and checksum offload are not
     * supported for jumbo frames.
     */
    ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
    ifp->if_capenable &= ~IFCAP_TXCSUM;

/* GMAC Control reset. */
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
    CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
        GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |

/*
 * Initialize GMAC first, such that speed/duplex/flow-control
 * parameters are renegotiated when the interface is brought up.
 */
GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);

/* Dummy read of the Interrupt Source Register. */
CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

/* Set MIB Clear Counter Mode. */
gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
/* Read all MIB counters with Clear Mode set. */
for (i = 0; i < GM_MIB_CNT_SIZE; i++)
    GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
/* Clear MIB Clear Counter Mode. */
gmac &= ~GM_PAR_MIB_CLR;
GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);

GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);

/* Setup the Transmit Control Register. */
GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

/* Setup the Transmit Flow Control Register. */
GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);

/* Setup the Transmit Parameter Register. */
GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
    gmac |= GM_SMOD_JUMBO_ENA;
GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);

/* Set the station address. */
bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
    GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
    GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,

/* Disable interrupts for counter overflows. */
GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);

/* Configure the Rx MAC FIFO. */
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
    sc->msk_hw_id == CHIP_ID_YUKON_EX)
    reg |= GMF_RX_OVER_ON;
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);

/* Set the receive filter. */
msk_rxfilter(sc_if);

if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
    /* Clear the flush mask - HW bug. */
    CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
    /* Flush the Rx MAC FIFO on any flow control or error. */
    CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),

/*
 * Set the Rx FIFO flush threshold to 64 bytes + 1 FIFO word,
 * due to a hardware hang on receipt of pause frames.
 */
reg = RX_GMF_FL_THR_DEF + 1;
/* Another magic value for Yukon FE+, from Linux. */
if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);

/* Configure the Tx MAC FIFO. */
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);

/* Configure hardware VLAN tag insertion/stripping. */
msk_setvlan(sc_if, ifp);

if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
    /* Set the Rx pause threshold. */
    CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
    CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
    /* Configure store-and-forward for Tx. */
    msk_set_tx_stfwd(sc_if);

if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
    sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
    /* Disable the dynamic watermark - from Linux. */
    reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
    CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);

/*
 * Disable the Force Sync bit and Alloc bit in the Tx RAM interface
 * arbiter, as we don't use the Sync Tx queue.
 */
CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
/* Enable the RAM Interface Arbiter. */
CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);

/* Setup the RAM buffer. */
msk_set_rambuffer(sc_if);

/* Disable the Tx sync queue. */
CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
/* Setup the Tx Queue Bus Memory Interface. */
CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
switch (sc->msk_hw_id) {
case CHIP_ID_YUKON_EC_U:
    if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
        /* Fix for Yukon-EC Ultra: set the BMU FIFO level. */
        CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
case CHIP_ID_YUKON_EX:
    /*
     * Yukon Extreme seems to have a silicon bug in its
     * automatic Tx checksum calculation capability.
     */
    if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) {
        CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),

/* Setup the Rx Queue Bus Memory Interface. */
CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
    /* MAC Rx RAM Read is controlled by hardware. */
    CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);

msk_set_prefetch(sc, sc_if->msk_txq,
    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
msk_init_tx_ring(sc_if);

/* Disable Rx checksum offload and RSS hash. */
CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);

if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
    msk_set_prefetch(sc, sc_if->msk_rxq,
        sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
        MSK_JUMBO_RX_RING_CNT - 1);
    error = msk_init_jumbo_rx_ring(sc_if);
    msk_set_prefetch(sc, sc_if->msk_rxq,
        sc_if->msk_rdata.msk_rx_ring_paddr,
        MSK_RX_RING_CNT - 1);
    error = msk_init_rx_ring(sc_if);
    device_printf(sc_if->msk_if_dev,
        "initialization failed: no memory for Rx buffers\n");

if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
    sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
    /* Disable flushing of non-ASF packets. */
    CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
        GMF_RX_MACSEC_FLUSH_OFF);

/* Configure interrupt handling. */
if (sc_if->msk_port == MSK_PORT_A) {
    sc->msk_intrmask |= Y2_IS_PORT_A;
    sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
    sc->msk_intrmask |= Y2_IS_PORT_B;
    sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
CSR_READ_4(sc, B0_HWE_IMSK);
CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
CSR_READ_4(sc, B0_IMSK);

sc_if->msk_link = 0;
mskc_set_imtimer(sc);

ifp->if_flags |= IFF_RUNNING;
ifp->if_flags &= ~IFF_OACTIVE;

callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
msk_set_rambuffer(struct msk_if_softc *sc_if)
struct msk_softc *sc;

if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
sc = sc_if->msk_softc;

/* Setup Rx Queue. */
CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
    sc->msk_rxqstart[sc_if->msk_port] / 8);
CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
    sc->msk_rxqend[sc_if->msk_port] / 8);
CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
    sc->msk_rxqstart[sc_if->msk_port] / 8);
CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
    sc->msk_rxqstart[sc_if->msk_port] / 8);
utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
    ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));

/* Setup Tx Queue. */
CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
    sc->msk_txqstart[sc_if->msk_port] / 8);
CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
    sc->msk_txqend[sc_if->msk_port] / 8);
CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
    sc->msk_txqstart[sc_if->msk_port] / 8);
CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
    sc->msk_txqstart[sc_if->msk_port] / 8);
/* Enable Store & Forward for the Tx side. */
CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
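/*
 * Note that every RB_* address and threshold above is programmed in
 * units of 8-byte FIFO words, hence the divisions by 8.  As the names
 * RB_RX_UTPP/RB_RX_LTPP suggest, utpp and ltpp are the upper and lower
 * queue fill levels used to assert and release flow-control pause.
 */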
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,

/* Reset the prefetch unit. */
CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
/* Set the LE base address. */
CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
/* Set the list's last index. */
CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
/* Turn on the prefetch unit. */
CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
/* Dummy read to flush the write. */
CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
msk_stop(struct msk_if_softc *sc_if)
struct msk_softc *sc = sc_if->msk_softc;
struct ifnet *ifp = sc_if->msk_ifp;
struct msk_txdesc *txd;
struct msk_rxdesc *rxd;
struct msk_rxdesc *jrxd;

ASSERT_SERIALIZED(ifp->if_serializer);

callout_stop(&sc_if->msk_tick_ch);

/* Disable interrupts. */
if (sc_if->msk_port == MSK_PORT_A) {
    sc->msk_intrmask &= ~Y2_IS_PORT_A;
    sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
    sc->msk_intrmask &= ~Y2_IS_PORT_B;
    sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
CSR_READ_4(sc, B0_HWE_IMSK);
CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
CSR_READ_4(sc, B0_IMSK);

/* Disable the Tx/Rx MAC. */
val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
/* Read back to ensure the write completed. */
GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
for (i = 0; i < MSK_TIMEOUT; i++) {
    if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
        CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
        val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
if (i == MSK_TIMEOUT)
    device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
    RB_RST_SET | RB_DIS_OP_MD);

/* Disable all GMAC interrupts. */
CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
/* Disable PHY interrupts. */
msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

/* Disable the RAM Interface Arbiter. */
CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

/* Reset the PCI FIFO of the async Tx queue. */
CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
    BMU_RST_SET | BMU_FIFO_RST);

/* Reset the Tx prefetch unit. */
CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),

/* Reset the RAM Buffer async Tx queue. */
CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

/* Reset the Tx MAC FIFO. */
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
/* Set Pause Off. */
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

/*
 * The Rx Stop command will not work for Yukon-2 if the BMU does not
 * reach the end of packet, and since we can't make sure that we have
 * incoming data, we must reset the BMU while it is not in the middle
 * of a DMA transfer.  Since it is possible that the Rx path is still
 * active, the Rx RAM buffer will be stopped first, so any possible
 * incoming data will not trigger a DMA.  After the RAM buffer is
 * stopped, the BMU is polled until any DMA in progress is ended, and
 * only then will it be reset.
 */

/* Disable the RAM Buffer receive queue. */
CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
for (i = 0; i < MSK_TIMEOUT; i++) {
    if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
        CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
if (i == MSK_TIMEOUT)
    device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
    BMU_RST_SET | BMU_FIFO_RST);
/* Reset the Rx prefetch unit. */
CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
/* Reset the RAM Buffer receive queue. */
CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
/* Reset the Rx MAC FIFO. */
CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

/* Free any Rx and Tx mbufs still in the queues. */
for (i = 0; i < MSK_RX_RING_CNT; i++) {
    rxd = &sc_if->msk_cdata.msk_rxdesc[i];
    if (rxd->rx_m != NULL) {
        bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
    jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
    if (jrxd->rx_m != NULL) {
        bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
            jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
        m_freem(jrxd->rx_m);
for (i = 0; i < MSK_TX_RING_CNT; i++) {
    txd = &sc_if->msk_cdata.msk_txdesc[i];
    if (txd->tx_m != NULL) {
        bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,

/*
 * Mark the interface down.
 */
ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
sc_if->msk_link = 0;
mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
return sysctl_int_range(oidp, arg1, arg2, req,
    MSK_PROC_MIN, MSK_PROC_MAX);

mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
struct msk_softc *sc = arg1;
struct lwkt_serialize *serializer = &sc->msk_serializer;

lwkt_serialize_enter(serializer);
v = sc->msk_intr_rate;
error = sysctl_handle_int(oidp, &v, 0, req);
if (error || req->newptr == NULL)
if (sc->msk_intr_rate != v) {
    sc->msk_intr_rate = v;
    for (i = 0; i < 2; ++i) {
        if (sc->msk_if[i] != NULL) {
            flag |= sc->msk_if[i]->
                arpcom.ac_if.if_flags & IFF_RUNNING;
    mskc_set_imtimer(sc);
lwkt_serialize_exit(serializer);
msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
struct msk_if_softc *sc_if = device_get_softc(dev);

error = bus_dmamem_coherent(sc_if->msk_cdata.msk_parent_tag,
    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
    size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
device_printf(dev, "can't create coherent DMA memory\n");

*dtag = dmem.dmem_tag;
*dmap = dmem.dmem_map;
*addr = dmem.dmem_addr;
*paddr = dmem.dmem_busaddr;
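/*
 * Usage sketch: this helper is exactly what the attach path earlier in
 * this file uses for each descriptor ring, e.g.
 *
 *	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
 *	    &sc_if->msk_cdata.msk_tx_ring_tag,
 *	    (void *)&sc_if->msk_rdata.msk_tx_ring,
 *	    &sc_if->msk_rdata.msk_tx_ring_paddr,
 *	    &sc_if->msk_cdata.msk_tx_ring_map);
 *
 * msk_dmamem_destroy() below undoes all three steps (map unload,
 * memory free, tag destroy) in a single call.
 */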
msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)

bus_dmamap_unload(dtag, dmap);
bus_dmamem_free(dtag, addr, dmap);
bus_dma_tag_destroy(dtag);
mskc_set_imtimer(struct msk_softc *sc)

if (sc->msk_intr_rate > 0) {
    /*
     * XXX myk(4) seems to use 125MHz for EC/FE/XL
     * and 78.125MHz for the rest of the chip types.
     */
    CSR_WRITE_4(sc, B2_IRQM_INI,
        MSK_USECS(sc, 1000000 / sc->msk_intr_rate));
    CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
    CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START);
CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP);
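/*
 * Worked example (assuming MSK_USECS() scales microseconds by the
 * core clock frequency in MHz, per the XXX note above): with a 125MHz
 * clock and msk_intr_rate = 4000 interrupts/sec, B2_IRQM_INI would be
 * loaded with 125 * (1000000 / 4000) = 31250 timer ticks, i.e. one
 * moderated interrupt window every 250us.
 */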