/******************************************************************************
 *
 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
 * Version: $Revision: 1.23 $
 * Date   : $Date: 2005/12/22 09:04:11 $
 * Purpose: Main driver source file
 *
 *****************************************************************************/

/******************************************************************************
 *
 * Copyright (C) Marvell International Ltd. and/or its affiliates
 *
 * The computer program files contained in this folder ("Files")
 * are provided to you under the BSD-type license terms provided
 * below, and any use of such Files and any derivative works
 * thereof created by you shall be governed by the following terms
 * and conditions:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * - Neither the name of Marvell nor the names of its contributors
 *   may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */
/*
 * Device driver for the Marvell Yukon II Ethernet controller.
 * Due to lack of documentation, this driver is based on the code from
 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
 */
#include <sys/param.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/in_cksum.h>
#include <sys/interrupt.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>

#include <netinet/ip.h>
#include <netinet/ip_var.h>

#include <dev/netif/mii_layer/miivar.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include "if_mskreg.h"

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
#define MSK_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
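/*
 * Note: these are the if_hwassist bits toggled from msk_ioctl() below;
 * outbound mbufs carrying CSUM_TCP/CSUM_UDP are expected to have their
 * TCP/UDP checksum filled in by the chip (handled in msk_encap(), which
 * lies outside this excerpt).
 */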
/*
 * Devices supported by this driver.
 */
static const struct msk_product {
	uint16_t	msk_vendorid;
	uint16_t	msk_deviceid;
	const char	*msk_name;
} msk_products[] = {
	{ VENDORID_SK, DEVICEID_SK_YUKON2,
	    "SK-9Sxx Gigabit Ethernet" },
	{ VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
	    "SK-9Exx Gigabit Ethernet"},
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
	    "Marvell Yukon 88E8021CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8021X,
	    "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
	    "Marvell Yukon 88E8022CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8022X,
	    "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
	    "Marvell Yukon 88E8061CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8061X,
	    "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
	    "Marvell Yukon 88E8062CU Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8062X,
	    "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8035,
	    "Marvell Yukon 88E8035 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
	    "Marvell Yukon 88E8036 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
	    "Marvell Yukon 88E8038 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
	    "Marvell Yukon 88E8039 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4361,
	    "Marvell Yukon 88E8050 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4360,
	    "Marvell Yukon 88E8052 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4362,
	    "Marvell Yukon 88E8053 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4363,
	    "Marvell Yukon 88E8055 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_4364,
	    "Marvell Yukon 88E8056 Gigabit Ethernet" },
	{ VENDORID_MARVELL, DEVICEID_MRVL_436A,
	    "Marvell Yukon 88E8058 Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
	    "D-Link 550SX Gigabit Ethernet" },
	{ VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
	    "D-Link 560T Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static const char *model_name[] = {
	"Yukon XL",
	"Yukon EC Ultra",
	"Yukon Unknown",
	"Yukon EC",
	"Yukon FE"
};
static int	mskc_probe(device_t);
static int	mskc_attach(device_t);
static int	mskc_detach(device_t);
static int	mskc_shutdown(device_t);
static int	mskc_suspend(device_t);
static int	mskc_resume(device_t);
static void	mskc_intr(void *);

static void	mskc_reset(struct msk_softc *);
static void	mskc_set_imtimer(struct msk_softc *);
static void	mskc_intr_hwerr(struct msk_softc *);
static int	mskc_handle_events(struct msk_softc *);
static void	mskc_phy_power(struct msk_softc *, int);
static int	mskc_setup_rambuffer(struct msk_softc *);
static int	mskc_status_dma_alloc(struct msk_softc *);
static void	mskc_status_dma_free(struct msk_softc *);
static int	mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS);
static int	mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);

static int	msk_probe(device_t);
static int	msk_attach(device_t);
static int	msk_detach(device_t);
static int	msk_miibus_readreg(device_t, int, int);
static int	msk_miibus_writereg(device_t, int, int, int);
static void	msk_miibus_statchg(device_t);

static void	msk_init(void *);
static int	msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	msk_start(struct ifnet *);
static void	msk_watchdog(struct ifnet *);
static int	msk_mediachange(struct ifnet *);
static void	msk_mediastatus(struct ifnet *, struct ifmediareq *);

static void	msk_tick(void *);
static void	msk_intr_phy(struct msk_if_softc *);
static void	msk_intr_gmac(struct msk_if_softc *);
static __inline void msk_rxput(struct msk_if_softc *);
static void	msk_handle_hwerr(struct msk_if_softc *, uint32_t);
static void	msk_rxeof(struct msk_if_softc *, uint32_t, int,
			  struct mbuf_chain *);
static void	msk_txeof(struct msk_if_softc *, int);
static void	msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
static void	msk_set_rambuffer(struct msk_if_softc *);
static void	msk_stop(struct msk_if_softc *);

static int	msk_txrx_dma_alloc(struct msk_if_softc *);
static void	msk_txrx_dma_free(struct msk_if_softc *);
static int	msk_init_rx_ring(struct msk_if_softc *);
static void	msk_init_tx_ring(struct msk_if_softc *);
static __inline void msk_discard_rxbuf(struct msk_if_softc *, int);
static int	msk_newbuf(struct msk_if_softc *, int, int);
static int	msk_encap(struct msk_if_softc *, struct mbuf **);

#ifdef MSK_JUMBO
static int	msk_init_jumbo_rx_ring(struct msk_if_softc *);
static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
static int	msk_jumbo_newbuf(struct msk_if_softc *, int);
static void	msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void	*msk_jalloc(struct msk_if_softc *);
static void	msk_jfree(void *, void *);
#endif

static int	msk_phy_readreg(struct msk_if_softc *, int, int);
static int	msk_phy_writereg(struct msk_if_softc *, int, int, int);

static void	msk_rxfilter(struct msk_if_softc *);
static void	msk_setvlan(struct msk_if_softc *, struct ifnet *);

static int	msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
				  void **, bus_addr_t *, bus_dmamap_t *);
static void	msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
static device_method_t mskc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		mskc_probe),
	DEVMETHOD(device_attach,	mskc_attach),
	DEVMETHOD(device_detach,	mskc_detach),
	DEVMETHOD(device_suspend,	mskc_suspend),
	DEVMETHOD(device_resume,	mskc_resume),
	DEVMETHOD(device_shutdown,	mskc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
static devclass_t mskc_devclass;

static device_method_t msk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		msk_probe),
	DEVMETHOD(device_attach,	msk_attach),
	DEVMETHOD(device_detach,	msk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	msk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	msk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	msk_miibus_statchg),

	{ 0, 0 }
};

static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
static devclass_t msk_devclass;

DECLARE_DUMMY_MODULE(if_msk);
DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, NULL, NULL);
DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, NULL, NULL);
DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);
static int	mskc_intr_rate = 0;
static int	mskc_process_limit = MSK_PROC_DEFAULT;

TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate);
TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit);
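/*
 * Both tunables are read from the boot-time kernel environment; for
 * example, they can be set from /boot/loader.conf (illustrative values
 * only, not recommendations):
 *
 *	hw.mskc.intr_rate="4000"
 *	hw.mskc.process_limit="100"
 */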
static int
msk_miibus_readreg(device_t dev, int phy, int reg)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_readreg(sc_if, phy, reg));
}

static int
msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
{
	struct msk_softc *sc;
	int i, val;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
		if ((val & GM_SMI_CT_RD_VAL) != 0) {
			val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
			break;
		}
	}

	if (i == MSK_TIMEOUT) {
		if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
		val = 0;
	}

	return (val);
}
static int
msk_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct msk_if_softc *sc_if;

	if (phy != PHY_ADDR_MARV)
		return (0);

	sc_if = device_get_softc(dev);

	return (msk_phy_writereg(sc_if, phy, reg, val));
}

static int
msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
{
	struct msk_softc *sc;
	int i;

	sc = sc_if->msk_softc;

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
	    GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		DELAY(1);
		if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
		    GM_SMI_CT_BUSY) == 0)
			break;
	}
	if (i == MSK_TIMEOUT)
		if_printf(sc_if->msk_ifp, "phy write timeout\n");

	return (0);
}
static void
msk_miibus_statchg(device_t dev)
{
	struct msk_if_softc *sc_if;
	struct msk_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t gmac;

	sc_if = device_get_softc(dev);
	sc = sc_if->msk_softc;

	mii = device_get_softc(sc_if->msk_miibus);
	ifp = sc_if->msk_ifp;

	sc_if->msk_link = 0;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc_if->msk_link = 1;
			break;
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
				sc_if->msk_link = 1;
			break;
		}
	}

	if (sc_if->msk_link != 0) {
		/* Enable Tx FIFO Underrun. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
		    GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
		/*
		 * Because mii(4) notifies msk(4) when it detects a link
		 * status change, there is no need to enable automatic
		 * speed/flow-control/duplex updates.
		 */
		gmac = GM_GPCR_AU_ALL_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_T:
			gmac |= GM_GPCR_SPEED_1000;
			break;
		case IFM_100_TX:
			gmac |= GM_GPCR_SPEED_100;
			break;
		case IFM_10_T:
			break;
		}

		if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
			gmac |= GM_GPCR_DUP_FULL;
		/* Disable Rx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
			gmac |= GM_GPCR_FC_RX_DIS;
		/* Disable Tx flow control. */
		if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
			gmac |= GM_GPCR_FC_TX_DIS;
		gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
		/* Read again to ensure writing. */
		GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

		gmac = GMC_PAUSE_ON;
		if (((mii->mii_media_active & IFM_GMASK) &
		    (IFM_FLAG0 | IFM_FLAG1)) == 0)
			gmac = GMC_PAUSE_OFF;
		/* Disable pause for 10/100 Mbps in half-duplex mode. */
		if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
		    (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
		    IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
			gmac = GMC_PAUSE_OFF;
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);

		/* Enable PHY interrupt for FIFO underrun/overflow. */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV,
		    PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
	} else {
		/*
		 * Link state changed to down.
		 * Disable PHY interrupts.
		 */
		msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
		/* Disable Rx/Tx MAC. */
		gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		if (gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) {
			gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
			GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
			/* Read again to ensure writing. */
			GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
		}
	}
}
static void
msk_rxfilter(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t mchash[2];
	uint32_t crc;
	uint16_t mode;

	sc = sc_if->msk_softc;
	ifp = sc_if->msk_ifp;

	bzero(mchash, sizeof(mchash));
	mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
	} else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
		mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
		mchash[0] = 0xffff;
		mchash[1] = 0xffff;
	} else {
		mode |= GM_RXCR_UCF_ENA;
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN);
			/* Just want the 6 least significant bits. */
			crc &= 0x3f;
			/* Set the corresponding bit in the hash table. */
			mchash[crc >> 5] |= 1 << (crc & 0x1f);
		}
		if (mchash[0] != 0 || mchash[1] != 0)
			mode |= GM_RXCR_MCF_ENA;
	}

	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
	    mchash[0] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
	    (mchash[0] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
	    mchash[1] & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
	    (mchash[1] >> 16) & 0xffff);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
}
static void
msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_ON);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_ON);
	} else {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    RX_VLAN_STRIP_OFF);
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    TX_VLAN_TAG_OFF);
	}
}
static int
msk_init_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_rx_ring[prod];
		if (msk_newbuf(sc_if, prod, 1) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_RX_RING_CNT);
	}

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
#ifdef MSK_JUMBO
static int
msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_rxdesc *rxd;
	int i, prod;

	MSK_IF_LOCK_ASSERT(sc_if);

	sc_if->msk_cdata.msk_rx_cons = 0;
	sc_if->msk_cdata.msk_rx_prod = 0;
	sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_jumbo_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
	prod = sc_if->msk_cdata.msk_rx_prod;
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
		rxd->rx_m = NULL;
		rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
		if (msk_jumbo_newbuf(sc_if, prod) != 0)
			return (ENOBUFS);
		MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
	}

	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Update prefetch unit. */
	sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
	CSR_WRITE_2(sc_if->msk_softc,
	    Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
	    sc_if->msk_cdata.msk_rx_prod);

	return (0);
}
#endif
static void
msk_init_tx_ring(struct msk_if_softc *sc_if)
{
	struct msk_ring_data *rd;
	struct msk_txdesc *txd;
	int i;

	sc_if->msk_cdata.msk_tx_prod = 0;
	sc_if->msk_cdata.msk_tx_cons = 0;
	sc_if->msk_cdata.msk_tx_cnt = 0;

	rd = &sc_if->msk_rdata;
	bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_le = &rd->msk_tx_ring[i];
	}
}
static __inline void
msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
#ifdef MSK_JUMBO
static __inline void
msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	m = rxd->rx_m;
	rx_le = rxd->rx_le;
	rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
}
#endif
static int
msk_newbuf(struct msk_if_softc *sc_if, int idx, int init)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	int error, nseg;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(sc_if->msk_cdata.msk_rx_tag,
	    sc_if->msk_cdata.msk_rx_sparemap,
	    m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init)
			if_printf(&sc_if->arpcom.ac_if, "can't load RX mbuf\n");
		return (error);
	}

	rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
	}

	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
	sc_if->msk_cdata.msk_rx_sparemap = map;

	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
	rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
#ifdef MSK_JUMBO
static int
msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
{
	struct msk_rx_desc *rx_le;
	struct msk_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;
	void *buf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	buf = msk_jalloc(sc_if);
	if (buf == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	/* Attach the buffer to the mbuf. */
	MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
	    EXT_NET_DRV);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_pkthdr.len = m->m_len = MSK_JLEN;
	m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
	    sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
		    rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
	sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
	bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);

	rxd->rx_m = m;
	rx_le = rxd->rx_le;
	rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
	rx_le->msk_control =
	    htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);

	return (0);
}
#endif
static int
msk_mediachange(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;
	int error;

	mii = device_get_softc(sc_if->msk_miibus);
	error = mii_mediachg(mii);

	return (error);
}
/*
 * Report current media status.
 */
static void
msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc_if->msk_miibus);
	mii_pollstat(mii);

	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
static int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct msk_if_softc *sc_if;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc_if = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
		    ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		if ((ifp->if_flags & IFF_RUNNING) != 0)
			msk_init(sc_if);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if (((ifp->if_flags ^ sc_if->msk_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					msk_rxfilter(sc_if);
			} else {
				if (sc_if->msk_detach == 0)
					msk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if);
		}
		sc_if->msk_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING)
			msk_rxfilter(sc_if);
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->msk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist |= MSK_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			msk_setvlan(sc_if, ifp);
		}

		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
		    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			/*
			 * In Yukon EC Ultra, TSO & checksum offload is not
			 * supported for jumbo frame.
			 */
			ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
			ifp->if_capenable &= ~IFCAP_TXCSUM;
		}
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
static int
mskc_probe(device_t dev)
{
	const struct msk_product *mp;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (mp = msk_products; mp->msk_name != NULL; ++mp) {
		if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
			device_set_desc(dev, mp->msk_name);
			return (0);
		}
	}
	return (ENXIO);
}
static int
mskc_setup_rambuffer(struct msk_softc *sc)
{
	int next;
	int i;

	/* Get adapter SRAM size. */
	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
	if (bootverbose) {
		device_printf(sc->msk_dev,
		    "RAM buffer size : %dKB\n", sc->msk_ramsize);
	}
	if (sc->msk_ramsize == 0)
		return (0);
	sc->msk_pflags |= MSK_FLAG_RAMBUF;

	/*
	 * Give receiver 2/3 of memory and round down to the multiple
	 * of 1024. Tx/Rx RAM buffer size of Yukon II should be multiple
	 * of 1024.
	 */
	sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
	sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
	for (i = 0, next = 0; i < sc->msk_num_port; i++) {
		sc->msk_rxqstart[i] = next;
		sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
		next = sc->msk_rxqend[i] + 1;
		sc->msk_txqstart[i] = next;
		sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
		next = sc->msk_txqend[i] + 1;
		if (bootverbose) {
			device_printf(sc->msk_dev,
			    "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
			    sc->msk_rxqend[i]);
			device_printf(sc->msk_dev,
			    "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
			    sc->msk_txqsize / 1024, sc->msk_txqstart[i],
			    sc->msk_txqend[i]);
		}
	}

	return (0);
}
static void
mskc_phy_power(struct msk_softc *sc, int mode)
{
	uint32_t our, val;
	int i;

	switch (mode) {
	case MSK_PHY_POWERUP:
		/* Switch power to VCC (WA for VAUX problem). */
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
		/* Disable Core Clock Division, set Clock Select to 0. */
		CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);

		val = 0;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
			      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
			      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		}
		/*
		 * Enable PCI & Core Clock, enable clock gating for both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);

		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Deassert Low Power for 1st PHY. */
			val |= PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val |= PCI_Y2_PHY2_COMA;
		} else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);

			/* Enable all clocks. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
			our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
			our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
			    PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
			/* Set all bits to 0 except bits 15..12. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
			/* Set to default value. */
			pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
		}

		/* Release PHY from PowerDown/COMA mode. */
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		for (i = 0; i < sc->msk_num_port; i++) {
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_SET);
			CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
			    GMLC_RST_CLR);
		}
		break;
	case MSK_PHY_POWERDOWN:
		val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
		val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			val &= ~PCI_Y2_PHY1_COMA;
			if (sc->msk_num_port > 1)
				val &= ~PCI_Y2_PHY2_COMA;
		}
		pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);

		val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
		      Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
		      Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
			/* Enable bits are inverted. */
			val = 0;
		}
		/*
		 * Disable PCI & Core Clock, disable clock gating for
		 * both Links.
		 */
		CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
		CSR_WRITE_1(sc, B0_POWER_CTRL,
		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
		break;
	default:
		break;
	}
}
static void
mskc_reset(struct msk_softc *sc)
{
	bus_addr_t addr;
	uint16_t status;
	uint32_t val;
	int i;

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Disable ASF */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
		CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
		CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
	}

	/*
	 * Since we disabled ASF, S/W reset is required for Power Management.
	 */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);

	/* Clear all error bits in the PCI status register. */
	status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);

	pci_write_config(sc->msk_dev, PCIR_STATUS, status |
	    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
	    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
	CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);

	switch (sc->msk_bustype) {
	case MSK_PEX_BUS:
		/* Clear all PEX errors. */
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((val & PEX_RX_OV) != 0) {
			sc->msk_intrmask &= ~Y2_IS_HW_ERR;
			sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
		}
		break;
	case MSK_PCI_BUS:
	case MSK_PCIX_BUS:
		/* Set Cache Line Size to 2(8bytes) if configured to 0. */
		val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
		if (val == 0)
			pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
		if (sc->msk_bustype == MSK_PCIX_BUS) {
			/* Set Cache Line Size opt. */
			val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
			val |= PCI_CLS_OPT;
			pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
		}
		break;
	}

	/* Set PHY power state. */
	mskc_phy_power(sc, MSK_PHY_POWERUP);

	/* Reset GPHY/GMAC Control */
	for (i = 0; i < sc->msk_num_port; i++) {
		/* GPHY Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
		/* GMAC Control reset. */
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
		CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
	}
	CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

	/* LED On. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);

	/* Clear TWSI IRQ. */
	CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);

	/* Turn off hardware timer. */
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
	CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);

	/* Turn off descriptor polling. */
	CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);

	/* Turn off time stamps. */
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
	CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);

	/* Configure timeout values. */
	for (i = 0; i < sc->msk_num_port; i++) {
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
		    MSK_RI_TO_53);
		CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
		    MSK_RI_TO_53);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);

	/*
	 * On dual port PCI-X card, there is a problem where status
	 * can be received out of order due to split transactions.
	 */
	if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
		uint16_t pcix_cmd;
		uint8_t pcix;

		pcix = pci_get_pcixcap_ptr(sc->msk_dev);

		pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
		/* Clear Max Outstanding Split Transactions. */
		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}
	if (sc->msk_bustype == MSK_PEX_BUS) {
		uint16_t v, width;

		v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
		/* Change Max. Read Request Size to 4096 bytes. */
		v &= ~PEX_DC_MAX_RRS_MSK;
		v |= PEX_DC_MAX_RD_RQ_SIZE(5);
		pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
		width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
		width = (width & PEX_LS_LINK_WI_MSK) >> 4;
		v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
		v = (v & PEX_LS_LINK_WI_MSK) >> 4;
		if (v != width) {
			device_printf(sc->msk_dev,
			    "negotiated width of link(x%d) != "
			    "max. width of link(x%d)\n", width, v);
		}
	}

	/* Clear status list. */
	bzero(sc->msk_stat_ring,
	    sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
	sc->msk_stat_cons = 0;
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
	/* Set the status list base address. */
	addr = sc->msk_stat_ring_paddr;
	CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
	/* Set the status list last index. */
	CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
		/* WA for dev. #4.3 */
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
		/* WA for dev. #4.18 */
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
		CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
	} else {
		CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
		CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
		if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
		    sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
		else
			CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
		CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
	}
	/*
	 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
	 */
	CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
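	/*
	 * Note: MSK_USECS() is assumed (per its definition in if_mskreg.h)
	 * to scale microseconds by the core clock frequency in MHz
	 * (sc->msk_clock, set in mskc_attach()), so the value written
	 * above is 1000us expressed in core clock ticks.
	 */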
	/* Enable status unit. */
	CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);

	CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
	CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
}
static int
msk_probe(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	char desc[100];

	/*
	 * Not much to do here. We always know there will be
	 * at least one GMAC present, and if there are two,
	 * mskc_attach() will create a second device instance
	 * for us.
	 */
	ksnprintf(desc, sizeof(desc),
	    "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);
	return (0);
}
static int
msk_attach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i, port, error;
	uint8_t eaddr[ETHER_ADDR_LEN];

	port = *(int *)device_get_ivars(dev);
	KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);

	kfree(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);

	callout_init(&sc_if->msk_tick_ch);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_ifp = ifp;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;

	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	error = msk_txrx_dma_alloc(sc_if);
	if (error)
		goto fail;

	ifp->if_softc = sc_if;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = msk_init;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * IFCAP_RXCSUM capability is intentionally disabled as the hardware
	 * has a serious bug in Rx checksum offload for all Yukon II family
	 * hardware.  It seems there is a workaround to make it work sometimes;
	 * however, the workaround also has to check OP code sequences to
	 * verify whether the OP code is correct.  Sometimes it should compute
	 * the IP/TCP/UDP checksum in the driver in order to verify the
	 * correctness of the checksum computed by hardware.  If you have to
	 * compute the checksum with software to verify the hardware's
	 * checksum, why have the hardware compute the checksum?  I think
	 * there is no reason to spend time making Rx checksum offload work
	 * on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
			       IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;

	/*
	 * Do miibus setup.
	 */
	error = mii_phy_probe(dev, &sc_if->msk_miibus,
			      msk_mediachange, msk_mediastatus);
	if (error) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.  Can't hold locks when calling into ether_*.
	 */
	ether_ifattach(ifp, eaddr, &sc->msk_serializer);

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	return (0);
fail:
	sc->msk_if[port] = NULL;
	msk_detach(dev);
	return (error);
}
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
mskc_attach(device_t dev)
{
	struct msk_softc *sc;
	int error, *port, cpuid;

	sc = device_get_softc(dev);
	sc->msk_dev = dev;
	lwkt_serialize_init(&sc->msk_serializer);

	/*
	 * Initialize sysctl variables
	 */
	sc->msk_process_limit = mskc_process_limit;
	sc->msk_intr_rate = mskc_intr_rate;

#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t irq, bar0, bar1;

		/* Save important PCI config data. */
		bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
		bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, PCIR_BAR(0), bar0, 4);
		pci_write_config(dev, PCIR_BAR(1), bar1, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* BURN_BRIDGES */

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Allocate I/O resource
	 */
#ifdef MSK_USEIOSPACE
	sc->msk_res_type = SYS_RES_IOPORT;
	sc->msk_res_rid = PCIR_BAR(1);
#else
	sc->msk_res_type = SYS_RES_MEMORY;
	sc->msk_res_rid = PCIR_BAR(0);
#endif
	sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
					     &sc->msk_res_rid, RF_ACTIVE);
	if (sc->msk_res == NULL) {
		if (sc->msk_res_type == SYS_RES_MEMORY) {
			sc->msk_res_type = SYS_RES_IOPORT;
			sc->msk_res_rid = PCIR_BAR(1);
		} else {
			sc->msk_res_type = SYS_RES_MEMORY;
			sc->msk_res_rid = PCIR_BAR(0);
		}
		sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
						     &sc->msk_res_rid,
						     RF_ACTIVE);
		if (sc->msk_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->msk_res_type == SYS_RES_MEMORY ?
			    "memory" : "I/O");
			return (ENXIO);
		}
	}
	sc->msk_res_bt = rman_get_bustag(sc->msk_res);
	sc->msk_res_bh = rman_get_bushandle(sc->msk_res);

	/*
	 * Allocate IRQ
	 */
	sc->msk_irq_rid = 0;
	sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
					     &sc->msk_irq_rid,
					     RF_SHAREABLE | RF_ACTIVE);
	if (sc->msk_irq == NULL) {
		device_printf(dev, "couldn't allocate IRQ resources\n");
		error = ENXIO;
		goto fail;
	}

	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
	sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
	/* Bail out if chip is not recognized. */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_FE) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->msk_sysctl_ctx);
	sc->msk_sysctl_tree = SYSCTL_ADD_NODE(&sc->msk_sysctl_ctx,
					      SYSCTL_STATIC_CHILDREN(_hw),
					      OID_AUTO,
					      device_get_nameunit(dev),
					      CTLFLAG_RD, 0, "");
	if (sc->msk_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
			SYSCTL_CHILDREN(sc->msk_sysctl_tree),
			OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
			&sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
			"I", "max number of Rx events to process");
	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
			SYSCTL_CHILDREN(sc->msk_sysctl_tree),
			OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, mskc_sysctl_intr_rate,
			"I", "max number of interrupt per second");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "defrag_avoided", CTLFLAG_RW, &sc->msk_defrag_avoided,
		       0, "# of avoided m_defrag on TX path");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "leading_copied", CTLFLAG_RW, &sc->msk_leading_copied,
		       0, "# of leading copies on TX path");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "trailing_copied", CTLFLAG_RW, &sc->msk_trailing_copied,
		       0, "# of trailing copies on TX path");

	/* Soft reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_is_pcie(sc->msk_dev) == 0)
		sc->msk_bustype = MSK_PEX_BUS;
	else if (pci_is_pcix(sc->msk_dev) == 0)
		sc->msk_bustype = MSK_PCIX_BUS;
	else
		sc->msk_bustype = MSK_PCI_BUS;

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		break;
	case CHIP_ID_YUKON_XL:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	default:
		sc->msk_clock = 156;	/* 156 MHz */
		break;
	}

	error = mskc_status_dma_alloc(sc);
	if (error)
		goto fail;

	/* Set base interrupt mask. */
	sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
	sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
			      Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;

	/* Reset the adapter. */
	mskc_reset(sc);

	error = mskc_setup_rambuffer(sc);
	if (error)
		goto fail;

	sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
	if (sc->msk_devs[MSK_PORT_A] == NULL) {
		device_printf(dev, "failed to add child for PORT_A\n");
		error = ENXIO;
		goto fail;
	}
	port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
	*port = MSK_PORT_A;
	device_set_ivars(sc->msk_devs[MSK_PORT_A], port);

	if (sc->msk_num_port > 1) {
		sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
		if (sc->msk_devs[MSK_PORT_B] == NULL) {
			device_printf(dev, "failed to add child for PORT_B\n");
			error = ENXIO;
			goto fail;
		}
		port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
		*port = MSK_PORT_B;
		device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
	}

	bus_generic_attach(dev);

	error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
			       mskc_intr, sc, &sc->msk_intrhand,
			       &sc->msk_serializer);
	if (error) {
		device_printf(dev, "couldn't set up interrupt handler\n");
		goto fail;
	}

	cpuid = ithread_cpuid(rman_get_start(sc->msk_irq));
	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	if (sc->msk_if[0] != NULL)
		sc->msk_if[0]->msk_ifp->if_cpuid = cpuid;
	if (sc->msk_if[1] != NULL)
		sc->msk_if[1]->msk_ifp->if_cpuid = cpuid;

	return (0);
fail:
	mskc_detach(dev);
	return (error);
}
/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
msk_detach(device_t dev)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct msk_softc *sc = sc_if->msk_softc;
		struct ifnet *ifp = &sc_if->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);

		if (sc->msk_intrhand != NULL) {
			if (sc->msk_if[MSK_PORT_A] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_A]);
			if (sc->msk_if[MSK_PORT_B] != NULL)
				msk_stop(sc->msk_if[MSK_PORT_B]);

			bus_teardown_intr(sc->msk_dev, sc->msk_irq,
					  sc->msk_intrhand);
			sc->msk_intrhand = NULL;
		}

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc_if->msk_miibus != NULL)
		device_delete_child(dev, sc_if->msk_miibus);

	msk_txrx_dma_free(sc_if);
	return (0);
}
static int
mskc_detach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int *port, i;

	if (device_is_attached(dev)) {
		KASSERT(sc->msk_intrhand == NULL,
			("intr is not torn down yet\n"));
	}

	for (i = 0; i < sc->msk_num_port; ++i) {
		if (sc->msk_devs[i] != NULL) {
			port = device_get_ivars(sc->msk_devs[i]);
			if (port != NULL) {
				kfree(port, M_DEVBUF);
				device_set_ivars(sc->msk_devs[i], NULL);
			}
			device_delete_child(dev, sc->msk_devs[i]);
		}
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	/* LED Off. */
	CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);

	/* Put hardware reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	mskc_status_dma_free(sc);

	if (sc->msk_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid,
				     sc->msk_irq);
	}
	if (sc->msk_res != NULL) {
		bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid,
				     sc->msk_res);
	}

	if (sc->msk_sysctl_tree != NULL)
		sysctl_ctx_free(&sc->msk_sysctl_ctx);

	return (0);
}
/* Create status DMA region. */
static int
mskc_status_dma_alloc(struct msk_softc *sc)
{
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(NULL/* XXX parent */, MSK_STAT_ALIGN, 0,
				    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				    MSK_STAT_RING_SZ, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(sc->msk_dev,
			      "failed to create status coherent DMA memory\n");
		return (error);
	}
	sc->msk_stat_tag = dmem.dmem_tag;
	sc->msk_stat_map = dmem.dmem_map;
	sc->msk_stat_ring = dmem.dmem_addr;
	sc->msk_stat_ring_paddr = dmem.dmem_busaddr;

	return (0);
}
static void
mskc_status_dma_free(struct msk_softc *sc)
{
	/* Destroy status block. */
	if (sc->msk_stat_tag) {
		bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
		bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
				sc->msk_stat_map);
		bus_dma_tag_destroy(sc->msk_stat_tag);
		sc->msk_stat_tag = NULL;
	}
}
static int
msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
{
#ifdef MSK_JUMBO
	struct msk_dmamap_arg ctx;
	struct msk_rxdesc *jrxd;
	struct msk_jpool_entry *entry;
	uint8_t *ptr;
#endif
	bus_size_t rxalign;
	int error, i, j;

	/* Create parent DMA tag. */
	/*
	 * It seems that Yukon II supports full 64bits DMA operations. But
	 * it needs two descriptors(list elements) for 64bits DMA operations.
	 * Since we don't know what DMA address mappings(32bits or 64bits)
	 * would be used in advance for each mbufs, we limit its DMA space
	 * to be in range of 32bits address space. Otherwise, we should check
	 * what DMA address is used and chain another descriptor for the
	 * 64bits DMA operation. This also means descriptor ring size is
	 * variable. Limiting DMA address to be in 32bit address space greatly
	 * simplifies descriptor handling and possibly would increase
	 * performance a bit due to efficient handling of descriptors.
	 * Apart from harassing checksum offloading mechanisms, it seems
	 * it's really a bad idea to use a separate descriptor for 64bit
	 * DMA operation to save small descriptor memory. Anyway, I've
	 * never seen this exotic scheme on ethernet interface hardware.
	 */
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    &sc_if->msk_cdata.msk_parent_tag);
	if (error) {
		device_printf(sc_if->msk_if_dev,
			      "failed to create parent DMA tag\n");
		return (error);
	}

	/* Create DMA stuffs for Tx ring. */
	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
				  &sc_if->msk_cdata.msk_tx_ring_tag,
				  (void *)&sc_if->msk_rdata.msk_tx_ring,
				  &sc_if->msk_rdata.msk_tx_ring_paddr,
				  &sc_if->msk_cdata.msk_tx_ring_map);
	if (error) {
		device_printf(sc_if->msk_if_dev,
			      "failed to create TX ring DMA stuffs\n");
		return (error);
	}

	/* Create DMA stuffs for Rx ring. */
	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ,
				  &sc_if->msk_cdata.msk_rx_ring_tag,
				  (void *)&sc_if->msk_rdata.msk_rx_ring,
				  &sc_if->msk_rdata.msk_rx_ring_paddr,
				  &sc_if->msk_cdata.msk_rx_ring_map);
	if (error) {
		device_printf(sc_if->msk_if_dev,
			      "failed to create RX ring DMA stuffs\n");
		return (error);
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MSK_JUMBO_FRAMELEN,		/* maxsize */
	    MSK_MAXTXSEGS,		/* nsegments */
	    MSK_MAXSGSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
	    BUS_DMA_ONEBPAGE,		/* flags */
	    &sc_if->msk_cdata.msk_tx_tag);
	if (error) {
		device_printf(sc_if->msk_if_dev,
			      "failed to create Tx DMA tag\n");
		return (error);
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i];

		error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &txd->tx_dmamap);
		if (error) {
			device_printf(sc_if->msk_if_dev,
				      "failed to create %dth Tx dmamap\n", i);

			for (j = 0; j < i; ++j) {
				txd = &sc_if->msk_cdata.msk_txdesc[j];
				bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
						   txd->tx_dmamap);
			}
			bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
			sc_if->msk_cdata.msk_tx_tag = NULL;

			return (error);
		}
	}

	/*
	 * Workaround hardware hang which seems to happen when Rx buffer
	 * is not aligned on multiple of FIFO word(8 bytes).
	 */
	if (sc_if->msk_flags & MSK_FLAG_RAMBUF)
		rxalign = MSK_RX_BUF_ALIGN;
	else
		rxalign = 1;

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
	    rxalign, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED |
	    BUS_DMA_WAITOK,		/* flags */
	    &sc_if->msk_cdata.msk_rx_tag);
	if (error) {
		device_printf(sc_if->msk_if_dev,
			      "failed to create Rx DMA tag\n");
		return (error);
	}

	/* Create DMA maps for Rx buffers. */
	error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, BUS_DMA_WAITOK,
				  &sc_if->msk_cdata.msk_rx_sparemap);
	if (error) {
		device_printf(sc_if->msk_if_dev,
			      "failed to create spare Rx dmamap\n");
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
		sc_if->msk_cdata.msk_rx_tag = NULL;
		return (error);
	}
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i];

		error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag,
					  BUS_DMA_WAITOK, &rxd->rx_dmamap);
		if (error) {
			device_printf(sc_if->msk_if_dev,
				      "failed to create %dth Rx dmamap\n", i);

			for (j = 0; j < i; ++j) {
				rxd = &sc_if->msk_cdata.msk_rxdesc[j];
				bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
						   rxd->rx_dmamap);
			}
			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
					   sc_if->msk_cdata.msk_rx_sparemap);
			bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
			sc_if->msk_cdata.msk_rx_tag = NULL;

			return (error);
		}
	}
1965 SLIST_INIT(&sc_if->msk_jfree_listhead);
1966 SLIST_INIT(&sc_if->msk_jinuse_listhead);
1968 /* Create tag for jumbo Rx ring. */
1969 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1970 MSK_RING_ALIGN, 0, /* alignment, boundary */
1971 BUS_SPACE_MAXADDR, /* lowaddr */
1972 BUS_SPACE_MAXADDR, /* highaddr */
1973 NULL, NULL, /* filter, filterarg */
1974 MSK_JUMBO_RX_RING_SZ, /* maxsize */
1976 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
1978 NULL, NULL, /* lockfunc, lockarg */
1979 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
1981 device_printf(sc_if->msk_if_dev,
1982 "failed to create jumbo Rx ring DMA tag\n");
1986 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
1987 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
1988 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
1989 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
1990 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
1992 device_printf(sc_if->msk_if_dev,
1993 "failed to allocate DMA'able memory for jumbo Rx ring\n");
1997 ctx.msk_busaddr = 0;
1998 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
1999 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2000 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2001 msk_dmamap_cb, &ctx, 0);
2003 device_printf(sc_if->msk_if_dev,
2004 "failed to load DMA'able memory for jumbo Rx ring\n");
2007 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2009 /* Create tag for jumbo buffer blocks. */
2010 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2011 PAGE_SIZE, 0, /* alignment, boundary */
2012 BUS_SPACE_MAXADDR, /* lowaddr */
2013 BUS_SPACE_MAXADDR, /* highaddr */
2014 NULL, NULL, /* filter, filterarg */
2015 MSK_JMEM, /* maxsize */
2017 MSK_JMEM, /* maxsegsize */
2019 NULL, NULL, /* lockfunc, lockarg */
2020 &sc_if->msk_cdata.msk_jumbo_tag);
2022 device_printf(sc_if->msk_if_dev,
2023 "failed to create jumbo Rx buffer block DMA tag\n");
2027 /* Create tag for jumbo Rx buffers. */
2028 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2029 PAGE_SIZE, 0, /* alignment, boundary */
2030 BUS_SPACE_MAXADDR, /* lowaddr */
2031 BUS_SPACE_MAXADDR, /* highaddr */
2032 NULL, NULL, /* filter, filterarg */
2033 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */
2034 MSK_MAXRXSEGS, /* nsegments */
2035 MSK_JLEN, /* maxsegsize */
2037 NULL, NULL, /* lockfunc, lockarg */
2038 &sc_if->msk_cdata.msk_jumbo_rx_tag);
2040 device_printf(sc_if->msk_if_dev,
2041 "failed to create jumbo Rx DMA tag\n");
	/* Create DMA maps for jumbo Rx buffers. */
	if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
	    &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to create spare jumbo Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		jrxd->rx_m = NULL;
		jrxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
		    &jrxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc_if->msk_if_dev,
			    "failed to create jumbo Rx dmamap\n");
			goto fail;
		}
	}

	/* Allocate DMA'able memory and load the DMA map for jumbo buf. */
	error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
	    (void **)&sc_if->msk_rdata.msk_jumbo_buf,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc_if->msk_cdata.msk_jumbo_map);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to allocate DMA'able memory for jumbo buf\n");
		goto fail;
	}

	ctx.msk_busaddr = 0;
	error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
	    sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
	    MSK_JMEM, msk_dmamap_cb, &ctx, 0);
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "failed to load DMA'able memory for jumbo buf\n");
		goto fail;
	}
	sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->msk_rdata.msk_jumbo_buf;
	for (i = 0; i < MSK_JSLOTS; i++) {
		sc_if->msk_cdata.msk_jslots[i] = ptr;
		ptr += MSK_JLEN;
		entry = malloc(sizeof(struct msk_jpool_entry),
		    M_DEVBUF, M_WAITOK);
		if (entry == NULL) {
			device_printf(sc_if->msk_if_dev,
			    "no memory for jumbo buffers!\n");
			error = ENOMEM;
			goto fail;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
		    jpool_entries);
	}

fail:
	return (error);
}

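/*
 * Tear down everything msk_txrx_dma_alloc() set up, in roughly reverse
 * order: the jumbo buffer pool and jumbo Rx ring first, then the Tx/Rx
 * rings, the per-buffer DMA maps and finally the parent DMA tag.
 */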
static void
msk_txrx_dma_free(struct msk_if_softc *sc_if)
{
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	struct msk_rxdesc *jrxd;
	struct msk_jpool_entry *entry;
	int i;

	MSK_JLIST_LOCK(sc_if);
	while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
		device_printf(sc_if->msk_if_dev,
		    "asked to free buffer that is in use!\n");
		SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
		SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
		    jpool_entries);
	}
	while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
		entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}
	MSK_JLIST_UNLOCK(sc_if);

	/* Destroy jumbo buffer block. */
	if (sc_if->msk_cdata.msk_jumbo_map) {
		bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
		    sc_if->msk_cdata.msk_jumbo_map);
	}
	if (sc_if->msk_rdata.msk_jumbo_buf) {
		bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
		    sc_if->msk_rdata.msk_jumbo_buf,
		    sc_if->msk_cdata.msk_jumbo_map);
		sc_if->msk_rdata.msk_jumbo_buf = NULL;
		sc_if->msk_cdata.msk_jumbo_map = NULL;
	}

	/* Jumbo Rx ring. */
	if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) {
			bus_dmamap_unload(
			    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
		}
		if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
		    sc_if->msk_rdata.msk_jumbo_rx_ring) {
			bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
			    sc_if->msk_rdata.msk_jumbo_rx_ring,
			    sc_if->msk_cdata.msk_jumbo_rx_ring_map);
		}
		sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
		sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
		sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
	}

	/* Jumbo Rx buffers. */
	if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
		for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
			jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
			if (jrxd->rx_dmamap) {
				bus_dmamap_destroy(
				    sc_if->msk_cdata.msk_jumbo_rx_tag,
				    jrxd->rx_dmamap);
				jrxd->rx_dmamap = NULL;
			}
		}
		if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
			bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    sc_if->msk_cdata.msk_jumbo_rx_sparemap);
			sc_if->msk_cdata.msk_jumbo_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
		sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
	}

	/* Tx ring. */
	msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_rdata.msk_tx_ring,
	    sc_if->msk_cdata.msk_tx_ring_map);

	/* Rx ring. */
	msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag,
	    sc_if->msk_rdata.msk_rx_ring,
	    sc_if->msk_cdata.msk_rx_ring_map);

	/* Tx buffers. */
	if (sc_if->msk_cdata.msk_tx_tag) {
		for (i = 0; i < MSK_TX_RING_CNT; i++) {
			txd = &sc_if->msk_cdata.msk_txdesc[i];
			bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
		}
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
		sc_if->msk_cdata.msk_tx_tag = NULL;
	}

	/* Rx buffers. */
	if (sc_if->msk_cdata.msk_rx_tag) {
		for (i = 0; i < MSK_RX_RING_CNT; i++) {
			rxd = &sc_if->msk_cdata.msk_rxdesc[i];
			bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
		}
		bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
		    sc_if->msk_cdata.msk_rx_sparemap);
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
		sc_if->msk_cdata.msk_rx_tag = NULL;
	}

	if (sc_if->msk_cdata.msk_parent_tag) {
		bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
		sc_if->msk_cdata.msk_parent_tag = NULL;
	}
}

/*
 * Allocate a jumbo buffer.
 */
static void *
msk_jalloc(struct msk_if_softc *sc_if)
{
	struct msk_jpool_entry *entry;

	MSK_JLIST_LOCK(sc_if);

	entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
	if (entry == NULL) {
		MSK_JLIST_UNLOCK(sc_if);
		return (NULL);
	}

	SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);

	MSK_JLIST_UNLOCK(sc_if);

	return (sc_if->msk_cdata.msk_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
msk_jfree(void *buf, void *args)
{
	struct msk_if_softc *sc_if;
	struct msk_jpool_entry *entry;
	int i;

	/* Extract the softc struct pointer. */
	sc_if = (struct msk_if_softc *)args;
	KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));

	MSK_JLIST_LOCK(sc_if);

	/* Calculate the slot this buffer belongs to. */
	i = ((vm_offset_t)buf
	    - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
	KASSERT(i >= 0 && i < MSK_JSLOTS,
	    ("%s: asked to free buffer that we don't manage!", __func__));

	entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
	KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
	if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
		wakeup(sc_if);

	MSK_JLIST_UNLOCK(sc_if);
}

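/*
 * Encapsulate an mbuf chain into the Tx list element (LE) ring.  Besides
 * one LE per DMA segment, a frame may need an extra LE for the VLAN tag
 * and another for the checksum offload setup, which is why callers keep
 * MSK_RESERVED_TX_DESC_CNT descriptors of headroom free.
 */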
static int
msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
{
	struct msk_txdesc *txd, *txd_last;
	struct msk_tx_desc *tx_le;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
	uint32_t control, prod, si;
	uint16_t offset, tcp_offset;
	int error, i, nsegs, maxsegs, defrag;

	maxsegs = MSK_TX_RING_CNT - sc_if->msk_cdata.msk_tx_cnt -
	    MSK_RESERVED_TX_DESC_CNT;
	KASSERT(maxsegs >= MSK_SPARE_TX_DESC_CNT,
	    ("not enough spare TX desc\n"));
	if (maxsegs > MSK_MAXTXSEGS)
		maxsegs = MSK_MAXTXSEGS;

	/*
	 * Align the TX buffer on a 64-byte boundary.  This greatly improves
	 * bulk data TX performance on my 88E8053 (+100Mbps), at least.
	 * Try to avoid m_defrag() if the mbufs are not chained together
	 * by m_next (i.e. m->m_len == m->m_pkthdr.len).
	 */
#define MSK_TXBUF_ALIGN	64
#define MSK_TXBUF_MASK	(MSK_TXBUF_ALIGN - 1)
	defrag = 1;
	m = *m_head;
	if (m->m_len == m->m_pkthdr.len) {
		int space;

		space = ((uintptr_t)m->m_data & MSK_TXBUF_MASK);
		if (space) {
			if (M_WRITABLE(m)) {
				if (M_TRAILINGSPACE(m) >= space) {
					/* e.g. TCP payloads */
					bcopy(m->m_data, m->m_data + space,
					    m->m_len);
					m->m_data += space;
					defrag = 0;
					sc_if->msk_softc->msk_trailing_copied++;
				} else {
					space = MSK_TXBUF_ALIGN - space;
					if (M_LEADINGSPACE(m) >= space) {
						/* e.g. small UDP datagrams */
						bcopy(m->m_data,
						    m->m_data - space,
						    m->m_len);
						m->m_data -= space;
						defrag = 0;
						sc_if->msk_softc->
						    msk_leading_copied++;
					}
				}
			}
		} else {
			/* e.g. on the forwarding path */
			defrag = 0;
		}
	}
	if (defrag) {
		m = m_defrag(*m_head, MB_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	} else {
		sc_if->msk_softc->msk_defrag_avoided++;
	}
#undef MSK_TXBUF_MASK
#undef MSK_TXBUF_ALIGN

	tcp_offset = offset = 0;
	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
		/*
		 * Since the mbuf carries no protocol-specific structure
		 * information, we have to inspect the protocol headers here
		 * to set up TSO and checksum offload.  I don't know why
		 * Marvell made such a decision in the chip design; other
		 * GigE hardware normally takes care of all these chores.
		 * However, TSO performance of the Yukon II is good enough
		 * that it's worth implementing.
		 */
		struct ether_header *eh;
		struct ip *ip;

		/* TODO check for M_WRITABLE(m) */

		offset = sizeof(struct ether_header);
		m = m_pullup(m, offset);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/* Check if hardware VLAN insertion is off. */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			offset = sizeof(struct ether_vlan_header);
			m = m_pullup(m, offset);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, offset + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + offset);
		offset += (ip->ip_hl << 2);
		tcp_offset = offset;
		/*
		 * It seems the Yukon II has a Tx checksum offload bug for
		 * small TCP packets less than 60 bytes in size (e.g. TCP
		 * window probe packets, pure ACK packets).  Common
		 * workarounds, like padding the frame with zeros up to the
		 * minimum ethernet frame size, didn't work at all.  Instead
		 * of disabling checksum offload completely, we resort to a
		 * S/W checksum routine when we encounter short TCP frames.
		 * Short UDP packets appear to be handled correctly by the
		 * Yukon II.
		 */
		if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
		    (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
			uint16_t csum;

			csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
			    (ip->ip_hl << 2), offset);
			*(uint16_t *)(m->m_data + offset +
			    m->m_pkthdr.csum_data) = csum;
			m->m_pkthdr.csum_flags &= ~CSUM_TCP;
		}
		*m_head = m;
	}

	prod = sc_if->msk_cdata.msk_tx_prod;
	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf_defrag(sc_if->msk_cdata.msk_tx_tag, map,
	    m_head, txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(*m_head);
		*m_head = NULL;
		return (error);
	}
	bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);

	m = *m_head;
	control = 0;
	tx_le = NULL;

	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(0);
		tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
		    htons(m->m_pkthdr.ether_vtag));
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
		control |= INS_VLAN;
	}
	/* Check if we have to handle checksum offload. */
	if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
		    & 0xffff) | ((uint32_t)tcp_offset << 16));
		tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
		/* OR, not assign, so INS_VLAN set above is not clobbered. */
		control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			control |= UDPTCP;
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}

	si = prod;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
	tx_le->msk_control = htole32(txsegs[0].ds_len | control |
	    OP_PACKET);
	sc_if->msk_cdata.msk_tx_cnt++;
	MSK_INC(prod, MSK_TX_RING_CNT);

	for (i = 1; i < nsegs; i++) {
		tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
		tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
		tx_le->msk_control = htole32(txsegs[i].ds_len | control |
		    OP_BUFFER | HW_OWNER);
		sc_if->msk_cdata.msk_tx_cnt++;
		MSK_INC(prod, MSK_TX_RING_CNT);
	}
	/* Update producer index. */
	sc_if->msk_cdata.msk_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
	tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
	tx_le->msk_control |= htole32(EOP);

	/* Turn the first descriptor's ownership over to hardware. */
	tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
	tx_le->msk_control |= htole32(HW_OWNER);

	txd = &sc_if->msk_cdata.msk_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}

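/*
 * ifnet if_start method.  Dequeue and encapsulate frames until the Tx
 * ring fills up (MSK_IS_OACTIVE); the prefetch unit's put index is
 * written once, after the loop, so the register access cost is paid per
 * call rather than per frame.
 */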
static void
msk_start(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if;
	struct mbuf *m_head;
	int enq;

	sc_if = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (!sc_if->msk_link) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	enq = 0;
	while (!ifq_is_empty(&ifp->if_snd)) {
		if (MSK_IS_OACTIVE(sc_if)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (msk_encap(sc_if, &m_head) != 0) {
			ifp->if_oerrors++;
			if (sc_if->msk_cdata.msk_tx_cnt == 0) {
				continue;
			} else {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}
		enq = 1;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (enq) {
		/* Transmit */
		CSR_WRITE_2(sc_if->msk_softc,
		    Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
		    sc_if->msk_cdata.msk_tx_prod);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = MSK_TX_TIMEOUT;
	}
}

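/*
 * Tx watchdog.  Reclaim finished Tx descriptors before declaring the
 * chip wedged: a lost Tx completion interrupt is recoverable and should
 * not force a full reinitialization.
 */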
static void
msk_watchdog(struct ifnet *ifp)
{
	struct msk_if_softc *sc_if = ifp->if_softc;
	uint32_t ridx;
	int idx;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (sc_if->msk_link == 0) {
		if (bootverbose) {
			if_printf(sc_if->msk_ifp, "watchdog timeout "
			    "(missing link)\n");
		}
		ifp->if_oerrors++;
		msk_init(sc_if);
		return;
	}

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
	idx = CSR_READ_2(sc_if->msk_softc, ridx);
	if (sc_if->msk_cdata.msk_tx_cons != idx) {
		msk_txeof(sc_if, idx);
		if (sc_if->msk_cdata.msk_tx_cnt == 0) {
			if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
			    "-- recovering\n");
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			return;
		}
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	msk_init(sc_if);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static int
mskc_shutdown(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int i;

	lwkt_serialize_enter(&sc->msk_serializer);

	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL)
			msk_stop(sc->msk_if[i]);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	/* Put the hardware into reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);

	lwkt_serialize_exit(&sc->msk_serializer);
	return (0);
}

static int
mskc_suspend(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int i;

	lwkt_serialize_enter(&sc->msk_serializer);

	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0))
			msk_stop(sc->msk_if[i]);
	}

	/* Disable all interrupts. */
	CSR_WRITE_4(sc, B0_IMSK, 0);
	CSR_READ_4(sc, B0_IMSK);
	CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
	CSR_READ_4(sc, B0_HWE_IMSK);

	mskc_phy_power(sc, MSK_PHY_POWERDOWN);

	/* Put the hardware into reset. */
	CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
	sc->msk_suspended = 1;

	lwkt_serialize_exit(&sc->msk_serializer);
	return (0);
}

static int
mskc_resume(device_t dev)
{
	struct msk_softc *sc = device_get_softc(dev);
	int i;

	lwkt_serialize_enter(&sc->msk_serializer);

	mskc_reset(sc);
	for (i = 0; i < sc->msk_num_port; i++) {
		if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
		    ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
			msk_init(sc->msk_if[i]);
	}
	sc->msk_suspended = 0;

	lwkt_serialize_exit(&sc->msk_serializer);
	return (0);
}

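/*
 * Process one received frame from the standard Rx ring.  On success the
 * filled mbuf is handed up through the chained ether input path and a
 * fresh buffer takes its ring slot; if no replacement can be allocated,
 * the frame is dropped and the old buffer is recycled instead.
 */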
static void
msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len,
    struct mbuf_chain *chain)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct msk_rxdesc *rxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= EVL_ENCAPLEN;
		if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packets as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				ifp->if_ierrors++;
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
		rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
		m = rxd->rx_m;
		if (msk_newbuf(sc_if, cons, 0) != 0) {
			ifp->if_iqdrops++;
			/* Reuse old buffer. */
			msk_discard_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		ifp->if_ipackets++;

		/* Check for VLAN tagged packets. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
			m->m_flags |= M_VLANTAG;
		}

		ether_input_chain(ifp, m, NULL, chain);
	} while (0);

	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
}

static void
msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct msk_rxdesc *jrxd;
	int cons, rxlen;

	ifp = sc_if->msk_ifp;

	MSK_IF_LOCK_ASSERT(sc_if);

	cons = sc_if->msk_cdata.msk_rx_cons;
	do {
		rxlen = status >> 16;
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
			rxlen -= ETHER_VLAN_ENCAP_LEN;
		if (len > sc_if->msk_framesize ||
		    ((status & GMR_FS_ANY_ERR) != 0) ||
		    ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
			/* Don't count flow-control packets as errors. */
			if ((status & GMR_FS_GOOD_FC) == 0)
				ifp->if_ierrors++;
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
		m = jrxd->rx_m;
		if (msk_jumbo_newbuf(sc_if, cons) != 0) {
			ifp->if_iqdrops++;
			/* Reuse old buffer. */
			msk_discard_jumbo_rxbuf(sc_if, cons);
			break;
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;
		ifp->if_ipackets++;

		/* Check for VLAN tagged packets. */
		if ((status & GMR_FS_VLAN) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
			m->m_flags |= M_VLANTAG;
		}
		MSK_IF_UNLOCK(sc_if);
		(*ifp->if_input)(ifp, m);
		MSK_IF_LOCK(sc_if);
	} while (0);

	MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
	MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
}

static void
msk_txeof(struct msk_if_softc *sc_if, int idx)
{
	struct msk_txdesc *txd;
	struct msk_tx_desc *cur_tx;
	struct ifnet *ifp;
	uint32_t control;
	int cons;

	ifp = sc_if->msk_ifp;

	/*
	 * Go through our Tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	cons = sc_if->msk_cdata.msk_tx_cons;
	for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
		if (sc_if->msk_cdata.msk_tx_cnt <= 0)
			break;

		cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
		control = le32toh(cur_tx->msk_control);
		sc_if->msk_cdata.msk_tx_cnt--;
		if ((control & EOP) == 0)
			continue;
		txd = &sc_if->msk_cdata.msk_txdesc[cons];
		bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);

		ifp->if_opackets++;
		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc_if->msk_cdata.msk_tx_cons = cons;
	if (!MSK_IS_OACTIVE(sc_if))
		ifp->if_flags &= ~IFF_OACTIVE;
	if (sc_if->msk_cdata.msk_tx_cnt == 0)
		ifp->if_timer = 0;
	/* No need to sync LEs as we didn't update LEs. */
}

static void
msk_tick(void *xsc_if)
{
	struct msk_if_softc *sc_if = xsc_if;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mii_data *mii;

	lwkt_serialize_enter(ifp->if_serializer);

	mii = device_get_softc(sc_if->msk_miibus);
	mii_tick(mii);

	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
msk_intr_phy(struct msk_if_softc *sc_if)
{
	uint16_t status;

	msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
	status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);

	/* Handle FIFO Underrun/Overflow? */
	if (status & PHY_M_IS_FIFO_ERROR) {
		device_printf(sc_if->msk_if_dev,
		    "PHY FIFO underrun/overflow.\n");
	}
}

static void
msk_intr_gmac(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	uint8_t status;

	sc = sc_if->msk_softc;
	status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* GMAC Rx FIFO overrun. */
	if ((status & GM_IS_RX_FF_OR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
		    GMF_CLI_RX_FO);
		device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
	}
	/* GMAC Tx FIFO underrun. */
	if ((status & GM_IS_TX_FF_UR) != 0) {
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_FU);
		device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
		/*
		 * XXX
		 * In case of a Tx underrun we may need to flush/reset the
		 * Tx MAC, but that would also require resynchronization
		 * with the status LEs.  Reinitializing the status LEs would
		 * affect the other port in a dual MAC configuration, so it
		 * should be avoided as much as possible.  Due to the lack
		 * of documentation this is all vague guesswork; it needs
		 * more investigation.
		 */
	}
}

static void
msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if ((status & Y2_IS_PAR_RD1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer read parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_RD_PERR);
	}
	if ((status & Y2_IS_PAR_WR1) != 0) {
		device_printf(sc_if->msk_if_dev,
		    "RAM buffer write parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
		    RI_CLR_WR_PERR);
	}
	if ((status & Y2_IS_PAR_MAC1) != 0) {
		device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
		    GMF_CLI_TX_PE);
	}
	if ((status & Y2_IS_PAR_RX1) != 0) {
		device_printf(sc_if->msk_if_dev, "Rx parity error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
	}
	if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
		device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
		/* Clear IRQ. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
	}
}

static void
mskc_intr_hwerr(struct msk_softc *sc)
{
	uint32_t status;
	uint32_t tlphead[4];

	status = CSR_READ_4(sc, B0_HWE_ISRC);

	/* Time Stamp timer overflow. */
	if ((status & Y2_IS_TIST_OV) != 0)
		CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
	if ((status & Y2_IS_PCI_NEXP) != 0) {
		/*
		 * A PCI Express error occurred which is not described in
		 * the PEX spec.
		 * This error is also mapped either to the Master Abort
		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
		 * can only be cleared there.
		 */
		device_printf(sc->msk_dev,
		    "PCI Express protocol violation error\n");
	}

	if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
		uint16_t v16;

		if ((status & Y2_IS_MST_ERR) != 0)
			device_printf(sc->msk_dev,
			    "unexpected IRQ Master error\n");
		else
			device_printf(sc->msk_dev,
			    "unexpected IRQ Status error\n");
		/* Reset all bits in the PCI status register. */
		v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
		    PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
		    PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	/* Check for PCI Express Uncorrectable Error. */
	if ((status & Y2_IS_PCI_EXP) != 0) {
		uint32_t v32;

		/*
		 * On the PCI Express bus, bridges are called root complexes
		 * (RC).  PCI Express errors are recognized by the root
		 * complex too, which requests the system to handle the
		 * problem.  After the error occurrence it may be that no
		 * access to the adapter can be performed any longer.
		 */

		v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
		if ((v32 & PEX_UNSUP_REQ) != 0) {
			/* Ignore unsupported request errors. */
			if (bootverbose) {
				device_printf(sc->msk_dev,
				    "Uncorrectable PCI Express error\n");
			}
		}
		if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
			int i;

			/* Get TLP header from Log Registers. */
			for (i = 0; i < 4; i++)
				tlphead[i] = CSR_PCI_READ_4(sc,
				    PEX_HEADER_LOG + i * 4);

			/* Check for vendor defined broadcast message. */
			if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
				sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
				CSR_WRITE_4(sc, B0_HWE_IMSK,
				    sc->msk_intrhwemask);
				CSR_READ_4(sc, B0_HWE_IMSK);
			}
		}
		/* Clear the interrupt. */
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
		CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
	}

	if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
	if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
		msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
}

static __inline void
msk_rxput(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;

	sc = sc_if->msk_softc;
	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
		bus_dmamap_sync(
		    sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
		    sc_if->msk_cdata.msk_jumbo_rx_ring_map,
		    BUS_DMASYNC_PREWRITE);
	}
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
	    PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
}

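/*
 * Drain the status LE ring.  The chip reports Rx and Tx completions for
 * both ports through this single ring; each LE is handed back to the
 * hardware by clearing HW_OWNER.  Returns non-zero when the put index
 * moved while we were processing, i.e. more events are already pending.
 */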
static int
mskc_handle_events(struct msk_softc *sc)
{
	struct msk_if_softc *sc_if;
	int rxput[2];
	struct msk_stat_desc *sd;
	uint32_t control, status;
	int cons, idx, len, port, rxprog;
	struct mbuf_chain chain[MAXCPU];

	idx = CSR_READ_2(sc, STAT_PUT_IDX);
	if (idx == sc->msk_stat_cons)
		return (0);

	ether_input_chain_init(chain);

	rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;

	rxprog = 0;
	for (cons = sc->msk_stat_cons; cons != idx;) {
		sd = &sc->msk_stat_ring[cons];
		control = le32toh(sd->msk_control);
		if ((control & HW_OWNER) == 0)
			break;
		/*
		 * Marvell's FreeBSD driver updates the status LE after
		 * clearing HW_OWNER.  However, we don't have a way to sync
		 * a single LE with the bus_dma(9) API; bus_dma(9) only
		 * provides a way to sync an entire DMA map.  So don't sync
		 * the LE until we have a better way to sync LEs.
		 */
		control &= ~HW_OWNER;
		sd->msk_control = htole32(control);
		status = le32toh(sd->msk_status);
		len = control & STLE_LEN_MASK;
		port = (control >> 16) & 0x01;
		sc_if = sc->msk_if[port];
		if (sc_if == NULL) {
			device_printf(sc->msk_dev, "invalid port opcode "
			    "0x%08x\n", control & STLE_OP_MASK);
			/* Still advance, or we would spin on this LE. */
			MSK_INC(cons, MSK_STAT_RING_CNT);
			continue;
		}

		switch (control & STLE_OP_MASK) {
		case OP_RXVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXCHKSVLAN:
			sc_if->msk_vtag = ntohs(len);
			break;
		case OP_RXSTAT:
			if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
				msk_jumbo_rxeof(sc_if, status, len);
			else
				msk_rxeof(sc_if, status, len, chain);
			rxprog++;
			/*
			 * Because there is no way to sync a single Rx LE,
			 * put the DMA sync operation off until the end of
			 * event processing.
			 */
			rxput[port]++;
			/* Update prefetch unit if we've passed water mark. */
			if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
				msk_rxput(sc_if);
				rxput[port] = 0;
			}
			break;
		case OP_TXINDEXLE:
			if (sc->msk_if[MSK_PORT_A] != NULL) {
				msk_txeof(sc->msk_if[MSK_PORT_A],
				    status & STLE_TXA1_MSKL);
			}
			if (sc->msk_if[MSK_PORT_B] != NULL) {
				msk_txeof(sc->msk_if[MSK_PORT_B],
				    ((status & STLE_TXA2_MSKL) >>
				     STLE_TXA2_SHIFTL) |
				    ((len & STLE_TXA2_MSKH) <<
				     STLE_TXA2_SHIFTH));
			}
			break;
		default:
			device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
			    control & STLE_OP_MASK);
			break;
		}
		MSK_INC(cons, MSK_STAT_RING_CNT);
		if (rxprog > sc->msk_process_limit)
			break;
	}

	ether_input_dispatch(chain);

	sc->msk_stat_cons = cons;
	/* XXX We should sync status LEs here.  See the above notes. */

	if (rxput[MSK_PORT_A] > 0)
		msk_rxput(sc->msk_if[MSK_PORT_A]);
	if (rxput[MSK_PORT_B] > 0)
		msk_rxput(sc->msk_if[MSK_PORT_B]);

	return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
}

/* Legacy interrupt handler for shared interrupt. */
static void
mskc_intr(void *xsc)
{
	struct msk_softc *sc;
	struct msk_if_softc *sc_if0, *sc_if1;
	struct ifnet *ifp0, *ifp1;
	uint32_t status;

	sc = xsc;
	ASSERT_SERIALIZED(&sc->msk_serializer);

	/* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
	status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
	if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
	    (status & sc->msk_intrmask) == 0) {
		CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
		return;
	}

	sc_if0 = sc->msk_if[MSK_PORT_A];
	sc_if1 = sc->msk_if[MSK_PORT_B];
	ifp0 = ifp1 = NULL;
	if (sc_if0 != NULL)
		ifp0 = sc_if0->msk_ifp;
	if (sc_if1 != NULL)
		ifp1 = sc_if1->msk_ifp;

	if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
		msk_intr_phy(sc_if0);
	if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
		msk_intr_phy(sc_if1);
	if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
		msk_intr_gmac(sc_if0);
	if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
		msk_intr_gmac(sc_if1);
	if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
		device_printf(sc->msk_dev, "Rx descriptor error\n");
		sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
		CSR_READ_4(sc, B0_IMSK);
	}
	if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
		device_printf(sc->msk_dev, "Tx descriptor error\n");
		sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
		CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
		CSR_READ_4(sc, B0_IMSK);
	}
	if ((status & Y2_IS_HW_ERR) != 0)
		mskc_intr_hwerr(sc);

	while (mskc_handle_events(sc) != 0)
		;
	if ((status & Y2_IS_STAT_BMU) != 0)
		CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);

	/* Reenable interrupts. */
	CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);

	if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 &&
	    !ifq_is_empty(&ifp0->if_snd))
		if_devstart(ifp0);
	if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 &&
	    !ifq_is_empty(&ifp1->if_snd))
		if_devstart(ifp1);
}

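/*
 * Bring the interface up: program the GMAC, the Rx/Tx MAC FIFOs, the
 * RAM buffers, the BMUs and the prefetch units, then (re)load the Rx/Tx
 * rings and unmask this port's interrupts.  Called with the serializer
 * held.
 */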
static void
msk_init(void *xsc)
{
	struct msk_if_softc *sc_if = xsc;
	struct msk_softc *sc = sc_if->msk_softc;
	struct ifnet *ifp = sc_if->msk_ifp;
	struct mii_data *mii;
	uint16_t eaddr[ETHER_ADDR_LEN / 2];
	uint16_t gmac;
	int error, i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii = device_get_softc(sc_if->msk_miibus);

	error = 0;
	/* Cancel pending I/O and free all Rx/Tx buffers. */
	msk_stop(sc_if);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
	    sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
		/*
		 * On Yukon EC Ultra, TSO and checksum offload are not
		 * supported for jumbo frames.
		 */
		ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
		ifp->if_capenable &= ~IFCAP_TXCSUM;
	}

	/*
	 * Initialize GMAC first.
	 * Without this initialization, the Rx MAC did not work as expected:
	 * it garbled status LEs, which resulted in out-of-order or
	 * duplicated frame delivery and, in turn, very poor Rx performance.
	 * (I had to write packet analysis code that could be embedded in
	 * the driver to diagnose this issue.)
	 * I've spent almost 2 months fixing this issue.  If I had had a
	 * datasheet for the Yukon II I wouldn't have encountered this. :-(
	 */
	gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);

	/* Dummy read the Interrupt Source Register. */
	CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));

	/* Set MIB Clear Counter Mode. */
	gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
	/* Read all MIB Counters with Clear Mode set. */
	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
		GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
	/* Clear MIB Clear Counter Mode. */
	gmac &= ~GM_PAR_MIB_CLR;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);

	/* Disable Rx CRC checking. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);

	/* Setup Transmit Control Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));

	/* Setup Transmit Flow Control Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);

	/* Setup Transmit Parameter Register. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
	    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
	    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));

	gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
	    GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);

	if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
		gmac |= GM_SMOD_JUMBO_ENA;
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);

	/* Set station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
		    eaddr[i]);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
		    eaddr[i]);

	/* Disable interrupts for counter overflows. */
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);

	/* Configure Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
	    GMF_OPER_ON | GMF_RX_F_FL_ON);

	/* Set receive filter. */
	msk_rxfilter(sc_if);

	/* Flush Rx MAC FIFO on any flow control or error. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
	    GMR_FS_ANY_ERR);

	/*
	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
	 * due to a hardware hang on receipt of pause frames.
	 */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
	    RX_GMF_FL_THR_DEF + 1);

	/* Configure Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);

	/* Configure hardware VLAN tag insertion/stripping. */
	msk_setvlan(sc_if, ifp);

	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
		/* Set Rx Pause threshold. */
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
		    MSK_ECU_LLPP);
		CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
		    MSK_ECU_ULPP);
		if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
			/* Set Tx GMAC FIFO Almost Empty Threshold. */
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
			    MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
			/* Disable Store & Forward mode for Tx. */
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
			    TX_JUMBO_ENA | TX_STFW_DIS);
		} else {
			/* Enable Store & Forward mode for Tx. */
			CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
			    TX_JUMBO_DIS | TX_STFW_ENA);
		}
	}

	/*
	 * Disable the Force Sync bit and the Alloc bit in the Tx RAM
	 * interface arbiter as we don't use the Sync Tx queue.
	 */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
	    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
	/* Enable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);

	/* Setup RAM buffer. */
	msk_set_rambuffer(sc_if);

	/* Disable Tx sync Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);

	/* Setup Tx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
	    sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
		/* Fix for Yukon-EC Ultra: set BMU FIFO level. */
		CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
	}

	/* Setup Rx Queue Bus Memory Interface. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
	CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
	if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
	    sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
		/* MAC Rx RAM Read is controlled by hardware. */
		CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
	}

	msk_set_prefetch(sc, sc_if->msk_txq,
	    sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
	msk_init_tx_ring(sc_if);

	/* Disable Rx checksum offload and RSS hash. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);

	if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
		    MSK_JUMBO_RX_RING_CNT - 1);
		error = msk_init_jumbo_rx_ring(sc_if);
	} else {
		msk_set_prefetch(sc, sc_if->msk_rxq,
		    sc_if->msk_rdata.msk_rx_ring_paddr,
		    MSK_RX_RING_CNT - 1);
		error = msk_init_rx_ring(sc_if);
	}
	if (error != 0) {
		device_printf(sc_if->msk_if_dev,
		    "initialization failed: no memory for Rx buffers\n");
		msk_stop(sc_if);
		return;
	}

	/* Configure interrupt handling. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask |= Y2_IS_PORT_A;
		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask |= Y2_IS_PORT_B;
		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	sc_if->msk_link = 0;
	mii_mediachg(mii);

	mskc_set_imtimer(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}

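/*
 * Partition this port's on-chip RAM buffer between its Rx and Tx queues.
 * The start/end/read/write pointer registers are in units of 64-bit
 * words, hence the divisions by 8; utpp/ltpp program the upper and lower
 * pause-threshold registers (RB_RX_UTPP/RB_RX_LTPP) used for Rx flow
 * control.
 */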
static void
msk_set_rambuffer(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	int ltpp, utpp;

	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		return;

	sc = sc_if->msk_softc;

	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);

	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable Store & Forward for the Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}

static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{
	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the list last index. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure the write posted. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}

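/*
 * Stop the interface.  The Tx BMU and the Rx RAM buffer/BMU must be shut
 * down in the order the hardware requires (see the comment above the Rx
 * stop sequence below); afterwards every mbuf still owned by the rings
 * is unloaded and freed.  Called with the serializer held.
 */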
static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc = sc_if->msk_softc;
	struct ifnet *ifp = sc_if->msk_ifp;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
	struct msk_rxdesc *jrxd;
	uint32_t val;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc_if->msk_tick_ch);
	ifp->if_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read again to ensure the write posted. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

	/* Stop Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupts. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work for Yukon-2 if the BMU does not
	 * reach the end of packet and since we can't make sure that we have
	 * incoming data, we must reset the BMU while it is not during a DMA
	 * transfer. Since it is possible that the Rx path is still active,
	 * the Rx RAM buffer will be stopped first, so any possible incoming
	 * data will not trigger a DMA. After the RAM buffer is stopped, the
	 * BMU is polled until any DMA in progress is ended and only then it
	 * will be reset.
	 */

	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc_if->msk_link = 0;
}

static int
mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
	    MSK_PROC_MIN, MSK_PROC_MAX);
}

static int
mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc = arg1;
	struct lwkt_serialize *serializer = &sc->msk_serializer;
	int error, v;

	lwkt_serialize_enter(serializer);

	v = sc->msk_intr_rate;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v < 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->msk_intr_rate != v) {
		int flag = 0, i;

		sc->msk_intr_rate = v;
		for (i = 0; i < 2; ++i) {
			if (sc->msk_if[i] != NULL) {
				flag |= sc->msk_if[i]->
				    arpcom.ac_if.if_flags & IFF_RUNNING;
			}
		}
		if (flag)
			mskc_set_imtimer(sc);
	}
back:
	lwkt_serialize_exit(serializer);
	return error;
}

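/*
 * Convenience wrapper around bus_dmamem_coherent(9): allocate `size'
 * bytes of zeroed, MSK_RING_ALIGN-aligned coherent DMA memory and hand
 * back the tag, map, KVA and bus address in one call.
 */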
static int
msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(sc_if->msk_cdata.msk_parent_tag,
	    MSK_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(dev, "can't create coherent DMA memory\n");
		return error;
	}

	*dtag = dmem.dmem_tag;
	*dmap = dmem.dmem_map;
	*addr = dmem.dmem_addr;
	*paddr = dmem.dmem_busaddr;

	return 0;
}

static void
msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

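/*
 * Program the interrupt moderation timer.  For a rate of N interrupts
 * per second the timer is loaded with the number of chip ticks in
 * 1000000/N microseconds (see MSK_USECS()); a rate of 0 stops the timer
 * and disables moderation.
 */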
static void
mskc_set_imtimer(struct msk_softc *sc)
{
	if (sc->msk_intr_rate > 0) {
		/*
		 * XXX myk(4) seems to use 125MHz for EC/FE/XL
		 * and 78.125MHz for the rest of the chip types.
		 */
		CSR_WRITE_4(sc, B2_IRQM_INI,
		    MSK_USECS(sc, 1000000 / sc->msk_intr_rate));
		CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START);
	} else {
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP);
	}
}