1 /******************************************************************************
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
9 *****************************************************************************/
11 /******************************************************************************
14 * Copyright (C) Marvell International Ltd. and/or its affiliates
16 * The computer program files contained in this folder ("Files")
17 * are provided to you under the BSD-type license terms provided
18 * below, and any use of such Files and any derivative works
19 * thereof created by you shall be governed by the following terms
22 * - Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials provided
27 * with the distribution.
28 * - Neither the name of Marvell nor the names of its contributors
29 * may be used to endorse or promote products derived from this
30 * software without specific prior written permission.
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 * OF THE POSSIBILITY OF SUCH DAMAGE.
46 *****************************************************************************/
49 * Copyright (c) 1997, 1998, 1999, 2000
50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 * may be used to endorse or promote products derived from this software
65 * without specific prior written permission.
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
95 /* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */
96 /* $DragonFly: src/sys/dev/netif/msk/if_msk.c,v 1.5 2008/06/26 13:08:55 sephe Exp $ */
99 * Device driver for the Marvell Yukon II Ethernet controller.
100 * Due to lack of documentation, this driver is based on the code from
101 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
103 #include "opt_ethernet.h"
105 #include <sys/param.h>
106 #include <sys/endian.h>
107 #include <sys/kernel.h>
109 #include <sys/in_cksum.h>
110 #include <sys/interrupt.h>
111 #include <sys/malloc.h>
112 #include <sys/proc.h>
113 #include <sys/rman.h>
114 #include <sys/serialize.h>
115 #include <sys/socket.h>
116 #include <sys/sockio.h>
117 #include <sys/sysctl.h>
119 #include <net/ethernet.h>
122 #include <net/if_arp.h>
123 #include <net/if_dl.h>
124 #include <net/if_media.h>
125 #include <net/ifq_var.h>
126 #include <net/vlan/if_vlan_var.h>
128 #include <netinet/ip.h>
129 #include <netinet/ip_var.h>
131 #include <dev/netif/mii_layer/miivar.h>
133 #include <bus/pci/pcireg.h>
134 #include <bus/pci/pcivar.h>
136 #include "if_mskreg.h"
138 /* "device miibus" required. See GENERIC if you get errors here. */
139 #include "miibus_if.h"
141 #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
144 * Devices supported by this driver.
146 static const struct msk_product {
147 uint16_t msk_vendorid;
148 uint16_t msk_deviceid;
149 const char *msk_name;
151 { VENDORID_SK, DEVICEID_SK_YUKON2,
152 "SK-9Sxx Gigabit Ethernet" },
153 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
154 "SK-9Exx Gigabit Ethernet"},
155 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
156 "Marvell Yukon 88E8021CU Gigabit Ethernet" },
157 { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
158 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
159 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
160 "Marvell Yukon 88E8022CU Gigabit Ethernet" },
161 { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
162 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
163 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
164 "Marvell Yukon 88E8061CU Gigabit Ethernet" },
165 { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
166 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
167 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
168 "Marvell Yukon 88E8062CU Gigabit Ethernet" },
169 { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
170 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
171 { VENDORID_MARVELL, DEVICEID_MRVL_8035,
172 "Marvell Yukon 88E8035 Gigabit Ethernet" },
173 { VENDORID_MARVELL, DEVICEID_MRVL_8036,
174 "Marvell Yukon 88E8036 Gigabit Ethernet" },
175 { VENDORID_MARVELL, DEVICEID_MRVL_8038,
176 "Marvell Yukon 88E8038 Gigabit Ethernet" },
177 { VENDORID_MARVELL, DEVICEID_MRVL_8039,
178 "Marvell Yukon 88E8039 Gigabit Ethernet" },
179 { VENDORID_MARVELL, DEVICEID_MRVL_4361,
180 "Marvell Yukon 88E8050 Gigabit Ethernet" },
181 { VENDORID_MARVELL, DEVICEID_MRVL_4360,
182 "Marvell Yukon 88E8052 Gigabit Ethernet" },
183 { VENDORID_MARVELL, DEVICEID_MRVL_4362,
184 "Marvell Yukon 88E8053 Gigabit Ethernet" },
185 { VENDORID_MARVELL, DEVICEID_MRVL_4363,
186 "Marvell Yukon 88E8055 Gigabit Ethernet" },
187 { VENDORID_MARVELL, DEVICEID_MRVL_4364,
188 "Marvell Yukon 88E8056 Gigabit Ethernet" },
189 { VENDORID_MARVELL, DEVICEID_MRVL_436A,
190 "Marvell Yukon 88E8058 Gigabit Ethernet" },
191 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
192 "D-Link 550SX Gigabit Ethernet" },
193 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
194 "D-Link 560T Gigabit Ethernet" },
198 static const char *model_name[] = {
206 static int mskc_probe(device_t);
207 static int mskc_attach(device_t);
208 static int mskc_detach(device_t);
209 static int mskc_shutdown(device_t);
210 static int mskc_suspend(device_t);
211 static int mskc_resume(device_t);
212 static void mskc_intr(void *);
214 static void mskc_reset(struct msk_softc *);
215 static void mskc_set_imtimer(struct msk_softc *);
216 static void mskc_intr_hwerr(struct msk_softc *);
217 static int mskc_handle_events(struct msk_softc *);
218 static void mskc_phy_power(struct msk_softc *, int);
219 static int mskc_setup_rambuffer(struct msk_softc *);
220 static int mskc_status_dma_alloc(struct msk_softc *);
221 static void mskc_status_dma_free(struct msk_softc *);
222 static int mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS);
223 static int mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
225 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
227 static int msk_probe(device_t);
228 static int msk_attach(device_t);
229 static int msk_detach(device_t);
230 static int msk_miibus_readreg(device_t, int, int);
231 static int msk_miibus_writereg(device_t, int, int, int);
232 static void msk_miibus_statchg(device_t);
234 static void msk_init(void *);
235 static int msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
236 static void msk_start(struct ifnet *);
237 static void msk_watchdog(struct ifnet *);
238 static int msk_mediachange(struct ifnet *);
239 static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
241 static void msk_tick(void *);
242 static void msk_intr_phy(struct msk_if_softc *);
243 static void msk_intr_gmac(struct msk_if_softc *);
245 msk_rxput(struct msk_if_softc *);
246 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
247 static void msk_rxeof(struct msk_if_softc *, uint32_t, int,
248 struct mbuf_chain *);
249 static void msk_txeof(struct msk_if_softc *, int);
250 static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
251 static void msk_set_rambuffer(struct msk_if_softc *);
252 static void msk_stop(struct msk_if_softc *);
254 static void msk_dmamap_cb(void *, bus_dma_segment_t *, int, int);
255 static void msk_dmamap_mbuf_cb(void *, bus_dma_segment_t *, int,
257 static int msk_txrx_dma_alloc(struct msk_if_softc *);
258 static void msk_txrx_dma_free(struct msk_if_softc *);
259 static int msk_init_rx_ring(struct msk_if_softc *);
260 static void msk_init_tx_ring(struct msk_if_softc *);
262 msk_discard_rxbuf(struct msk_if_softc *, int);
263 static int msk_newbuf(struct msk_if_softc *, int);
265 msk_defrag(struct mbuf *, int, int);
266 static int msk_encap(struct msk_if_softc *, struct mbuf **);
269 static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
270 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
271 static int msk_jumbo_newbuf(struct msk_if_softc *, int);
272 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
273 static void *msk_jalloc(struct msk_if_softc *);
274 static void msk_jfree(void *, void *);
277 static int msk_phy_readreg(struct msk_if_softc *, int, int);
278 static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
280 static void msk_setmulti(struct msk_if_softc *);
281 static void msk_setvlan(struct msk_if_softc *, struct ifnet *);
282 static void msk_setpromisc(struct msk_if_softc *);
284 static int msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
285 void **, bus_addr_t *, bus_dmamap_t *);
286 static void msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
288 static device_method_t mskc_methods[] = {
289 /* Device interface */
290 DEVMETHOD(device_probe, mskc_probe),
291 DEVMETHOD(device_attach, mskc_attach),
292 DEVMETHOD(device_detach, mskc_detach),
293 DEVMETHOD(device_suspend, mskc_suspend),
294 DEVMETHOD(device_resume, mskc_resume),
295 DEVMETHOD(device_shutdown, mskc_shutdown),
298 DEVMETHOD(bus_print_child, bus_generic_print_child),
299 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
304 static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
305 static devclass_t mskc_devclass;
307 static device_method_t msk_methods[] = {
308 /* Device interface */
309 DEVMETHOD(device_probe, msk_probe),
310 DEVMETHOD(device_attach, msk_attach),
311 DEVMETHOD(device_detach, msk_detach),
312 DEVMETHOD(device_shutdown, bus_generic_shutdown),
315 DEVMETHOD(bus_print_child, bus_generic_print_child),
316 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
319 DEVMETHOD(miibus_readreg, msk_miibus_readreg),
320 DEVMETHOD(miibus_writereg, msk_miibus_writereg),
321 DEVMETHOD(miibus_statchg, msk_miibus_statchg),
326 static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
327 static devclass_t msk_devclass;
329 DECLARE_DUMMY_MODULE(if_msk);
330 DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, 0, 0);
331 DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, 0, 0);
332 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, 0, 0);
334 static int mskc_intr_rate = 0;
335 static int mskc_process_limit = MSK_PROC_DEFAULT;
337 TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate);
338 TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit);
341 msk_miibus_readreg(device_t dev, int phy, int reg)
343 struct msk_if_softc *sc_if;
345 if (phy != PHY_ADDR_MARV)
348 sc_if = device_get_softc(dev);
350 return (msk_phy_readreg(sc_if, phy, reg));
354 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
356 struct msk_softc *sc;
359 sc = sc_if->msk_softc;
361 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
362 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
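/*
 * The SMI read completes asynchronously: poll GM_SMI_CTRL until the
 * GM_SMI_CT_RD_VAL bit indicates the read data is valid, then fetch
 * the result from GM_SMI_DATA.  MSK_TIMEOUT bounds the busy-wait.
 */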
364 for (i = 0; i < MSK_TIMEOUT; i++) {
366 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
367 if ((val & GM_SMI_CT_RD_VAL) != 0) {
368 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
373 if (i == MSK_TIMEOUT) {
374 if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
382 msk_miibus_writereg(device_t dev, int phy, int reg, int val)
384 struct msk_if_softc *sc_if;
386 if (phy != PHY_ADDR_MARV)
389 sc_if = device_get_softc(dev);
391 return (msk_phy_writereg(sc_if, phy, reg, val));
395 msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
397 struct msk_softc *sc;
400 sc = sc_if->msk_softc;
402 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
403 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
404 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
405 for (i = 0; i < MSK_TIMEOUT; i++) {
407 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
408 GM_SMI_CT_BUSY) == 0)
411 if (i == MSK_TIMEOUT)
412 if_printf(sc_if->msk_ifp, "phy write timeout\n");
418 msk_miibus_statchg(device_t dev)
420 struct msk_if_softc *sc_if;
421 struct msk_softc *sc;
422 struct mii_data *mii;
426 sc_if = device_get_softc(dev);
427 sc = sc_if->msk_softc;
429 mii = device_get_softc(sc_if->msk_miibus);
430 ifp = sc_if->msk_ifp;
432 if (mii->mii_media_status & IFM_ACTIVE) {
433 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
438 if (sc_if->msk_link != 0) {
439 /* Enable Tx FIFO Underrun. */
440 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
441 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
443 * Because mii(4) notifies msk(4) when it detects a link status
444 * change, there is no need to enable automatic
445 * speed/flow-control/duplex updates.
447 gmac = GM_GPCR_AU_ALL_DIS;
448 switch (IFM_SUBTYPE(mii->mii_media_active)) {
451 gmac |= GM_GPCR_SPEED_1000;
454 gmac |= GM_GPCR_SPEED_100;
460 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
461 gmac |= GM_GPCR_DUP_FULL;
462 /* Disable Rx flow control. */
463 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
464 gmac |= GM_GPCR_FC_RX_DIS;
465 /* Disable Tx flow control. */
466 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
467 gmac |= GM_GPCR_FC_TX_DIS;
468 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
469 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
470 /* Read back to ensure the write completed. */
471 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
474 if (((mii->mii_media_active & IFM_GMASK) &
475 (IFM_FLAG0 | IFM_FLAG1)) == 0)
476 gmac = GMC_PAUSE_OFF;
477 /* Disable pause for 10/100 Mbps in half-duplex mode. */
478 if ((((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) &&
479 (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX ||
480 IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T))
481 gmac = GMC_PAUSE_OFF;
482 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
484 /* Enable PHY interrupt for FIFO underrun/overflow. */
485 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
486 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
489 * Link state changed to down.
490 * Disable PHY interrupts.
492 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
493 /* Disable Rx/Tx MAC. */
494 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
495 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
496 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
497 /* Read back to ensure the write completed. */
498 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
503 msk_setmulti(struct msk_if_softc *sc_if)
505 struct msk_softc *sc;
507 struct ifmultiaddr *ifma;
512 sc = sc_if->msk_softc;
513 ifp = sc_if->msk_ifp;
515 bzero(mchash, sizeof(mchash));
516 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
517 mode |= GM_RXCR_UCF_ENA;
518 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
519 if ((ifp->if_flags & IFF_PROMISC) != 0)
520 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
521 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
526 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
527 if (ifma->ifma_addr->sa_family != AF_LINK)
529 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
530 ifma->ifma_addr), ETHER_ADDR_LEN);
531 /* Just want the 6 least significant bits. */
532 crc &= 0x3f;
533 /* Set the corresponding bit in the hash table. */
534 mchash[crc >> 5] |= 1 << (crc & 0x1f);
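/*
 * Worked example: an address whose big-endian CRC32 masks down to
 * 0x2b (43) sets word 43 >> 5 = 1, bit 43 & 0x1f = 11, i.e. bit 11
 * of mchash[1].
 */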
536 mode |= GM_RXCR_MCF_ENA;
539 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
541 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
542 (mchash[0] >> 16) & 0xffff);
543 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
545 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
546 (mchash[1] >> 16) & 0xffff);
547 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
551 msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
553 struct msk_softc *sc;
555 sc = sc_if->msk_softc;
556 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
557 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
559 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
562 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
564 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
570 msk_setpromisc(struct msk_if_softc *sc_if)
572 struct msk_softc *sc;
576 sc = sc_if->msk_softc;
577 ifp = sc_if->msk_ifp;
579 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
580 if (ifp->if_flags & IFF_PROMISC)
581 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
583 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
584 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
588 msk_init_rx_ring(struct msk_if_softc *sc_if)
590 struct msk_ring_data *rd;
591 struct msk_rxdesc *rxd;
594 sc_if->msk_cdata.msk_rx_cons = 0;
595 sc_if->msk_cdata.msk_rx_prod = 0;
596 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
598 rd = &sc_if->msk_rdata;
599 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
600 prod = sc_if->msk_cdata.msk_rx_prod;
601 for (i = 0; i < MSK_RX_RING_CNT; i++) {
602 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
604 rxd->rx_le = &rd->msk_rx_ring[prod];
605 if (msk_newbuf(sc_if, prod) != 0)
607 MSK_INC(prod, MSK_RX_RING_CNT);
610 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_ring_tag,
611 sc_if->msk_cdata.msk_rx_ring_map, BUS_DMASYNC_PREWRITE);
613 /* Update prefetch unit. */
614 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
615 CSR_WRITE_2(sc_if->msk_softc,
616 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
617 sc_if->msk_cdata.msk_rx_prod);
624 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
626 struct msk_ring_data *rd;
627 struct msk_rxdesc *rxd;
630 MSK_IF_LOCK_ASSERT(sc_if);
632 sc_if->msk_cdata.msk_rx_cons = 0;
633 sc_if->msk_cdata.msk_rx_prod = 0;
634 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
636 rd = &sc_if->msk_rdata;
637 bzero(rd->msk_jumbo_rx_ring,
638 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
639 prod = sc_if->msk_cdata.msk_rx_prod;
640 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
641 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
643 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
644 if (msk_jumbo_newbuf(sc_if, prod) != 0)
646 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
649 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
650 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
651 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
653 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
654 CSR_WRITE_2(sc_if->msk_softc,
655 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
656 sc_if->msk_cdata.msk_rx_prod);
663 msk_init_tx_ring(struct msk_if_softc *sc_if)
665 struct msk_ring_data *rd;
666 struct msk_txdesc *txd;
669 sc_if->msk_cdata.msk_tx_prod = 0;
670 sc_if->msk_cdata.msk_tx_cons = 0;
671 sc_if->msk_cdata.msk_tx_cnt = 0;
673 rd = &sc_if->msk_rdata;
674 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
675 for (i = 0; i < MSK_TX_RING_CNT; i++) {
676 txd = &sc_if->msk_cdata.msk_txdesc[i];
678 txd->tx_le = &rd->msk_tx_ring[i];
681 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
682 sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREWRITE);
686 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
688 struct msk_rx_desc *rx_le;
689 struct msk_rxdesc *rxd;
692 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
695 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
700 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
702 struct msk_rx_desc *rx_le;
703 struct msk_rxdesc *rxd;
706 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
709 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
714 msk_newbuf(struct msk_if_softc *sc_if, int idx)
716 struct msk_rx_desc *rx_le;
717 struct msk_rxdesc *rxd;
719 struct msk_dmamap_arg ctx;
720 bus_dma_segment_t seg;
723 m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
727 m->m_len = m->m_pkthdr.len = MCLBYTES;
728 m_adj(m, ETHER_ALIGN);
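/*
 * ETHER_ALIGN is 2: trimming two bytes off the front of the cluster
 * leaves the IP header that follows the 14-byte Ethernet header
 * 32-bit aligned, which the upper layers expect.
 */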
730 bzero(&ctx, sizeof(ctx));
733 if (bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_rx_tag,
734 sc_if->msk_cdata.msk_rx_sparemap, m, msk_dmamap_mbuf_cb, &ctx,
735 BUS_DMA_NOWAIT) != 0) {
739 KASSERT(ctx.nseg == 1,
740 ("%s: %d segments returned!", __func__, ctx.nseg));
742 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
743 if (rxd->rx_m != NULL) {
744 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
745 BUS_DMASYNC_POSTREAD);
746 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
748 map = rxd->rx_dmamap;
749 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
750 sc_if->msk_cdata.msk_rx_sparemap = map;
751 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
752 BUS_DMASYNC_PREREAD);
755 rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
757 htole32(seg.ds_len | OP_PACKET | HW_OWNER);
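/*
 * Order matters here: the buffer's DMA address is programmed first and
 * the control word is stored last, because it is the OP_PACKET |
 * HW_OWNER store that hands the list element over to the chip; the
 * chip owns it until completion is reported through the status ring.
 */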
764 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
766 struct msk_rx_desc *rx_le;
767 struct msk_rxdesc *rxd;
769 bus_dma_segment_t segs[1];
774 MGETHDR(m, MB_DONTWAIT, MT_DATA);
777 buf = msk_jalloc(sc_if);
782 /* Attach the buffer to the mbuf. */
783 MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
785 if ((m->m_flags & M_EXT) == 0) {
789 m->m_pkthdr.len = m->m_len = MSK_JLEN;
790 m_adj(m, ETHER_ALIGN);
792 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
793 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
794 BUS_DMA_NOWAIT) != 0) {
798 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
800 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
801 if (rxd->rx_m != NULL) {
802 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
803 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
804 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
807 map = rxd->rx_dmamap;
808 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
809 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
810 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
811 BUS_DMASYNC_PREREAD);
814 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
816 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
826 msk_mediachange(struct ifnet *ifp)
828 struct msk_if_softc *sc_if = ifp->if_softc;
829 struct mii_data *mii;
831 mii = device_get_softc(sc_if->msk_miibus);
838 * Report current media status.
841 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
843 struct msk_if_softc *sc_if = ifp->if_softc;
844 struct mii_data *mii;
846 mii = device_get_softc(sc_if->msk_miibus);
849 ifmr->ifm_active = mii->mii_media_active;
850 ifmr->ifm_status = mii->mii_media_status;
854 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
856 struct msk_if_softc *sc_if;
858 struct mii_data *mii;
861 sc_if = ifp->if_softc;
862 ifr = (struct ifreq *)data;
868 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
872 if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
873 ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
877 ifp->if_mtu = ifr->ifr_mtu;
878 if ((ifp->if_flags & IFF_RUNNING) != 0)
886 if (ifp->if_flags & IFF_UP) {
887 if (ifp->if_flags & IFF_RUNNING) {
888 if (((ifp->if_flags ^ sc_if->msk_if_flags)
889 & IFF_PROMISC) != 0) {
890 msk_setpromisc(sc_if);
894 if (sc_if->msk_detach == 0)
898 if (ifp->if_flags & IFF_RUNNING)
901 sc_if->msk_if_flags = ifp->if_flags;
906 if (ifp->if_flags & IFF_RUNNING)
912 mii = device_get_softc(sc_if->msk_miibus);
913 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
917 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
918 if ((mask & IFCAP_TXCSUM) != 0) {
919 ifp->if_capenable ^= IFCAP_TXCSUM;
920 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
921 (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
922 ifp->if_hwassist |= MSK_CSUM_FEATURES;
924 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
927 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
928 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
929 msk_setvlan(sc_if, ifp);
933 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
934 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
936 * In Yukon EC Ultra, TSO & checksum offload is not
937 * supported for jumbo frames.
939 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
940 ifp->if_capenable &= ~IFCAP_TXCSUM;
945 error = ether_ioctl(ifp, command, data);
953 mskc_probe(device_t dev)
955 const struct msk_product *mp;
956 uint16_t vendor, devid;
958 vendor = pci_get_vendor(dev);
959 devid = pci_get_device(dev);
960 for (mp = msk_products; mp->msk_name != NULL; ++mp) {
961 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
962 device_set_desc(dev, mp->msk_name);
970 mskc_setup_rambuffer(struct msk_softc *sc)
976 /* Get adapter SRAM size. */
977 val = CSR_READ_1(sc, B2_E_0);
978 sc->msk_ramsize = (val == 0) ? 128 : val * 4;
980 device_printf(sc->msk_dev,
981 "RAM buffer size : %dKB\n", sc->msk_ramsize);
984 * Give the receiver 2/3 of the memory and round down to a multiple
985 * of 1024. The Tx/Rx RAM buffer size of Yukon II should be a multiple
988 sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
989 sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
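/*
 * Worked example (hypothetical 48KB SRAM, i.e. B2_E_0 reading 12):
 * msk_rxqsize = rounddown(48 * 1024 * 2 / 3, 1024) = 32768 (32KB) and
 * msk_txqsize = 48 * 1024 - 32768 = 16384 (16KB), so the loop below
 * carves a 32KB Rx region and a 16KB Tx region out of the SRAM for
 * each port, back to back.
 */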
990 for (i = 0, next = 0; i < sc->msk_num_port; i++) {
991 sc->msk_rxqstart[i] = next;
992 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
993 next = sc->msk_rxqend[i] + 1;
994 sc->msk_txqstart[i] = next;
995 sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
996 next = sc->msk_txqend[i] + 1;
998 device_printf(sc->msk_dev,
999 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1000 sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
1002 device_printf(sc->msk_dev,
1003 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1004 sc->msk_txqsize / 1024, sc->msk_txqstart[i],
1013 mskc_phy_power(struct msk_softc *sc, int mode)
1019 case MSK_PHY_POWERUP:
1020 /* Switch power to VCC (WA for VAUX problem). */
1021 CSR_WRITE_1(sc, B0_POWER_CTRL,
1022 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1023 /* Disable Core Clock Division, set Clock Select to 0. */
1024 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1027 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1028 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1029 /* Enable bits are inverted. */
1030 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1031 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1032 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1035 * Enable PCI & Core Clock, enable clock gating for both Links.
1037 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1039 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1040 val &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
1041 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1042 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1043 /* Deassert Low Power for 1st PHY. */
1044 val |= PCI_Y2_PHY1_COMA;
1045 if (sc->msk_num_port > 1)
1046 val |= PCI_Y2_PHY2_COMA;
1047 } else if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
1050 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1052 /* Enable all clocks. */
1053 pci_write_config(sc->msk_dev, PCI_OUR_REG_3, 0, 4);
1054 our = pci_read_config(sc->msk_dev, PCI_OUR_REG_4, 4);
1055 our &= (PCI_FORCE_ASPM_REQUEST|PCI_ASPM_GPHY_LINK_DOWN|
1056 PCI_ASPM_INT_FIFO_EMPTY|PCI_ASPM_CLKRUN_REQUEST);
1057 /* Set all bits to 0 except bits 15..12. */
1058 pci_write_config(sc->msk_dev, PCI_OUR_REG_4, our, 4);
1059 /* Set to default value. */
1060 pci_write_config(sc->msk_dev, PCI_OUR_REG_5, 0, 4);
1062 /* Release PHY from PowerDown/COMA mode. */
1063 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1064 for (i = 0; i < sc->msk_num_port; i++) {
1065 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1067 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1071 case MSK_PHY_POWERDOWN:
1072 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1073 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1074 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1075 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1076 val &= ~PCI_Y2_PHY1_COMA;
1077 if (sc->msk_num_port > 1)
1078 val &= ~PCI_Y2_PHY2_COMA;
1080 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1082 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1083 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1084 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1085 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1086 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1087 /* Enable bits are inverted. */
1091 * Disable PCI & Core Clock, disable clock gating for
1094 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1095 CSR_WRITE_1(sc, B0_POWER_CTRL,
1096 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1104 mskc_reset(struct msk_softc *sc)
1111 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1114 if (sc->msk_hw_id < CHIP_ID_YUKON_XL) {
1115 CSR_WRITE_4(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1116 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1119 * Since we disabled ASF, S/W reset is required for Power Management.
1121 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1122 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1124 /* Clear all error bits in the PCI status register. */
1125 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1126 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1128 pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1129 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1130 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1131 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1133 switch (sc->msk_bustype) {
1135 /* Clear all PEX errors. */
1136 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1137 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1138 if ((val & PEX_RX_OV) != 0) {
1139 sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1140 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1145 /* Set Cache Line Size to 2 (8 bytes) if configured to 0. */
1146 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1148 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1149 if (sc->msk_bustype == MSK_PCIX_BUS) {
1150 /* Set Cache Line Size opt. */
1151 val = pci_read_config(sc->msk_dev, PCI_OUR_REG_1, 4);
1153 pci_write_config(sc->msk_dev, PCI_OUR_REG_1, val, 4);
1157 /* Set PHY power state. */
1158 mskc_phy_power(sc, MSK_PHY_POWERUP);
1160 /* Reset GPHY/GMAC Control */
1161 for (i = 0; i < sc->msk_num_port; i++) {
1162 /* GPHY Control reset. */
1163 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1164 CSR_WRITE_4(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1165 /* GMAC Control reset. */
1166 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1167 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1168 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1170 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1173 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1175 /* Clear TWSI IRQ. */
1176 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1178 /* Turn off hardware timer. */
1179 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1180 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1182 /* Turn off descriptor polling. */
1183 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1185 /* Turn off time stamps. */
1186 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1187 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1189 /* Configure timeout values. */
1190 for (i = 0; i < sc->msk_num_port; i++) {
1191 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_SET);
1192 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
1193 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1195 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1197 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1199 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1201 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1203 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1205 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1207 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1209 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1211 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1213 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1215 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1219 /* Disable all interrupts. */
1220 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1221 CSR_READ_4(sc, B0_HWE_IMSK);
1222 CSR_WRITE_4(sc, B0_IMSK, 0);
1223 CSR_READ_4(sc, B0_IMSK);
1226 * On dual port PCI-X cards, there is a problem where status
1227 * can be received out of order due to split transactions.
1229 if (sc->msk_bustype == MSK_PCIX_BUS && sc->msk_num_port > 1) {
1233 pcix = pci_get_pcixcap_ptr(sc->msk_dev);
1235 pcix_cmd = pci_read_config(sc->msk_dev, pcix + 2, 2);
1236 /* Clear Max Outstanding Split Transactions. */
1238 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1239 pci_write_config(sc->msk_dev, pcix + 2, pcix_cmd, 2);
1240 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1242 if (sc->msk_bustype == MSK_PEX_BUS) {
1245 v = pci_read_config(sc->msk_dev, PEX_DEV_CTRL, 2);
1246 /* Change Max. Read Request Size to 4096 bytes. */
1247 v &= ~PEX_DC_MAX_RRS_MSK;
1248 v |= PEX_DC_MAX_RD_RQ_SIZE(5);
1249 pci_write_config(sc->msk_dev, PEX_DEV_CTRL, v, 2);
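/*
 * PCIe encodes the Max Read Request Size field as 128 << n, so the
 * encoded value 5 written above selects 128 << 5 = 4096 bytes.
 */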
1250 width = pci_read_config(sc->msk_dev, PEX_LNK_STAT, 2);
1251 width = (width & PEX_LS_LINK_WI_MSK) >> 4;
1252 v = pci_read_config(sc->msk_dev, PEX_LNK_CAP, 2);
1253 v = (v & PEX_LS_LINK_WI_MSK) >> 4;
1255 device_printf(sc->msk_dev,
1256 "negotiated width of link(x%d) != "
1257 "max. width of link(x%d)\n", width, v);
1261 /* Clear status list. */
1262 bzero(sc->msk_stat_ring,
1263 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1264 sc->msk_stat_cons = 0;
1265 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
1266 BUS_DMASYNC_PREWRITE);
1267 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1268 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1269 /* Set the status list base address. */
1270 addr = sc->msk_stat_ring_paddr;
1271 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1272 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1273 /* Set the status list last index. */
1274 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1275 if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
1276 sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1277 /* WA for dev. #4.3 */
1278 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1279 /* WA for dev. #4.18 */
1280 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1281 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1283 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1284 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1285 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1286 sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
1287 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
1289 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
1290 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1293 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1295 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
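/*
 * MSK_USECS() is assumed to scale microseconds by the core clock in
 * MHz; e.g. with msk_clock = 125, MSK_USECS(sc, 1000) is 125000 core
 * clock ticks, i.e. a 1ms Tx timer interval.
 */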
1297 /* Enable status unit. */
1298 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1300 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1301 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1302 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1306 msk_probe(device_t dev)
1308 struct msk_softc *sc = device_get_softc(device_get_parent(dev));
1312 * Not much to do here. We always know there will be
1313 * at least one GMAC present, and if there are two,
1314 * mskc_attach() will create a second device instance
1317 ksnprintf(desc, sizeof(desc),
1318 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1319 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1321 device_set_desc_copy(dev, desc);
1327 msk_attach(device_t dev)
1329 struct msk_softc *sc = device_get_softc(device_get_parent(dev));
1330 struct msk_if_softc *sc_if = device_get_softc(dev);
1331 struct ifnet *ifp = &sc_if->arpcom.ac_if;
1333 uint8_t eaddr[ETHER_ADDR_LEN];
1335 port = *(int *)device_get_ivars(dev);
1336 KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);
1338 kfree(device_get_ivars(dev), M_DEVBUF);
1339 device_set_ivars(dev, NULL);
1341 callout_init(&sc_if->msk_tick_ch);
1342 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1344 sc_if->msk_if_dev = dev;
1345 sc_if->msk_port = port;
1346 sc_if->msk_softc = sc;
1347 sc_if->msk_ifp = ifp;
1348 sc->msk_if[port] = sc_if;
1350 /* Setup Tx/Rx queue register offsets. */
1351 if (port == MSK_PORT_A) {
1352 sc_if->msk_txq = Q_XA1;
1353 sc_if->msk_txsq = Q_XS1;
1354 sc_if->msk_rxq = Q_R1;
1356 sc_if->msk_txq = Q_XA2;
1357 sc_if->msk_txsq = Q_XS2;
1358 sc_if->msk_rxq = Q_R2;
1361 error = msk_txrx_dma_alloc(sc_if);
1365 ifp->if_softc = sc_if;
1366 ifp->if_mtu = ETHERMTU;
1367 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1368 ifp->if_init = msk_init;
1369 ifp->if_ioctl = msk_ioctl;
1370 ifp->if_start = msk_start;
1371 ifp->if_watchdog = msk_watchdog;
1372 ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1373 ifq_set_ready(&ifp->if_snd);
1377 * The IFCAP_RXCSUM capability is intentionally disabled because the
1378 * Rx checksum offload logic is seriously buggy across the entire
1379 * Yukon II family. A workaround exists that makes it work sometimes,
1380 * but it has to inspect OP code sequences to verify that the OP code
1381 * is correct, and in some cases it must compute the IP/TCP/UDP
1382 * checksum in the driver to verify the checksum computed by the
1383 * hardware. If software has to compute a checksum just to verify
1384 * the hardware's checksum, there is no point in having the hardware
1385 * compute it at all, so no effort is spent making Rx checksum
1386 * offload work on Yukon II hardware.
1388 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
1389 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
1390 ifp->if_hwassist = MSK_CSUM_FEATURES;
1391 ifp->if_capenable = ifp->if_capabilities;
1395 * Get station address for this interface. Note that
1396 * dual port cards actually come with three station
1397 * addresses: one for each port, plus an extra. The
1398 * extra one is used by the SysKonnect driver software
1399 * as a 'virtual' station address for when both ports
1400 * are operating in failover mode. Currently we don't
1401 * use this extra address.
1403 for (i = 0; i < ETHER_ADDR_LEN; i++)
1404 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
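/*
 * Note the (port * 8) stride: the station addresses are stored in
 * consecutive 8-byte slots starting at B2_MAC_1, apparently 6 address
 * bytes plus 2 bytes of padding per slot.
 */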
1406 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
1411 error = mii_phy_probe(dev, &sc_if->msk_miibus,
1412 msk_mediachange, msk_mediastatus);
1414 device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1419 * Call MI attach routine. Can't hold locks when calling into ether_*.
1421 ether_ifattach(ifp, eaddr, &sc->msk_serializer);
1424 * Tell the upper layer(s) we support long frames.
1425 * Must appear after the call to ether_ifattach() because
1426 * ether_ifattach() sets ifi_hdrlen to the default value.
1428 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1434 sc->msk_if[port] = NULL;
1439 * Attach the interface. Allocate softc structures, do ifmedia
1440 * setup and ethernet/BPF attach.
1443 mskc_attach(device_t dev)
1445 struct msk_softc *sc;
1446 int error, *port, cpuid;
1448 sc = device_get_softc(dev);
1450 lwkt_serialize_init(&sc->msk_serializer);
1453 * Initialize sysctl variables
1455 sc->msk_process_limit = mskc_process_limit;
1456 sc->msk_intr_rate = mskc_intr_rate;
1458 #ifndef BURN_BRIDGES
1460 * Handle power management nonsense.
1462 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1463 uint32_t irq, bar0, bar1;
1465 /* Save important PCI config data. */
1466 bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
1467 bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
1468 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1470 /* Reset the power state. */
1471 device_printf(dev, "chip is in D%d power mode "
1472 "-- setting to D0\n", pci_get_powerstate(dev));
1474 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1476 /* Restore PCI config data. */
1477 pci_write_config(dev, PCIR_BAR(0), bar0, 4);
1478 pci_write_config(dev, PCIR_BAR(1), bar1, 4);
1479 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1481 #endif /* BURN_BRIDGES */
1484 * Map control/status registers.
1486 pci_enable_busmaster(dev);
1489 * Allocate I/O resource
1491 #ifdef MSK_USEIOSPACE
1492 sc->msk_res_type = SYS_RES_IOPORT;
1493 sc->msk_res_rid = PCIR_BAR(1);
1495 sc->msk_res_type = SYS_RES_MEMORY;
1496 sc->msk_res_rid = PCIR_BAR(0);
1498 sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
1499 &sc->msk_res_rid, RF_ACTIVE);
1500 if (sc->msk_res == NULL) {
1501 if (sc->msk_res_type == SYS_RES_MEMORY) {
1502 sc->msk_res_type = SYS_RES_IOPORT;
1503 sc->msk_res_rid = PCIR_BAR(1);
1505 sc->msk_res_type = SYS_RES_MEMORY;
1506 sc->msk_res_rid = PCIR_BAR(0);
1508 sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
1511 if (sc->msk_res == NULL) {
1512 device_printf(dev, "couldn't allocate %s resources\n",
1513 sc->msk_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
1517 sc->msk_res_bt = rman_get_bustag(sc->msk_res);
1518 sc->msk_res_bh = rman_get_bushandle(sc->msk_res);
1523 sc->msk_irq_rid = 0;
1524 sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1526 RF_SHAREABLE | RF_ACTIVE);
1527 if (sc->msk_irq == NULL) {
1528 device_printf(dev, "couldn't allocate IRQ resources\n");
1533 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1534 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1535 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1536 /* Bail out if chip is not recognized. */
1537 if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1538 sc->msk_hw_id > CHIP_ID_YUKON_FE) {
1539 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1540 sc->msk_hw_id, sc->msk_hw_rev);
1546 * Create sysctl tree
1548 sysctl_ctx_init(&sc->msk_sysctl_ctx);
1549 sc->msk_sysctl_tree = SYSCTL_ADD_NODE(&sc->msk_sysctl_ctx,
1550 SYSCTL_STATIC_CHILDREN(_hw),
1552 device_get_nameunit(dev),
1554 if (sc->msk_sysctl_tree == NULL) {
1555 device_printf(dev, "can't add sysctl node\n");
1560 SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
1561 SYSCTL_CHILDREN(sc->msk_sysctl_tree),
1562 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1563 &sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
1564 "I", "max number of Rx events to process");
1565 SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
1566 SYSCTL_CHILDREN(sc->msk_sysctl_tree),
1567 OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
1568 sc, 0, mskc_sysctl_intr_rate,
1569 "I", "max number of interrupt per second");
1572 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1573 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1574 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1575 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1576 sc->msk_coppertype = 0;
1578 sc->msk_coppertype = 1;
1579 /* Check number of MACs. */
1580 sc->msk_num_port = 1;
1581 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1583 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1587 /* Check bus type. */
1588 if (pci_is_pcie(sc->msk_dev) == 0)
1589 sc->msk_bustype = MSK_PEX_BUS;
1590 else if (pci_is_pcix(sc->msk_dev) == 0)
1591 sc->msk_bustype = MSK_PCIX_BUS;
1593 sc->msk_bustype = MSK_PCI_BUS;
1595 switch (sc->msk_hw_id) {
1596 case CHIP_ID_YUKON_EC:
1597 case CHIP_ID_YUKON_EC_U:
1598 sc->msk_clock = 125; /* 125 MHz */
1600 case CHIP_ID_YUKON_FE:
1601 sc->msk_clock = 100; /* 100 MHz */
1603 case CHIP_ID_YUKON_XL:
1604 sc->msk_clock = 156; /* 156 MHz */
1607 sc->msk_clock = 156; /* 156 MHz */
1611 error = mskc_status_dma_alloc(sc);
1615 /* Set base interrupt mask. */
1616 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1617 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1618 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1620 /* Reset the adapter. */
1623 error = mskc_setup_rambuffer(sc);
1627 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1628 if (sc->msk_devs[MSK_PORT_A] == NULL) {
1629 device_printf(dev, "failed to add child for PORT_A\n");
1633 port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
1635 device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
1637 if (sc->msk_num_port > 1) {
1638 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1639 if (sc->msk_devs[MSK_PORT_B] == NULL) {
1640 device_printf(dev, "failed to add child for PORT_B\n");
1644 port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
1646 device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
1649 bus_generic_attach(dev);
1651 error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
1652 mskc_intr, sc, &sc->msk_intrhand,
1653 &sc->msk_serializer);
1655 device_printf(dev, "couldn't set up interrupt handler\n");
1659 cpuid = ithread_cpuid(rman_get_start(sc->msk_irq));
1660 KKASSERT(cpuid >= 0 && cpuid < ncpus);
1662 if (sc->msk_if[0] != NULL)
1663 sc->msk_if[0]->msk_ifp->if_cpuid = cpuid;
1664 if (sc->msk_if[1] != NULL)
1665 sc->msk_if[1]->msk_ifp->if_cpuid = cpuid;
1673 * Shutdown hardware and free up resources. This can be called any
1674 * time after the mutex has been initialized. It is called in both
1675 * the error case in attach and the normal detach case so it needs
1676 * to be careful about only freeing resources that have actually been
1680 msk_detach(device_t dev)
1682 struct msk_if_softc *sc_if = device_get_softc(dev);
1684 if (device_is_attached(dev)) {
1685 struct msk_softc *sc = sc_if->msk_softc;
1686 struct ifnet *ifp = &sc_if->arpcom.ac_if;
1688 lwkt_serialize_enter(ifp->if_serializer);
1690 if (sc->msk_intrhand != NULL) {
1691 if (sc->msk_if[MSK_PORT_A] != NULL)
1692 msk_stop(sc->msk_if[MSK_PORT_A]);
1693 if (sc->msk_if[MSK_PORT_B] != NULL)
1694 msk_stop(sc->msk_if[MSK_PORT_B]);
1696 bus_teardown_intr(sc->msk_dev, sc->msk_irq,
1698 sc->msk_intrhand = NULL;
1701 lwkt_serialize_exit(ifp->if_serializer);
1703 ether_ifdetach(ifp);
1706 if (sc_if->msk_miibus != NULL)
1707 device_delete_child(dev, sc_if->msk_miibus);
1709 msk_txrx_dma_free(sc_if);
1714 mskc_detach(device_t dev)
1716 struct msk_softc *sc = device_get_softc(dev);
1720 if (device_is_attached(dev)) {
1721 KASSERT(sc->msk_intrhand == NULL,
1722 ("intr is not torn down yet\n"));
1726 for (i = 0; i < sc->msk_num_port; ++i) {
1727 if (sc->msk_devs[i] != NULL) {
1728 port = device_get_ivars(sc->msk_devs[i]);
1730 kfree(port, M_DEVBUF);
1731 device_set_ivars(sc->msk_devs[i], NULL);
1733 device_delete_child(dev, sc->msk_devs[i]);
1737 /* Disable all interrupts. */
1738 CSR_WRITE_4(sc, B0_IMSK, 0);
1739 CSR_READ_4(sc, B0_IMSK);
1740 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1741 CSR_READ_4(sc, B0_HWE_IMSK);
1744 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1746 /* Put the hardware into reset. */
1747 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1749 mskc_status_dma_free(sc);
1751 if (sc->msk_irq != NULL) {
1752 bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid,
1755 if (sc->msk_res != NULL) {
1756 bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid,
1760 if (sc->msk_sysctl_tree != NULL)
1761 sysctl_ctx_free(&sc->msk_sysctl_ctx);
1767 msk_dmamap_mbuf_cb(void *arg, bus_dma_segment_t *segs, int nseg,
1768 bus_size_t mapsz __unused, int error)
1770 struct msk_dmamap_arg *ctx = arg;
1776 if (ctx->nseg < nseg) {
1782 for (i = 0; i < ctx->nseg; ++i)
1783 ctx->segs[i] = segs[i];
1787 msk_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1789 struct msk_dmamap_arg *ctx = arg;
1795 KKASSERT(nseg <= ctx->nseg);
1798 for (i = 0; i < ctx->nseg; ++i)
1799 ctx->segs[i] = segs[i];
1802 /* Create status DMA region. */
1804 mskc_status_dma_alloc(struct msk_softc *sc)
1806 struct msk_dmamap_arg ctx;
1807 bus_dma_segment_t seg;
1810 error = bus_dma_tag_create(
1811 NULL, /* XXX parent */
1812 MSK_STAT_ALIGN, 0, /* alignment, boundary */
1813 BUS_SPACE_MAXADDR, /* lowaddr */
1814 BUS_SPACE_MAXADDR, /* highaddr */
1815 NULL, NULL, /* filter, filterarg */
1816 MSK_STAT_RING_SZ, /* maxsize */
1818 MSK_STAT_RING_SZ, /* maxsegsize */
1822 device_printf(sc->msk_dev,
1823 "failed to create status DMA tag\n");
1827 /* Allocate DMA'able memory and load the DMA map for status ring. */
1828 error = bus_dmamem_alloc(sc->msk_stat_tag,
1829 (void **)&sc->msk_stat_ring,
1830 BUS_DMA_WAITOK | BUS_DMA_ZERO,
1833 device_printf(sc->msk_dev,
1834 "failed to allocate DMA'able memory for status ring\n");
1835 bus_dma_tag_destroy(sc->msk_stat_tag);
1836 sc->msk_stat_tag = NULL;
1840 bzero(&ctx, sizeof(ctx));
1843 error = bus_dmamap_load(sc->msk_stat_tag, sc->msk_stat_map,
1844 sc->msk_stat_ring, MSK_STAT_RING_SZ,
1845 msk_dmamap_cb, &ctx, 0);
1847 device_printf(sc->msk_dev,
1848 "failed to load DMA'able memory for status ring\n");
1849 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
1851 bus_dma_tag_destroy(sc->msk_stat_tag);
1852 sc->msk_stat_tag = NULL;
1855 sc->msk_stat_ring_paddr = seg.ds_addr;
1861 mskc_status_dma_free(struct msk_softc *sc)
1863 /* Destroy status block. */
1864 if (sc->msk_stat_tag) {
1865 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1866 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
1868 bus_dma_tag_destroy(sc->msk_stat_tag);
1869 sc->msk_stat_tag = NULL;
1874 msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
1878 struct msk_rxdesc *jrxd;
1879 struct msk_jpool_entry *entry;
1883 /* Create parent DMA tag. */
1886 * It seems that Yukon II supports full 64-bit DMA operations, but
1887 * it needs two descriptors (list elements) per 64-bit DMA operation.
1888 * Since we don't know in advance which DMA address mapping (32-bit
1889 * or 64-bit) each mbuf will use, we limit the DMA space to the
1890 * 32-bit address range. Otherwise we would have to check which DMA
1891 * address is in use and chain another descriptor for the 64-bit
1892 * operation, which also makes the descriptor ring size variable.
1893 * Limiting DMA addresses to the 32-bit address space greatly
1894 * simplifies descriptor handling and may even increase performance
1895 * a bit due to more efficient handling of descriptors.
1896 * Apart from harassing the checksum offloading mechanism, it seems
1897 * a really bad idea to use a separate descriptor for 64-bit
1898 * DMA just to save a little descriptor memory. Anyway, I've
1899 * never seen such an exotic scheme on Ethernet interface hardware.
1901 error = bus_dma_tag_create(
1903 1, 0, /* alignment, boundary */
1904 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1905 BUS_SPACE_MAXADDR, /* highaddr */
1906 NULL, NULL, /* filter, filterarg */
1907 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1909 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1911 &sc_if->msk_cdata.msk_parent_tag);
1913 device_printf(sc_if->msk_if_dev,
1914 "failed to create parent DMA tag\n");
1918 /* Create DMA stuffs for Tx ring. */
1919 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
1920 &sc_if->msk_cdata.msk_tx_ring_tag,
1921 (void **)&sc_if->msk_rdata.msk_tx_ring,
1922 &sc_if->msk_rdata.msk_tx_ring_paddr,
1923 &sc_if->msk_cdata.msk_tx_ring_map);
1925 device_printf(sc_if->msk_if_dev,
1926 "failed to create TX ring DMA stuffs\n");
1930 /* Create DMA stuffs for Rx ring. */
1931 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ,
1932 &sc_if->msk_cdata.msk_rx_ring_tag,
1933 (void **)&sc_if->msk_rdata.msk_rx_ring,
1934 &sc_if->msk_rdata.msk_rx_ring_paddr,
1935 &sc_if->msk_cdata.msk_rx_ring_map);
1937 device_printf(sc_if->msk_if_dev,
1938 "failed to create RX ring DMA stuffs\n");
1942 /* Create tag for Tx buffers. */
1943 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1944 1, 0, /* alignment, boundary */
1945 BUS_SPACE_MAXADDR, /* lowaddr */
1946 BUS_SPACE_MAXADDR, /* highaddr */
1947 NULL, NULL, /* filter, filterarg */
1948 MSK_TSO_MAXSIZE, /* maxsize */
1949 MSK_MAXTXSEGS, /* nsegments */
1950 MSK_TSO_MAXSGSIZE, /* maxsegsize */
1952 &sc_if->msk_cdata.msk_tx_tag);
1954 device_printf(sc_if->msk_if_dev,
1955 "failed to create Tx DMA tag\n");
1959 /* Create DMA maps for Tx buffers. */
1960 for (i = 0; i < MSK_TX_RING_CNT; i++) {
1961 struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i];
1963 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 0,
1966 device_printf(sc_if->msk_if_dev,
1967 "failed to create %dth Tx dmamap\n", i);
1969 for (j = 0; j < i; ++j) {
1970 txd = &sc_if->msk_cdata.msk_txdesc[j];
1971 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
1974 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
1975 sc_if->msk_cdata.msk_tx_tag = NULL;
1981 /* Create tag for Rx buffers. */
1982 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1983 1, 0, /* alignment, boundary */
1984 BUS_SPACE_MAXADDR, /* lowaddr */
1985 BUS_SPACE_MAXADDR, /* highaddr */
1986 NULL, NULL, /* filter, filterarg */
1987 MCLBYTES, /* maxsize */
1989 MCLBYTES, /* maxsegsize */
1991 &sc_if->msk_cdata.msk_rx_tag);
1993 device_printf(sc_if->msk_if_dev,
1994 "failed to create Rx DMA tag\n");
1998 /* Create DMA maps for Rx buffers. */
1999 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2000 &sc_if->msk_cdata.msk_rx_sparemap);
2002 device_printf(sc_if->msk_if_dev,
2003 "failed to create spare Rx dmamap\n");
2004 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2005 sc_if->msk_cdata.msk_rx_tag = NULL;
2008 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2009 struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2011 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 0,
2014 device_printf(sc_if->msk_if_dev,
2015 "failed to create %dth Rx dmamap\n", i);
2017 for (j = 0; j < i; ++j) {
2018 rxd = &sc_if->msk_cdata.msk_rxdesc[j];
2019 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2022 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2023 sc_if->msk_cdata.msk_rx_tag = NULL;
2030 SLIST_INIT(&sc_if->msk_jfree_listhead);
2031 SLIST_INIT(&sc_if->msk_jinuse_listhead);
2033 /* Create tag for jumbo Rx ring. */
2034 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2035 MSK_RING_ALIGN, 0, /* alignment, boundary */
2036 BUS_SPACE_MAXADDR, /* lowaddr */
2037 BUS_SPACE_MAXADDR, /* highaddr */
2038 NULL, NULL, /* filter, filterarg */
2039 MSK_JUMBO_RX_RING_SZ, /* maxsize */
2041 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
2043 NULL, NULL, /* lockfunc, lockarg */
2044 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2046 device_printf(sc_if->msk_if_dev,
2047 "failed to create jumbo Rx ring DMA tag\n");
2051 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2052 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2053 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2054 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2055 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2057 device_printf(sc_if->msk_if_dev,
2058 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2062 ctx.msk_busaddr = 0;
2063 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2064 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2065 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2066 msk_dmamap_cb, &ctx, 0);
2068 device_printf(sc_if->msk_if_dev,
2069 "failed to load DMA'able memory for jumbo Rx ring\n");
2072 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2074 /* Create tag for jumbo buffer blocks. */
2075 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2076 PAGE_SIZE, 0, /* alignment, boundary */
2077 BUS_SPACE_MAXADDR, /* lowaddr */
2078 BUS_SPACE_MAXADDR, /* highaddr */
2079 NULL, NULL, /* filter, filterarg */
2080 MSK_JMEM, /* maxsize */
2082 MSK_JMEM, /* maxsegsize */
2084 NULL, NULL, /* lockfunc, lockarg */
2085 &sc_if->msk_cdata.msk_jumbo_tag);
2087 device_printf(sc_if->msk_if_dev,
2088 "failed to create jumbo Rx buffer block DMA tag\n");
2092 /* Create tag for jumbo Rx buffers. */
2093 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2094 PAGE_SIZE, 0, /* alignment, boundary */
2095 BUS_SPACE_MAXADDR, /* lowaddr */
2096 BUS_SPACE_MAXADDR, /* highaddr */
2097 NULL, NULL, /* filter, filterarg */
2098 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */
2099 MSK_MAXRXSEGS, /* nsegments */
2100 MSK_JLEN, /* maxsegsize */
2102 NULL, NULL, /* lockfunc, lockarg */
2103 &sc_if->msk_cdata.msk_jumbo_rx_tag);
2105 device_printf(sc_if->msk_if_dev,
2106 "failed to create jumbo Rx DMA tag\n");
2110 /* Create DMA maps for jumbo Rx buffers. */
2111 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2112 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2113 device_printf(sc_if->msk_if_dev,
2114 "failed to create spare jumbo Rx dmamap\n");
2117 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2118 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2120 jrxd->rx_dmamap = NULL;
2121 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2124 device_printf(sc_if->msk_if_dev,
2125 "failed to create jumbo Rx dmamap\n");
2130 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */
2131 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
2132 (void **)&sc_if->msk_rdata.msk_jumbo_buf,
2133 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2134 &sc_if->msk_cdata.msk_jumbo_map);
2136 device_printf(sc_if->msk_if_dev,
2137 "failed to allocate DMA'able memory for jumbo buf\n");
2141 ctx.msk_busaddr = 0;
2142 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
2143 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
2144 MSK_JMEM, msk_dmamap_cb, &ctx, 0);
2146 device_printf(sc_if->msk_if_dev,
2147 "failed to load DMA'able memory for jumbobuf\n");
2150 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;
2153 * Now divide it up into 9K pieces and save the addresses in an array.
2156 ptr = sc_if->msk_rdata.msk_jumbo_buf;
2157 for (i = 0; i < MSK_JSLOTS; i++) {
2158 sc_if->msk_cdata.msk_jslots[i] = ptr;
2160 entry = malloc(sizeof(struct msk_jpool_entry),
2161 M_DEVBUF, M_NOWAIT);
2162 if (entry == NULL) {
2163 device_printf(sc_if->msk_if_dev,
2164 "no memory for jumbo buffers!\n");
2169 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2177 msk_txrx_dma_free(struct msk_if_softc *sc_if)
2179 struct msk_txdesc *txd;
2180 struct msk_rxdesc *rxd;
2182 struct msk_rxdesc *jrxd;
2183 struct msk_jpool_entry *entry;
2188 MSK_JLIST_LOCK(sc_if);
2189 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
2190 device_printf(sc_if->msk_if_dev,
2191 "asked to free buffer that is in use!\n");
2192 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2193 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2197 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
2198 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2199 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2200 free(entry, M_DEVBUF);
2202 MSK_JLIST_UNLOCK(sc_if);
2204 /* Destroy jumbo buffer block. */
2205 if (sc_if->msk_cdata.msk_jumbo_map)
2206 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
2207 sc_if->msk_cdata.msk_jumbo_map);
2209 if (sc_if->msk_rdata.msk_jumbo_buf) {
2210 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
2211 sc_if->msk_rdata.msk_jumbo_buf,
2212 sc_if->msk_cdata.msk_jumbo_map);
2213 sc_if->msk_rdata.msk_jumbo_buf = NULL;
2214 sc_if->msk_cdata.msk_jumbo_map = NULL;
2217 /* Jumbo Rx ring. */
2218 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2219 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2220 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2221 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2222 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2223 sc_if->msk_rdata.msk_jumbo_rx_ring)
2224 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2225 sc_if->msk_rdata.msk_jumbo_rx_ring,
2226 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2227 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2228 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2229 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2230 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2233 /* Jumbo Rx buffers. */
2234 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2235 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2236 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2237 if (jrxd->rx_dmamap) {
2239 sc_if->msk_cdata.msk_jumbo_rx_tag,
2241 jrxd->rx_dmamap = NULL;
2244 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2245 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2246 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2247 sc_if->msk_cdata.msk_jumbo_rx_sparemap = NULL;
2249 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2250 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2255 msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
2256 sc_if->msk_rdata.msk_tx_ring,
2257 sc_if->msk_cdata.msk_tx_ring_map);
2260 msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag,
2261 sc_if->msk_rdata.msk_rx_ring,
2262 sc_if->msk_cdata.msk_rx_ring_map);
2265 if (sc_if->msk_cdata.msk_tx_tag) {
2266 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2267 txd = &sc_if->msk_cdata.msk_txdesc[i];
2268 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2271 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2272 sc_if->msk_cdata.msk_tx_tag = NULL;
2276 if (sc_if->msk_cdata.msk_rx_tag) {
2277 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2278 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2279 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2282 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2283 sc_if->msk_cdata.msk_rx_sparemap);
2284 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2285 sc_if->msk_cdata.msk_rx_tag = NULL;
2288 if (sc_if->msk_cdata.msk_parent_tag) {
2289 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2290 sc_if->msk_cdata.msk_parent_tag = NULL;
2296 * Allocate a jumbo buffer.
2299 msk_jalloc(struct msk_if_softc *sc_if)
2301 struct msk_jpool_entry *entry;
2303 MSK_JLIST_LOCK(sc_if);
2305 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2307 if (entry == NULL) {
2308 MSK_JLIST_UNLOCK(sc_if);
2312 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2313 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
2315 MSK_JLIST_UNLOCK(sc_if);
2317 return (sc_if->msk_cdata.msk_jslots[entry->slot]);
2321 * Release a jumbo buffer.
2324 msk_jfree(void *buf, void *args)
2326 struct msk_if_softc *sc_if;
2327 struct msk_jpool_entry *entry;
2330 /* Extract the softc struct pointer. */
2331 sc_if = (struct msk_if_softc *)args;
2332 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2334 MSK_JLIST_LOCK(sc_if);
2335 /* Calculate the slot this buffer belongs to. */
2336 i = ((vm_offset_t)buf
2337 - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2338 KASSERT(i >= 0 && i < MSK_JSLOTS,
2339 ("%s: asked to free buffer that we don't manage!", __func__));
2341 entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2342 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2344 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2345 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2346 if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
2349 MSK_JLIST_UNLOCK(sc_if);
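/*
 * A minimal sketch of how the two halves above pair up when filling a
 * jumbo Rx slot (the real code lives in msk_jumbo_newbuf(); attaching
 * the buffer to an mbuf as external storage is elided here since that
 * API differs between the BSDs):
 */
#if 0
	void *buf;

	if ((buf = msk_jalloc(sc_if)) == NULL)
		return (ENOBUFS);	/* slot pool exhausted */
	/* ... attach buf as external mbuf storage, with msk_jfree()
	 * as the free routine and sc_if as the argument it casts back ... */
#endif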
2354 * This is a copy of ath_defrag() from ath(4).
2356 * Defragment an mbuf chain, returning at most maxfrags separate
2357 * mbufs+clusters. If this is not possible NULL is returned and
2358 * the original mbuf chain is left in its present (potentially
2359 * modified) state. We use two techniques: collapsing consecutive
2360 * mbufs and replacing consecutive mbufs by a cluster.
2362 static struct mbuf *
2363 msk_defrag(struct mbuf *m0, int how, int maxfrags)
2365 struct mbuf *m, *n, *n2, **prev;
2369 * Calculate the current number of frags.
2372 for (m = m0; m != NULL; m = m->m_next)
2375 * First, try to collapse mbufs. Note that we always collapse
2376 * towards the front so we don't need to deal with moving the
2377 * pkthdr. This may be suboptimal if the first mbuf has much
2378 * less data than the following.
2386 if (n->m_len < M_TRAILINGSPACE(m)) {
2387 bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
2389 m->m_len += n->m_len;
2390 m->m_next = n->m_next;
2392 if (--curfrags <= maxfrags)
2397 KASSERT(maxfrags > 1,
2398 ("maxfrags %u, but normal collapse failed", maxfrags));
2400 * Collapse consecutive mbufs to a cluster.
2402 prev = &m0->m_next; /* NB: not the first mbuf */
2403 while ((n = *prev) != NULL) {
2404 if ((n2 = n->m_next) != NULL &&
2405 n->m_len + n2->m_len < MCLBYTES) {
2406 m = m_getcl(how, MT_DATA, 0);
2409 bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
2410 bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
2412 m->m_len = n->m_len + n2->m_len;
2413 m->m_next = n2->m_next;
2417 if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
2420 * Still not there, try the normal collapse
2421 * again before we allocate another cluster.
2428 * No place where we can collapse to a cluster; punt.
2429 * This can occur if, for example, you request 2 frags
2430 * but the packet requires that both be clusters (we
2431 * never reallocate the first mbuf to avoid moving the pkthdr).
2439 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2441 struct msk_txdesc *txd, *txd_last;
2442 struct msk_tx_desc *tx_le;
2445 struct msk_dmamap_arg ctx;
2446 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2447 uint32_t control, prod, si;
2448 uint16_t offset, tcp_offset;
2451 tcp_offset = offset = 0;
2453 if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2455 * Since the mbuf has no protocol specific structure information
2456 * in it, we have to inspect the protocol information here to
2457 * set up TSO and checksum offload. I don't know why Marvell
2458 * made such a decision in the chip design, because other GigE
2459 * hardware normally takes care of all these chores in
2460 * hardware. However, the TSO performance of Yukon II is
2461 * good enough that it's worth implementing.
2463 struct ether_header *eh;
2466 /* TODO check for M_WRITABLE(m) */
2468 offset = sizeof(struct ether_header);
2469 m = m_pullup(m, offset);
2474 eh = mtod(m, struct ether_header *);
2475 /* Check if hardware VLAN insertion is off. */
2476 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2477 offset = sizeof(struct ether_vlan_header);
2478 m = m_pullup(m, offset);
2484 m = m_pullup(m, offset + sizeof(struct ip));
2489 ip = (struct ip *)(mtod(m, char *) + offset);
2490 offset += (ip->ip_hl << 2);
2491 tcp_offset = offset;
2493 * It seems that Yukon II has a Tx checksum offload bug for
2494 * small TCP packets less than 60 bytes in size
2495 * (e.g. TCP window probe packets, pure ACK packets).
2496 * The common workaround of padding with zeros up to the
2497 * minimum ethernet frame size didn't work at all.
2498 * Instead of disabling checksum offload completely we
2499 * resort to a S/W checksum routine when we encounter short TCP frames.
2501 * Short UDP packets appear to be handled correctly by the hardware.
2504 if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
2505 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2508 csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
2509 (ip->ip_hl << 2), offset);
2510 *(uint16_t *)(m->m_data + offset +
2511 m->m_pkthdr.csum_data) = csum;
2512 m->m_pkthdr.csum_flags &= ~CSUM_TCP;
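/*
 * Example of a frame that takes this fallback: a pure ACK is
 * 14 (ethernet) + 20 (IP) + 20 (TCP) = 54 bytes, below the 60 byte
 * minimum mentioned above, so its checksum is computed in software
 * here and CSUM_TCP is cleared so the chip won't touch it again.
 */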
2517 prod = sc_if->msk_cdata.msk_tx_prod;
2518 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2520 map = txd->tx_dmamap;
2521 bzero(&ctx, sizeof(ctx));
2522 ctx.nseg = MSK_MAXTXSEGS;
2524 error = bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_tx_tag, map,
2525 *m_head, msk_dmamap_mbuf_cb, &ctx, BUS_DMA_NOWAIT);
2526 if (error == 0 && ctx.nseg == 0) {
2527 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2530 if (error == EFBIG) {
2531 m = msk_defrag(*m_head, MB_DONTWAIT, MSK_MAXTXSEGS);
2539 bzero(&ctx, sizeof(ctx));
2540 ctx.nseg = MSK_MAXTXSEGS;
2542 error = bus_dmamap_load_mbuf(sc_if->msk_cdata.msk_tx_tag,
2543 map, *m_head, msk_dmamap_mbuf_cb, &ctx, BUS_DMA_NOWAIT);
2544 if (error == 0 && ctx.nseg == 0) {
2545 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2553 } else if (error != 0) {
2557 /* Check number of available descriptors. */
2558 if (sc_if->msk_cdata.msk_tx_cnt + ctx.nseg >=
2559 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT)) {
2560 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, map);
2568 /* Check if we have a VLAN tag to insert. */
2569 if ((m->m_flags & M_VLANTAG) != 0) {
2570 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2571 tx_le->msk_addr = htole32(0);
2572 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2573 htons(m->m_pkthdr.ether_vtag));
2574 sc_if->msk_cdata.msk_tx_cnt++;
2575 MSK_INC(prod, MSK_TX_RING_CNT);
2576 control |= INS_VLAN;
2579 /* Check if we have to handle checksum offload. */
2580 if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2581 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2582 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2583 & 0xffff) | ((uint32_t)tcp_offset << 16));
2584 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2585 control |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2586 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2587 control |= UDPTCP;
2588 sc_if->msk_cdata.msk_tx_cnt++;
2589 MSK_INC(prod, MSK_TX_RING_CNT);
2593 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2594 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2595 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2597 sc_if->msk_cdata.msk_tx_cnt++;
2598 MSK_INC(prod, MSK_TX_RING_CNT);
2600 for (i = 1; i < ctx.nseg; i++) {
2601 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2602 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2603 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2604 OP_BUFFER | HW_OWNER);
2605 sc_if->msk_cdata.msk_tx_cnt++;
2606 MSK_INC(prod, MSK_TX_RING_CNT);
2608 /* Update producer index. */
2609 sc_if->msk_cdata.msk_tx_prod = prod;
2611 /* Set EOP on the last descriptor. */
2612 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2613 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2614 tx_le->msk_control |= htole32(EOP);
2616 /* Hand ownership of the first descriptor to the hardware. */
2617 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2618 tx_le->msk_control |= htole32(HW_OWNER);
2620 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2621 map = txd_last->tx_dmamap;
2622 txd_last->tx_dmamap = txd->tx_dmamap;
2623 txd->tx_dmamap = map;
2626 /* Sync descriptors. */
2627 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2628 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2629 sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_PREWRITE);
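/*
 * LE accounting for the path above: one optional OP_VLAN LE, one
 * optional checksum LE, then one data LE per DMA segment.  E.g. a
 * VLAN-tagged, checksummed frame mapped to 3 segments consumes
 * 1 + 1 + 3 = 5 LEs, which is why both this function and msk_start()
 * keep MSK_RESERVED_TX_DESC_CNT descriptors in reserve when checking
 * for ring space.
 */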
2635 msk_start(struct ifnet *ifp)
2637 struct msk_if_softc *sc_if;
2638 struct mbuf *m_head;
2641 sc_if = ifp->if_softc;
2643 ASSERT_SERIALIZED(ifp->if_serializer);
2645 if (!sc_if->msk_link) {
2646 ifq_purge(&ifp->if_snd);
2650 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2653 for (enq = 0; !ifq_is_empty(&ifp->if_snd) &&
2654 sc_if->msk_cdata.msk_tx_cnt <
2655 (MSK_TX_RING_CNT - MSK_RESERVED_TX_DESC_CNT); ) {
2656 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2661 * Pack the data into the transmit ring. If we
2662 * don't have room, set the OACTIVE flag and wait
2663 * for the NIC to drain the ring.
2665 if (msk_encap(sc_if, &m_head) != 0) {
2669 ifp->if_flags |= IFF_OACTIVE;
2675 * If there's a BPF listener, bounce a copy of this frame to it.
2678 BPF_MTAP(ifp, m_head);
2683 CSR_WRITE_2(sc_if->msk_softc,
2684 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2685 sc_if->msk_cdata.msk_tx_prod);
2687 /* Set a timeout in case the chip goes out to lunch. */
2688 ifp->if_timer = MSK_TX_TIMEOUT;
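/*
 * Note the kick sequence above: msk_encap() publishes the LEs (and
 * syncs the ring PREWRITE), and the single write to the prefetch
 * unit's put-index register is what actually tells the chip to start
 * fetching them; if_timer then arms msk_watchdog() below in case that
 * Tx completion never arrives.
 */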
2693 msk_watchdog(struct ifnet *ifp)
2695 struct msk_if_softc *sc_if = ifp->if_softc;
2699 ASSERT_SERIALIZED(ifp->if_serializer);
2701 if (sc_if->msk_link == 0) {
2703 if_printf(sc_if->msk_ifp, "watchdog timeout "
2711 * Reclaim first as there is a possibility of losing Tx completion interrupts.
2714 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2715 idx = CSR_READ_2(sc_if->msk_softc, ridx);
2716 if (sc_if->msk_cdata.msk_tx_cons != idx) {
2717 msk_txeof(sc_if, idx);
2718 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2719 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2721 if (!ifq_is_empty(&ifp->if_snd))
2727 if_printf(ifp, "watchdog timeout\n");
2730 if (!ifq_is_empty(&ifp->if_snd))
2735 mskc_shutdown(device_t dev)
2737 struct msk_softc *sc = device_get_softc(dev);
2740 lwkt_serialize_enter(&sc->msk_serializer);
2742 for (i = 0; i < sc->msk_num_port; i++) {
2743 if (sc->msk_if[i] != NULL)
2744 msk_stop(sc->msk_if[i]);
2747 /* Disable all interrupts. */
2748 CSR_WRITE_4(sc, B0_IMSK, 0);
2749 CSR_READ_4(sc, B0_IMSK);
2750 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2751 CSR_READ_4(sc, B0_HWE_IMSK);
2753 /* Put the hardware into reset. */
2754 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2756 lwkt_serialize_exit(&sc->msk_serializer);
2761 mskc_suspend(device_t dev)
2763 struct msk_softc *sc = device_get_softc(dev);
2766 lwkt_serialize_enter(&sc->msk_serializer);
2768 for (i = 0; i < sc->msk_num_port; i++) {
2769 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2770 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0))
2771 msk_stop(sc->msk_if[i]);
2774 /* Disable all interrupts. */
2775 CSR_WRITE_4(sc, B0_IMSK, 0);
2776 CSR_READ_4(sc, B0_IMSK);
2777 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2778 CSR_READ_4(sc, B0_HWE_IMSK);
2780 mskc_phy_power(sc, MSK_PHY_POWERDOWN);
2782 /* Put the hardware into reset. */
2783 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2784 sc->msk_suspended = 1;
2786 lwkt_serialize_exit(&sc->msk_serializer);
2792 mskc_resume(device_t dev)
2794 struct msk_softc *sc = device_get_softc(dev);
2797 lwkt_serialize_enter(&sc->msk_serializer);
2800 for (i = 0; i < sc->msk_num_port; i++) {
2801 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2802 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
2803 msk_init(sc->msk_if[i]);
2805 sc->msk_suspended = 0;
2807 lwkt_serialize_exit(&sc->msk_serializer);
2813 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len,
2814 struct mbuf_chain *chain)
2818 struct msk_rxdesc *rxd;
2821 ifp = sc_if->msk_ifp;
2823 cons = sc_if->msk_cdata.msk_rx_cons;
2825 rxlen = status >> 16;
2826 if ((status & GMR_FS_VLAN) != 0 &&
2827 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2828 rxlen -= EVL_ENCAPLEN;
2829 if (len > sc_if->msk_framesize ||
2830 ((status & GMR_FS_ANY_ERR) != 0) ||
2831 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2832 /* Don't count flow-control packets as errors. */
2833 if ((status & GMR_FS_GOOD_FC) == 0)
2835 msk_discard_rxbuf(sc_if, cons);
2838 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
2840 if (msk_newbuf(sc_if, cons) != 0) {
2842 /* Reuse old buffer. */
2843 msk_discard_rxbuf(sc_if, cons);
2846 m->m_pkthdr.rcvif = ifp;
2847 m->m_pkthdr.len = m->m_len = len;
2850 /* Check for VLAN tagged packets. */
2851 if ((status & GMR_FS_VLAN) != 0 &&
2852 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2853 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2854 m->m_flags |= M_VLANTAG;
2858 #ifdef ETHER_INPUT_CHAIN
2860 ether_input_chain2(ifp, m, chain);
2862 ether_input_chain(ifp, m, chain);
2865 ifp->if_input(ifp, m);
2869 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
2870 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
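/*
 * cons and prod advance in lockstep above because every consumed Rx
 * slot is immediately handed back to the chip, either refilled by
 * msk_newbuf() or, on allocation failure, recycled as-is via
 * msk_discard_rxbuf().
 */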
2875 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
2879 struct msk_rxdesc *jrxd;
2882 ifp = sc_if->msk_ifp;
2884 MSK_IF_LOCK_ASSERT(sc_if);
2886 cons = sc_if->msk_cdata.msk_rx_cons;
2888 rxlen = status >> 16;
2889 if ((status & GMR_FS_VLAN) != 0 &&
2890 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2891 rxlen -= ETHER_VLAN_ENCAP_LEN;
2892 if (len > sc_if->msk_framesize ||
2893 ((status & GMR_FS_ANY_ERR) != 0) ||
2894 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
2895 /* Don't count flow-control packets as errors. */
2896 if ((status & GMR_FS_GOOD_FC) == 0)
2898 msk_discard_jumbo_rxbuf(sc_if, cons);
2901 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
2903 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
2905 /* Reuse old buffer. */
2906 msk_discard_jumbo_rxbuf(sc_if, cons);
2909 m->m_pkthdr.rcvif = ifp;
2910 m->m_pkthdr.len = m->m_len = len;
2912 /* Check for VLAN tagged packets. */
2913 if ((status & GMR_FS_VLAN) != 0 &&
2914 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2915 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2916 m->m_flags |= M_VLANTAG;
2918 MSK_IF_UNLOCK(sc_if);
2919 (*ifp->if_input)(ifp, m);
2923 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
2924 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
2929 msk_txeof(struct msk_if_softc *sc_if, int idx)
2931 struct msk_txdesc *txd;
2932 struct msk_tx_desc *cur_tx;
2937 ifp = sc_if->msk_ifp;
2939 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_ring_tag,
2940 sc_if->msk_cdata.msk_tx_ring_map, BUS_DMASYNC_POSTREAD);
2943 * Go through our tx ring and free mbufs for those
2944 * frames that have been sent.
2946 cons = sc_if->msk_cdata.msk_tx_cons;
2948 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
2949 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
2952 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
2953 control = le32toh(cur_tx->msk_control);
2954 sc_if->msk_cdata.msk_tx_cnt--;
2955 ifp->if_flags &= ~IFF_OACTIVE;
2956 if ((control & EOP) == 0)
2958 txd = &sc_if->msk_cdata.msk_txdesc[cons];
2959 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap,
2960 BUS_DMASYNC_POSTWRITE);
2961 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
2964 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
2971 sc_if->msk_cdata.msk_tx_cons = cons;
2972 if (sc_if->msk_cdata.msk_tx_cnt == 0)
2974 /* No need to sync LEs as we didn't update LEs. */
2979 msk_tick(void *xsc_if)
2981 struct msk_if_softc *sc_if = xsc_if;
2982 struct ifnet *ifp = &sc_if->arpcom.ac_if;
2983 struct mii_data *mii;
2985 lwkt_serialize_enter(ifp->if_serializer);
2987 mii = device_get_softc(sc_if->msk_miibus);
2990 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
2992 lwkt_serialize_exit(ifp->if_serializer);
2996 msk_intr_phy(struct msk_if_softc *sc_if)
3000 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3001 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
3002 /* Handle FIFO Underrun/Overflow? */
3003 if (status & PHY_M_IS_FIFO_ERROR) {
3004 device_printf(sc_if->msk_if_dev,
3005 "PHY FIFO underrun/overflow.\n");
3010 msk_intr_gmac(struct msk_if_softc *sc_if)
3012 struct msk_softc *sc;
3015 sc = sc_if->msk_softc;
3016 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3018 /* GMAC Rx FIFO overrun. */
3019 if ((status & GM_IS_RX_FF_OR) != 0) {
3020 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3022 device_printf(sc_if->msk_if_dev, "Rx FIFO overrun!\n");
3024 /* GMAC Tx FIFO underrun. */
3025 if ((status & GM_IS_TX_FF_UR) != 0) {
3026 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3028 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
3031 * In case of Tx underrun, we may need to flush/reset the
3032 * Tx MAC, but that would also require resynchronization
3033 * with the status LEs. Reinitializing status LEs would
3034 * affect the other port in dual MAC configurations, so it
3035 * should be avoided as much as possible.
3036 * Due to lack of documentation it's all a vague guess, but
3037 * it needs more investigation.
3043 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3045 struct msk_softc *sc;
3047 sc = sc_if->msk_softc;
3048 if ((status & Y2_IS_PAR_RD1) != 0) {
3049 device_printf(sc_if->msk_if_dev,
3050 "RAM buffer read parity error\n");
3052 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3055 if ((status & Y2_IS_PAR_WR1) != 0) {
3056 device_printf(sc_if->msk_if_dev,
3057 "RAM buffer write parity error\n");
3059 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3062 if ((status & Y2_IS_PAR_MAC1) != 0) {
3063 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3065 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3068 if ((status & Y2_IS_PAR_RX1) != 0) {
3069 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3071 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3073 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3074 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3076 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3081 mskc_intr_hwerr(struct msk_softc *sc)
3084 uint32_t tlphead[4];
3086 status = CSR_READ_4(sc, B0_HWE_ISRC);
3087 /* Time Stamp timer overflow. */
3088 if ((status & Y2_IS_TIST_OV) != 0)
3089 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3090 if ((status & Y2_IS_PCI_NEXP) != 0) {
3092 * A PCI Express error occurred which is not described in the PEX
3094 * This error is also mapped to either the Master Abort
3095 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
3096 * can only be cleared there.
3098 device_printf(sc->msk_dev,
3099 "PCI Express protocol violation error\n");
3102 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3105 if ((status & Y2_IS_MST_ERR) != 0)
3106 device_printf(sc->msk_dev,
3107 "unexpected IRQ Master error\n");
3109 device_printf(sc->msk_dev,
3110 "unexpected IRQ Status error\n");
3111 /* Reset all bits in the PCI status register. */
3112 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3113 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3114 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3115 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3116 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3117 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3120 /* Check for PCI Express Uncorrectable Error. */
3121 if ((status & Y2_IS_PCI_EXP) != 0) {
3125 * On PCI Express, bus bridges are called root complexes (RC).
3126 * PCI Express errors are recognized by the root complex too,
3127 * which requests the system to handle the problem. After
3128 * an error occurrence it may be that the adapter can no
3129 * longer be accessed.
3132 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3133 if ((v32 & PEX_UNSUP_REQ) != 0) {
3134 /* Ignore unsupported request error. */
3136 device_printf(sc->msk_dev,
3137 "Uncorrectable PCI Express error\n");
3140 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3143 /* Get TLP header from the Log Registers. */
3144 for (i = 0; i < 4; i++)
3145 tlphead[i] = CSR_PCI_READ_4(sc,
3146 PEX_HEADER_LOG + i * 4);
3147 /* Check for vendor defined broadcast message. */
3148 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3149 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3150 CSR_WRITE_4(sc, B0_HWE_IMSK,
3151 sc->msk_intrhwemask);
3152 CSR_READ_4(sc, B0_HWE_IMSK);
3155 /* Clear the interrupt. */
3156 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3157 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3158 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3161 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3162 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3163 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3164 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3167 static __inline void
3168 msk_rxput(struct msk_if_softc *sc_if)
3170 struct msk_softc *sc;
3172 sc = sc_if->msk_softc;
3174 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3176 sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3177 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3178 BUS_DMASYNC_PREWRITE);
3183 sc_if->msk_cdata.msk_rx_ring_tag,
3184 sc_if->msk_cdata.msk_rx_ring_map,
3185 BUS_DMASYNC_PREWRITE);
3187 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3188 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
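/*
 * The PREWRITE sync above must precede the put-index write: as soon
 * as the prefetch unit sees the new index it may DMA the ring, so the
 * updated LEs have to be visible to the device first.
 */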
3192 mskc_handle_events(struct msk_softc *sc)
3194 struct msk_if_softc *sc_if;
3196 struct msk_stat_desc *sd;
3197 uint32_t control, status;
3198 int cons, idx, len, port, rxprog;
3199 #ifdef ETHER_INPUT_CHAIN
3200 struct mbuf_chain chain0[MAXCPU];
3202 struct mbuf_chain *chain;
3204 idx = CSR_READ_2(sc, STAT_PUT_IDX);
3205 if (idx == sc->msk_stat_cons)
3208 #ifdef ETHER_INPUT_CHAIN
3210 ether_input_chain_init(chain);
3215 /* Sync status LEs. */
3216 bus_dmamap_sync(sc->msk_stat_tag, sc->msk_stat_map,
3217 BUS_DMASYNC_POSTREAD);
3218 /* XXX Sync Rx LEs here. */
3220 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3223 for (cons = sc->msk_stat_cons; cons != idx;) {
3224 sd = &sc->msk_stat_ring[cons];
3225 control = le32toh(sd->msk_control);
3226 if ((control & HW_OWNER) == 0)
3229 * Marvell's FreeBSD driver updates the status LE after clearing
3230 * HW_OWNER. However, we have no way to sync a single LE
3231 * with the bus_dma(9) API, which only provides a way to sync
3232 * an entire DMA map. So don't sync the LE until we have a better way.
3235 control &= ~HW_OWNER;
3236 sd->msk_control = htole32(control);
3237 status = le32toh(sd->msk_status);
3238 len = control & STLE_LEN_MASK;
3239 port = (control >> 16) & 0x01;
3240 sc_if = sc->msk_if[port];
3241 if (sc_if == NULL) {
3242 device_printf(sc->msk_dev, "invalid port opcode "
3243 "0x%08x\n", control & STLE_OP_MASK);
3247 switch (control & STLE_OP_MASK) {
3249 sc_if->msk_vtag = ntohs(len);
3252 sc_if->msk_vtag = ntohs(len);
3256 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3257 msk_jumbo_rxeof(sc_if, status, len);
3260 msk_rxeof(sc_if, status, len, chain);
3263 * Because there is no way to sync a single Rx LE,
3264 * put the DMA sync operation off until the end of
3268 /* Update the prefetch unit if we've passed the watermark. */
3269 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3275 if (sc->msk_if[MSK_PORT_A] != NULL) {
3276 msk_txeof(sc->msk_if[MSK_PORT_A],
3277 status & STLE_TXA1_MSKL);
3279 if (sc->msk_if[MSK_PORT_B] != NULL) {
3280 msk_txeof(sc->msk_if[MSK_PORT_B],
3281 ((status & STLE_TXA2_MSKL) >>
3283 ((len & STLE_TXA2_MSKH) <<
3288 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3289 control & STLE_OP_MASK);
3292 MSK_INC(cons, MSK_STAT_RING_CNT);
3293 if (rxprog > sc->msk_process_limit)
3297 #ifdef ETHER_INPUT_CHAIN
3299 ether_input_dispatch(chain);
3302 sc->msk_stat_cons = cons;
3303 /* XXX We should sync status LEs here. See above notes. */
3305 if (rxput[MSK_PORT_A] > 0)
3306 msk_rxput(sc->msk_if[MSK_PORT_A]);
3307 if (rxput[MSK_PORT_B] > 0)
3308 msk_rxput(sc->msk_if[MSK_PORT_B]);
3310 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
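/*
 * For reference, the status LE field extraction used by the event
 * loop above, collected in one place (a sketch, not compiled):
 */
#if 0
	control = le32toh(sd->msk_control);
	if ((control & HW_OWNER) == 0)
		break;					/* chip still owns it */
	status = le32toh(sd->msk_status);		/* opcode-specific data */
	len = control & STLE_LEN_MASK;			/* length or VLAN tag */
	port = (control >> 16) & 0x01;			/* MSK_PORT_A/MSK_PORT_B */
	switch (control & STLE_OP_MASK) {		/* dispatch on opcode */
	/* ... */
	}
#endif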
3313 /* Legacy interrupt handler for shared interrupt. */
3315 mskc_intr(void *xsc)
3317 struct msk_softc *sc;
3318 struct msk_if_softc *sc_if0, *sc_if1;
3319 struct ifnet *ifp0, *ifp1;
3323 ASSERT_SERIALIZED(&sc->msk_serializer);
3325 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3326 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3327 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3328 (status & sc->msk_intrmask) == 0) {
3329 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3333 sc_if0 = sc->msk_if[MSK_PORT_A];
3334 sc_if1 = sc->msk_if[MSK_PORT_B];
3337 ifp0 = sc_if0->msk_ifp;
3339 ifp1 = sc_if1->msk_ifp;
3341 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3342 msk_intr_phy(sc_if0);
3343 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3344 msk_intr_phy(sc_if1);
3345 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3346 msk_intr_gmac(sc_if0);
3347 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3348 msk_intr_gmac(sc_if1);
3349 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3350 device_printf(sc->msk_dev, "Rx descriptor error\n");
3351 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3352 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3353 CSR_READ_4(sc, B0_IMSK);
3355 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3356 device_printf(sc->msk_dev, "Tx descriptor error\n");
3357 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3358 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3359 CSR_READ_4(sc, B0_IMSK);
3361 if ((status & Y2_IS_HW_ERR) != 0)
3362 mskc_intr_hwerr(sc);
3364 while (mskc_handle_events(sc) != 0)
3366 if ((status & Y2_IS_STAT_BMU) != 0)
3367 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3369 /* Reenable interrupts. */
3370 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3372 if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 &&
3373 !ifq_is_empty(&ifp0->if_snd))
3375 if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 &&
3376 !ifq_is_empty(&ifp1->if_snd))
3383 struct msk_if_softc *sc_if = xsc;
3384 struct msk_softc *sc = sc_if->msk_softc;
3385 struct ifnet *ifp = sc_if->msk_ifp;
3386 struct mii_data *mii;
3387 uint16_t eaddr[ETHER_ADDR_LEN / 2];
3391 ASSERT_SERIALIZED(ifp->if_serializer);
3393 mii = device_get_softc(sc_if->msk_miibus);
3396 /* Cancel pending I/O and free all Rx/Tx buffers. */
3399 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
3400 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
3401 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3403 * In Yukon EC Ultra, TSO & checksum offload is not
3404 * supported for jumbo frames.
3406 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
3407 ifp->if_capenable &= ~IFCAP_TXCSUM;
3411 * Initialize GMAC first.
3412 * Without this initialization the Rx MAC did not work as expected:
3413 * it garbled status LEs, which resulted in out-of-order
3414 * or duplicated frame delivery and in turn showed very poor
3415 * Rx performance. (I had to write packet analysis code that
3416 * could be embedded in the driver to diagnose this issue.)
3417 * I spent almost 2 months fixing this issue. If I had had a
3418 * datasheet for Yukon II I wouldn't have encountered this. :-(
3420 gmac = GM_GPCR_SPEED_100 | GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL;
3421 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
3423 /* Dummy read the Interrupt Source Register. */
3424 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3426 /* Set MIB Clear Counter Mode. */
3427 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3428 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3429 /* Read all MIB Counters with Clear Mode set. */
3430 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
3431 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
3432 /* Clear MIB Clear Counter Mode. */
3433 gmac &= ~GM_PAR_MIB_CLR;
3434 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3437 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3439 /* Setup Transmit Control Register. */
3440 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3442 /* Setup Transmit Flow Control Register. */
3443 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3445 /* Setup Transmit Parameter Register. */
3446 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3447 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3448 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3450 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3451 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3453 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3454 gmac |= GM_SMOD_JUMBO_ENA;
3455 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3457 /* Set station address. */
3458 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
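/*
 * The address is programmed as three 16-bit words, e.g.
 * 00:11:22:33:44:55 becomes eaddr[] = { 0x1100, 0x3322, 0x5544 } on a
 * little-endian host; each word goes to a register 4 bytes apart,
 * hence the i * 4 stride below, and the same address is written to
 * both the GM_SRC_ADDR_1* and GM_SRC_ADDR_2* banks.
 */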
3459 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3460 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3462 for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
3463 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3466 /* Disable interrupts for counter overflows. */
3467 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3468 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3469 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3471 /* Configure Rx MAC FIFO. */
3472 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3473 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
3474 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3475 GMF_OPER_ON | GMF_RX_F_FL_ON);
3477 /* Set promiscuous mode. */
3478 msk_setpromisc(sc_if);
3480 /* Set multicast filter. */
3481 msk_setmulti(sc_if);
3483 /* Flush Rx MAC FIFO on any flow control or error. */
3484 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3487 /* Set Rx FIFO flush threshold to 64 bytes. */
3488 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR),
3491 /* Configure Tx MAC FIFO. */
3492 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3493 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3494 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3496 /* Configure hardware VLAN tag insertion/stripping. */
3497 msk_setvlan(sc_if, ifp);
3499 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3500 /* Set Rx Pause thresholds. */
3501 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
3503 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
3505 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) {
3507 * Set Tx GMAC FIFO Almost Empty Threshold.
3509 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3510 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3511 /* Disable Store & Forward mode for Tx. */
3512 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3513 TX_JUMBO_ENA | TX_STFW_DIS);
3515 /* Enable Store & Forward mode for Tx. */
3516 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3517 TX_JUMBO_DIS | TX_STFW_ENA);
3522 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3523 * arbiter as we don't use Sync Tx queue.
3525 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3526 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3527 /* Enable the RAM Interface Arbiter. */
3528 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3530 /* Setup RAM buffer. */
3531 msk_set_rambuffer(sc_if);
3533 /* Disable Tx sync Queue. */
3534 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3536 /* Setup Tx Queue Bus Memory Interface. */
3537 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3538 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3539 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3540 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
3541 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3542 sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3543 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3544 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), MSK_ECU_TXFF_LEV);
3547 /* Setup Rx Queue Bus Memory Interface. */
3548 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3549 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3550 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3551 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3552 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3553 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3554 /* MAC Rx RAM Read is controlled by hardware. */
3555 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3558 msk_set_prefetch(sc, sc_if->msk_txq,
3559 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3560 msk_init_tx_ring(sc_if);
3562 /* Disable Rx checksum offload and RSS hash. */
3563 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3564 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3566 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3567 msk_set_prefetch(sc, sc_if->msk_rxq,
3568 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3569 MSK_JUMBO_RX_RING_CNT - 1);
3570 error = msk_init_jumbo_rx_ring(sc_if);
3574 msk_set_prefetch(sc, sc_if->msk_rxq,
3575 sc_if->msk_rdata.msk_rx_ring_paddr,
3576 MSK_RX_RING_CNT - 1);
3577 error = msk_init_rx_ring(sc_if);
3580 device_printf(sc_if->msk_if_dev,
3581 "initialization failed: no memory for Rx buffers\n");
3586 /* Configure interrupt handling. */
3587 if (sc_if->msk_port == MSK_PORT_A) {
3588 sc->msk_intrmask |= Y2_IS_PORT_A;
3589 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3591 sc->msk_intrmask |= Y2_IS_PORT_B;
3592 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3594 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3595 CSR_READ_4(sc, B0_HWE_IMSK);
3596 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3597 CSR_READ_4(sc, B0_IMSK);
3599 sc_if->msk_link = 0;
3602 mskc_set_imtimer(sc);
3604 ifp->if_flags |= IFF_RUNNING;
3605 ifp->if_flags &= ~IFF_OACTIVE;
3607 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3611 msk_set_rambuffer(struct msk_if_softc *sc_if)
3613 struct msk_softc *sc;
3616 sc = sc_if->msk_softc;
3618 /* Setup Rx Queue. */
3619 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3620 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3621 sc->msk_rxqstart[sc_if->msk_port] / 8);
3622 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3623 sc->msk_rxqend[sc_if->msk_port] / 8);
3624 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3625 sc->msk_rxqstart[sc_if->msk_port] / 8);
3626 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3627 sc->msk_rxqstart[sc_if->msk_port] / 8);
3629 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3630 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3631 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3632 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3633 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3634 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3635 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3636 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
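/*
 * Worked example for the thresholds above: all RAM buffer registers
 * are in 8-byte units, so the upper pause threshold is programmed
 * MSK_RB_ULPP bytes below the top of the queue and the lower one
 * MSK_RB_LLPP_B below it; queues smaller than MSK_MIN_RXQ_SIZE get
 * the lower threshold moved up so that only MSK_RB_LLPP_S of headroom
 * remains.
 */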
3637 /* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3639 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3640 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3642 /* Setup Tx Queue. */
3643 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3644 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3645 sc->msk_txqstart[sc_if->msk_port] / 8);
3646 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3647 sc->msk_txqend[sc_if->msk_port] / 8);
3648 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3649 sc->msk_txqstart[sc_if->msk_port] / 8);
3650 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3651 sc->msk_txqstart[sc_if->msk_port] / 8);
3652 /* Enable Store & Forward for Tx side. */
3653 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3654 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3655 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3659 msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3663 /* Reset the prefetch unit. */
3664 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3666 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3668 /* Set LE base address. */
3669 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3671 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3673 /* Set the list last index. */
3674 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3676 /* Turn on prefetch unit. */
3677 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3679 /* Dummy read to ensure write. */
3680 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3684 msk_stop(struct msk_if_softc *sc_if)
3686 struct msk_softc *sc = sc_if->msk_softc;
3687 struct ifnet *ifp = sc_if->msk_ifp;
3688 struct msk_txdesc *txd;
3689 struct msk_rxdesc *rxd;
3691 struct msk_rxdesc *jrxd;
3696 ASSERT_SERIALIZED(ifp->if_serializer);
3698 callout_stop(&sc_if->msk_tick_ch);
3701 /* Disable interrupts. */
3702 if (sc_if->msk_port == MSK_PORT_A) {
3703 sc->msk_intrmask &= ~Y2_IS_PORT_A;
3704 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3706 sc->msk_intrmask &= ~Y2_IS_PORT_B;
3707 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3709 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3710 CSR_READ_4(sc, B0_HWE_IMSK);
3711 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3712 CSR_READ_4(sc, B0_IMSK);
3714 /* Disable Tx/Rx MAC. */
3715 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3716 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3717 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3718 /* Read back to ensure the write completed. */
3719 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3722 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3723 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3724 for (i = 0; i < MSK_TIMEOUT; i++) {
3725 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3726 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3728 CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
3733 if (i == MSK_TIMEOUT)
3734 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3735 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3736 RB_RST_SET | RB_DIS_OP_MD);
3738 /* Disable all GMAC interrupts. */
3739 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3740 /* Disable PHY interrupt. */
3741 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3743 /* Disable the RAM Interface Arbiter. */
3744 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3746 /* Reset the PCI FIFO of the async Tx queue. */
3747 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3748 BMU_RST_SET | BMU_FIFO_RST);
3750 /* Reset the Tx prefetch units. */
3751 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3754 /* Reset the RAM Buffer async Tx queue. */
3755 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3757 /* Reset Tx MAC FIFO. */
3758 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3759 /* Set Pause Off. */
3760 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3763 * The Rx Stop command will not work for Yukon-2 if the BMU does not
3764 * reach the end of packet, and since we can't make sure that we have
3765 * incoming data, we must reset the BMU while it is not in the middle
3766 * of a DMA transfer. Since it is possible that the Rx path is still
3767 * active, the Rx RAM buffer will be stopped first, so any possible
3768 * incoming data will not trigger a DMA. After the RAM buffer is
3769 * stopped, the BMU is polled until any DMA in progress has ended; only then is it reset.
3773 /* Disable the RAM Buffer receive queue. */
3774 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3775 for (i = 0; i < MSK_TIMEOUT; i++) {
3776 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3777 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3781 if (i == MSK_TIMEOUT)
3782 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3783 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3784 BMU_RST_SET | BMU_FIFO_RST);
3785 /* Reset the Rx prefetch unit. */
3786 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
3788 /* Reset the RAM Buffer receive queue. */
3789 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
3790 /* Reset Rx MAC FIFO. */
3791 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3793 /* Free Rx and Tx mbufs still in the queues. */
3794 for (i = 0; i < MSK_RX_RING_CNT; i++) {
3795 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
3796 if (rxd->rx_m != NULL) {
3797 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag,
3798 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3799 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
3806 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
3807 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
3808 if (jrxd->rx_m != NULL) {
3809 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
3810 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3811 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
3813 m_freem(jrxd->rx_m);
3818 for (i = 0; i < MSK_TX_RING_CNT; i++) {
3819 txd = &sc_if->msk_cdata.msk_txdesc[i];
3820 if (txd->tx_m != NULL) {
3821 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag,
3822 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
3823 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
3831 * Mark the interface down.
3833 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3834 sc_if->msk_link = 0;
3838 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3844 value = *(int *)arg1;
3845 error = sysctl_handle_int(oidp, &value, 0, req);
3846 if (error || !req->newptr)
3848 if (value < low || value > high)
3850 *(int *)arg1 = value;
3856 mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
3858 return sysctl_int_range(oidp, arg1, arg2, req,
3859 MSK_PROC_MIN, MSK_PROC_MAX);
3863 mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
3865 struct msk_softc *sc = arg1;
3866 struct lwkt_serialize *serializer = &sc->msk_serializer;
3869 lwkt_serialize_enter(serializer);
3871 v = sc->msk_intr_rate;
3872 error = sysctl_handle_int(oidp, &v, 0, req);
3873 if (error || req->newptr == NULL)
3880 if (sc->msk_intr_rate != v) {
3883 sc->msk_intr_rate = v;
3884 for (i = 0; i < 2; ++i) {
3885 if (sc->msk_if[i] != NULL) {
3886 flag |= sc->msk_if[i]->
3887 arpcom.ac_if.if_flags & IFF_RUNNING;
3891 mskc_set_imtimer(sc);
3894 lwkt_serialize_exit(serializer);
3899 msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
3900 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
3902 struct msk_if_softc *sc_if = device_get_softc(dev);
3903 struct msk_dmamap_arg ctx;
3904 bus_dma_segment_t seg;
3907 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,
3909 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
3911 size, 1, BUS_SPACE_MAXSIZE_32BIT,
3914 device_printf(dev, "can't create DMA tag\n");
3918 error = bus_dmamem_alloc(*dtag, addr, BUS_DMA_WAITOK | BUS_DMA_ZERO,
3921 device_printf(dev, "can't allocate DMA mem\n");
3922 bus_dma_tag_destroy(*dtag);
3927 bzero(&ctx, sizeof(ctx));
3930 error = bus_dmamap_load(*dtag, *dmap, *addr, size,
3931 msk_dmamap_cb, &ctx, BUS_DMA_WAITOK);
3933 device_printf(dev, "can't load DMA mem\n");
3934 bus_dmamem_free(*dtag, *addr, *dmap);
3935 bus_dma_tag_destroy(*dtag);
3939 *paddr = seg.ds_addr;
3944 msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
3947 bus_dmamap_unload(dtag, dmap);
3948 bus_dmamem_free(dtag, addr, dmap);
3949 bus_dma_tag_destroy(dtag);
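/*
 * A minimal sketch of the intended pairing of the two helpers above
 * for a descriptor ring (MSK_TX_RING_SZ is illustrative here; the
 * actual ring allocations happen in the DMA allocation code earlier
 * in this file):
 */
#if 0
	error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
	    &sc_if->msk_cdata.msk_tx_ring_tag,
	    (void **)&sc_if->msk_rdata.msk_tx_ring,
	    &sc_if->msk_rdata.msk_tx_ring_paddr,
	    &sc_if->msk_cdata.msk_tx_ring_map);
	/* ... use the ring ... */
	msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
	    sc_if->msk_rdata.msk_tx_ring,
	    sc_if->msk_cdata.msk_tx_ring_map);
#endif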
3954 mskc_set_imtimer(struct msk_softc *sc)
3956 if (sc->msk_intr_rate > 0) {
3958 * XXX myk(4) seems to use 125MHz for EC/FE/XL
3959 * and 78.125MHz for the rest of the chip types
3961 CSR_WRITE_4(sc, B2_IRQM_INI,
3962 MSK_USECS(sc, 1000000 / sc->msk_intr_rate));
3963 CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
3964 CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START);
3966 CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP);
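/*
 * Moderation arithmetic, for reference: msk_intr_rate is in
 * interrupts per second, so 1000000 / msk_intr_rate is the period in
 * microseconds (e.g. 4000 ints/s -> 250us), which MSK_USECS() then
 * converts to timer ticks per the XXX note above.
 */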