msk(4): Disable ASF on the chips that have it
[dragonfly.git] / sys / dev / netif / msk / if_msk.c
1/******************************************************************************
2 *
3 * Name : sky2.c
4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x
5 * Version: $Revision: 1.23 $
6 * Date : $Date: 2005/12/22 09:04:11 $
7 * Purpose: Main driver source file
8 *
9 *****************************************************************************/
10
11/******************************************************************************
12 *
13 * LICENSE:
14 * Copyright (C) Marvell International Ltd. and/or its affiliates
15 *
16 * The computer program files contained in this folder ("Files")
17 * are provided to you under the BSD-type license terms provided
18 * below, and any use of such Files and any derivative works
19 * thereof created by you shall be governed by the following terms
20 * and conditions:
21 *
22 * - Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials provided
27 * with the distribution.
28 * - Neither the name of Marvell nor the names of its contributors
29 * may be used to endorse or promote products derived from this
30 * software without specific prior written permission.
31 *
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
43 * OF THE POSSIBILITY OF SUCH DAMAGE.
44 * /LICENSE
45 *
46 *****************************************************************************/
47
48/*-
49 * Copyright (c) 1997, 1998, 1999, 2000
50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
51 *
52 * Redistribution and use in source and binary forms, with or without
53 * modification, are permitted provided that the following conditions
54 * are met:
55 * 1. Redistributions of source code must retain the above copyright
56 * notice, this list of conditions and the following disclaimer.
57 * 2. Redistributions in binary form must reproduce the above copyright
58 * notice, this list of conditions and the following disclaimer in the
59 * documentation and/or other materials provided with the distribution.
60 * 3. All advertising materials mentioning features or use of this software
61 * must display the following acknowledgement:
62 * This product includes software developed by Bill Paul.
63 * 4. Neither the name of the author nor the names of any co-contributors
64 * may be used to endorse or promote products derived from this software
65 * without specific prior written permission.
66 *
67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
77 * THE POSSIBILITY OF SUCH DAMAGE.
78 */
79/*-
80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
81 *
82 * Permission to use, copy, modify, and distribute this software for any
83 * purpose with or without fee is hereby granted, provided that the above
84 * copyright notice and this permission notice appear in all copies.
85 *
86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
93 */
94
95/* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */
96
97/*
98 * Device driver for the Marvell Yukon II Ethernet controller.
99 * Due to lack of documentation, this driver is based on the code from
100 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x.
101 */
102
103#include <sys/param.h>
104#include <sys/endian.h>
105#include <sys/kernel.h>
106#include <sys/bus.h>
107#include <sys/in_cksum.h>
 108#include <sys/interrupt.h>
109#include <sys/malloc.h>
110#include <sys/proc.h>
111#include <sys/rman.h>
112#include <sys/serialize.h>
113#include <sys/socket.h>
114#include <sys/sockio.h>
115#include <sys/sysctl.h>
116
117#include <net/ethernet.h>
118#include <net/if.h>
119#include <net/bpf.h>
120#include <net/if_arp.h>
121#include <net/if_dl.h>
122#include <net/if_media.h>
123#include <net/ifq_var.h>
124#include <net/vlan/if_vlan_var.h>
125
126#include <netinet/ip.h>
127#include <netinet/ip_var.h>
128
129#include <dev/netif/mii_layer/miivar.h>
130
131#include <bus/pci/pcireg.h>
132#include <bus/pci/pcivar.h>
133
134#include "if_mskreg.h"
135
136/* "device miibus" required. See GENERIC if you get errors here. */
137#include "miibus_if.h"
138
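/*
 * Tx checksum offloads advertised through if_hwassist whenever IFCAP_TXCSUM
 * is enabled; see the SIOCSIFCAP handling in msk_ioctl().
 */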
139#define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
140
141/*
142 * Devices supported by this driver.
143 */
144static const struct msk_product {
145 uint16_t msk_vendorid;
146 uint16_t msk_deviceid;
147 const char *msk_name;
148} msk_products[] = {
149 { VENDORID_SK, DEVICEID_SK_YUKON2,
150 "SK-9Sxx Gigabit Ethernet" },
151 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR,
152 "SK-9Exx Gigabit Ethernet"},
153 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU,
154 "Marvell Yukon 88E8021CU Gigabit Ethernet" },
155 { VENDORID_MARVELL, DEVICEID_MRVL_8021X,
156 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" },
157 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU,
158 "Marvell Yukon 88E8022CU Gigabit Ethernet" },
159 { VENDORID_MARVELL, DEVICEID_MRVL_8022X,
160 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" },
161 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU,
162 "Marvell Yukon 88E8061CU Gigabit Ethernet" },
163 { VENDORID_MARVELL, DEVICEID_MRVL_8061X,
164 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" },
165 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU,
166 "Marvell Yukon 88E8062CU Gigabit Ethernet" },
167 { VENDORID_MARVELL, DEVICEID_MRVL_8062X,
168 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" },
169 { VENDORID_MARVELL, DEVICEID_MRVL_8035,
 170	  "Marvell Yukon 88E8035 Fast Ethernet" },
 171	{ VENDORID_MARVELL, DEVICEID_MRVL_8036,
 172	  "Marvell Yukon 88E8036 Fast Ethernet" },
 173	{ VENDORID_MARVELL, DEVICEID_MRVL_8038,
 174	  "Marvell Yukon 88E8038 Fast Ethernet" },
 175	{ VENDORID_MARVELL, DEVICEID_MRVL_8039,
 176	  "Marvell Yukon 88E8039 Fast Ethernet" },
177 { VENDORID_MARVELL, DEVICEID_MRVL_8040,
178 "Marvell Yukon 88E8040 Fast Ethernet" },
179 { VENDORID_MARVELL, DEVICEID_MRVL_8040T,
180 "Marvell Yukon 88E8040T Fast Ethernet" },
181 { VENDORID_MARVELL, DEVICEID_MRVL_8042,
182 "Marvell Yukon 88E8042 Fast Ethernet" },
183 { VENDORID_MARVELL, DEVICEID_MRVL_8048,
184 "Marvell Yukon 88E8048 Fast Ethernet" },
185 { VENDORID_MARVELL, DEVICEID_MRVL_4361,
186 "Marvell Yukon 88E8050 Gigabit Ethernet" },
187 { VENDORID_MARVELL, DEVICEID_MRVL_4360,
188 "Marvell Yukon 88E8052 Gigabit Ethernet" },
189 { VENDORID_MARVELL, DEVICEID_MRVL_4362,
190 "Marvell Yukon 88E8053 Gigabit Ethernet" },
191 { VENDORID_MARVELL, DEVICEID_MRVL_4363,
192 "Marvell Yukon 88E8055 Gigabit Ethernet" },
193 { VENDORID_MARVELL, DEVICEID_MRVL_4364,
194 "Marvell Yukon 88E8056 Gigabit Ethernet" },
195 { VENDORID_MARVELL, DEVICEID_MRVL_4365,
196 "Marvell Yukon 88E8070 Gigabit Ethernet" },
197 { VENDORID_MARVELL, DEVICEID_MRVL_436A,
198 "Marvell Yukon 88E8058 Gigabit Ethernet" },
199 { VENDORID_MARVELL, DEVICEID_MRVL_436B,
200 "Marvell Yukon 88E8071 Gigabit Ethernet" },
201 { VENDORID_MARVELL, DEVICEID_MRVL_436C,
202 "Marvell Yukon 88E8072 Gigabit Ethernet" },
203 { VENDORID_MARVELL, DEVICEID_MRVL_4380,
204 "Marvell Yukon 88E8057 Gigabit Ethernet" },
205 { VENDORID_MARVELL, DEVICEID_MRVL_4381,
206 "Marvell Yukon 88E8059 Gigabit Ethernet" },
207 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX,
208 "D-Link 550SX Gigabit Ethernet" },
209 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T,
210 "D-Link 560T Gigabit Ethernet" },
211 { 0, 0, NULL }
212};
213
214static const char *model_name[] = {
215 "Yukon XL",
 216	"Yukon EC Ultra",
 217	"Yukon EX",
218 "Yukon EC",
219 "Yukon FE",
220 "Yukon FE+",
221 "Yukon Supreme",
222 "Yukon Ultra 2",
223 "Yukon Unknown",
224 "Yukon Optima"
225};
226
227static int mskc_probe(device_t);
228static int mskc_attach(device_t);
229static int mskc_detach(device_t);
230static int mskc_shutdown(device_t);
231static int mskc_suspend(device_t);
232static int mskc_resume(device_t);
233static void mskc_intr(void *);
234
235static void mskc_reset(struct msk_softc *);
 236static void	mskc_set_imtimer(struct msk_softc *);
237static void mskc_intr_hwerr(struct msk_softc *);
238static int mskc_handle_events(struct msk_softc *);
239static void mskc_phy_power(struct msk_softc *, int);
240static int mskc_setup_rambuffer(struct msk_softc *);
241static int mskc_status_dma_alloc(struct msk_softc *);
242static void mskc_status_dma_free(struct msk_softc *);
243static int mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS);
244static int mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS);
245
246static int msk_probe(device_t);
247static int msk_attach(device_t);
248static int msk_detach(device_t);
249static int msk_miibus_readreg(device_t, int, int);
250static int msk_miibus_writereg(device_t, int, int, int);
251static void msk_miibus_statchg(device_t);
252
253static void msk_init(void *);
254static int msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
255static void msk_start(struct ifnet *);
256static void msk_watchdog(struct ifnet *);
257static int msk_mediachange(struct ifnet *);
258static void msk_mediastatus(struct ifnet *, struct ifmediareq *);
259
260static void msk_tick(void *);
261static void msk_intr_phy(struct msk_if_softc *);
262static void msk_intr_gmac(struct msk_if_softc *);
263static __inline void
264 msk_rxput(struct msk_if_softc *);
265static void msk_handle_hwerr(struct msk_if_softc *, uint32_t);
266static void msk_rxeof(struct msk_if_softc *, uint32_t, int,
267 struct mbuf_chain *);
268static void msk_txeof(struct msk_if_softc *, int);
269static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t);
270static void msk_set_rambuffer(struct msk_if_softc *);
271static void msk_stop(struct msk_if_softc *);
272
273static int msk_txrx_dma_alloc(struct msk_if_softc *);
274static void msk_txrx_dma_free(struct msk_if_softc *);
275static int msk_init_rx_ring(struct msk_if_softc *);
276static void msk_init_tx_ring(struct msk_if_softc *);
277static __inline void
278 msk_discard_rxbuf(struct msk_if_softc *, int);
 279static int	msk_newbuf(struct msk_if_softc *, int, int);
280static int msk_encap(struct msk_if_softc *, struct mbuf **);
281
282#ifdef MSK_JUMBO
283static int msk_init_jumbo_rx_ring(struct msk_if_softc *);
284static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int);
285static int msk_jumbo_newbuf(struct msk_if_softc *, int);
286static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
287static void *msk_jalloc(struct msk_if_softc *);
288static void msk_jfree(void *, void *);
289#endif
290
291static int msk_phy_readreg(struct msk_if_softc *, int, int);
292static int msk_phy_writereg(struct msk_if_softc *, int, int, int);
293
 294static void	msk_rxfilter(struct msk_if_softc *);
 295static void	msk_setvlan(struct msk_if_softc *, struct ifnet *);
 296static void	msk_set_tx_stfwd(struct msk_if_softc *);
 297
298static int msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *,
299 void **, bus_addr_t *, bus_dmamap_t *);
300static void msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t);
301
302static device_method_t mskc_methods[] = {
303 /* Device interface */
304 DEVMETHOD(device_probe, mskc_probe),
305 DEVMETHOD(device_attach, mskc_attach),
306 DEVMETHOD(device_detach, mskc_detach),
307 DEVMETHOD(device_suspend, mskc_suspend),
308 DEVMETHOD(device_resume, mskc_resume),
309 DEVMETHOD(device_shutdown, mskc_shutdown),
310
311 /* bus interface */
312 DEVMETHOD(bus_print_child, bus_generic_print_child),
313 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
314
315 { NULL, NULL }
316};
317
318static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc));
319static devclass_t mskc_devclass;
320
321static device_method_t msk_methods[] = {
322 /* Device interface */
323 DEVMETHOD(device_probe, msk_probe),
324 DEVMETHOD(device_attach, msk_attach),
325 DEVMETHOD(device_detach, msk_detach),
326 DEVMETHOD(device_shutdown, bus_generic_shutdown),
327
328 /* bus interface */
329 DEVMETHOD(bus_print_child, bus_generic_print_child),
330 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
331
332 /* MII interface */
333 DEVMETHOD(miibus_readreg, msk_miibus_readreg),
334 DEVMETHOD(miibus_writereg, msk_miibus_writereg),
335 DEVMETHOD(miibus_statchg, msk_miibus_statchg),
336
337 { NULL, NULL }
338};
339
340static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc));
341static devclass_t msk_devclass;
342
343DECLARE_DUMMY_MODULE(if_msk);
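/*
 * Attachment hierarchy: mskc binds to the PCI device and drives the shared
 * controller; mskc_attach() adds one msk child per GMAC port, and miibus in
 * turn attaches below each msk port for PHY access.
 */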
344DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, NULL, NULL);
345DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, NULL, NULL);
346DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL);
 347
348static int mskc_intr_rate = 0;
349static int mskc_process_limit = MSK_PROC_DEFAULT;
350
351TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate);
352TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit);
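/*
 * Both knobs can be set as loader tunables, e.g. in /boot/loader.conf
 * (the values below are only illustrative):
 *	hw.mskc.intr_rate="4000"
 *	hw.mskc.process_limit="100"
 * They are also exported per controller as sysctl nodes; see mskc_attach().
 */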
353
354static int
355msk_miibus_readreg(device_t dev, int phy, int reg)
356{
357 struct msk_if_softc *sc_if;
358
359 if (phy != PHY_ADDR_MARV)
360 return (0);
361
362 sc_if = device_get_softc(dev);
363
364 return (msk_phy_readreg(sc_if, phy, reg));
365}
366
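/*
 * PHY registers are accessed indirectly through the GMAC's SMI interface:
 * a read loads the PHY/register address plus GM_SMI_CT_OP_RD into
 * GM_SMI_CTRL and polls until GM_SMI_CT_RD_VAL is set before fetching the
 * result from GM_SMI_DATA; a write stores the value in GM_SMI_DATA first
 * and then polls for GM_SMI_CT_BUSY to clear.
 */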
367static int
368msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg)
369{
370 struct msk_softc *sc;
371 int i, val;
372
373 sc = sc_if->msk_softc;
374
375 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
376 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
377
378 for (i = 0; i < MSK_TIMEOUT; i++) {
379 DELAY(1);
380 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL);
381 if ((val & GM_SMI_CT_RD_VAL) != 0) {
382 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA);
383 break;
384 }
385 }
386
387 if (i == MSK_TIMEOUT) {
388 if_printf(sc_if->msk_ifp, "phy failed to come ready\n");
389 val = 0;
390 }
391
392 return (val);
393}
394
395static int
396msk_miibus_writereg(device_t dev, int phy, int reg, int val)
397{
398 struct msk_if_softc *sc_if;
399
400 if (phy != PHY_ADDR_MARV)
401 return (0);
402
403 sc_if = device_get_softc(dev);
404
405 return (msk_phy_writereg(sc_if, phy, reg, val));
406}
407
408static int
409msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val)
410{
411 struct msk_softc *sc;
412 int i;
413
414 sc = sc_if->msk_softc;
415
416 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val);
417 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL,
418 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg));
419 for (i = 0; i < MSK_TIMEOUT; i++) {
420 DELAY(1);
421 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) &
422 GM_SMI_CT_BUSY) == 0)
423 break;
424 }
425 if (i == MSK_TIMEOUT)
426 if_printf(sc_if->msk_ifp, "phy write timeout\n");
427
428 return (0);
429}
430
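#if 0
/*
 * Illustrative (not compiled) use of the SMI helpers above: unmask the PHY
 * FIFO error interrupt and read the mask back.  PHY_MARV_INT_MASK and
 * PHY_M_IS_FIFO_ERROR are the same definitions used in msk_miibus_statchg().
 */
static void
msk_phy_intr_example(struct msk_if_softc *sc_if)
{
	int mask;

	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK,
	    PHY_M_IS_FIFO_ERROR);
	mask = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK);
	if (mask != PHY_M_IS_FIFO_ERROR)
		if_printf(sc_if->msk_ifp, "unexpected PHY intr mask %#x\n",
		    mask);
}
#endif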
431static void
432msk_miibus_statchg(device_t dev)
433{
434 struct msk_if_softc *sc_if;
435 struct msk_softc *sc;
436 struct mii_data *mii;
437 struct ifnet *ifp;
438 uint32_t gmac;
439
440 sc_if = device_get_softc(dev);
441 sc = sc_if->msk_softc;
442
443 mii = device_get_softc(sc_if->msk_miibus);
444 ifp = sc_if->msk_ifp;
445
446 sc_if->msk_link = 0;
447 if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
448 (IFM_AVALID | IFM_ACTIVE)) {
449 switch (IFM_SUBTYPE(mii->mii_media_active)) {
450 case IFM_10_T:
451 case IFM_100_TX:
 452			sc_if->msk_link = 1;
453 break;
454 case IFM_1000_T:
455 case IFM_1000_SX:
456 case IFM_1000_LX:
457 case IFM_1000_CX:
458 if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0)
459 sc_if->msk_link = 1;
460 break;
461 }
462 }
463
464 if (sc_if->msk_link != 0) {
465 /* Enable Tx FIFO Underrun. */
466 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK),
467 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR);
 468		/*
 469		 * Because mii(4) notifies msk(4) when it detects a link
 470		 * state change, there is no need to enable automatic
 471		 * speed/flow-control/duplex updates.
 472		 */
473 gmac = GM_GPCR_AU_ALL_DIS;
474 switch (IFM_SUBTYPE(mii->mii_media_active)) {
475 case IFM_1000_SX:
476 case IFM_1000_T:
477 gmac |= GM_GPCR_SPEED_1000;
478 break;
479 case IFM_100_TX:
480 gmac |= GM_GPCR_SPEED_100;
481 break;
482 case IFM_10_T:
483 break;
484 }
485
 486		if ((mii->mii_media_active & IFM_GMASK) & IFM_FDX)
 487			gmac |= GM_GPCR_DUP_FULL;
488 else
489 gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS;
490 /* Disable Rx flow control. */
491 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0)
492 gmac |= GM_GPCR_FC_RX_DIS;
493 /* Disable Tx flow control. */
494 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0)
495 gmac |= GM_GPCR_FC_TX_DIS;
496 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
497 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
498 /* Read again to ensure writing. */
499 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
500
501 gmac = GMC_PAUSE_OFF;
502 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) &&
503 ((mii->mii_media_active & IFM_GMASK) & IFM_FDX))
504 gmac = GMC_PAUSE_ON;
505 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac);
506
507 /* Enable PHY interrupt for FIFO underrun/overflow. */
508 msk_phy_writereg(sc_if, PHY_ADDR_MARV,
509 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR);
510 } else {
511 /*
512 * Link state changed to down.
513 * Disable PHY interrupts.
514 */
515 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
516 /* Disable Rx/Tx MAC. */
517 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
518 if (gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) {
519 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
520 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac);
521 /* Read again to ensure writing. */
522 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
523 }
524 }
525}
526
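/*
 * Program the unicast/multicast Rx filter.  Multicast addresses are folded
 * into a 64-bit hash: bits 5..0 of the big-endian CRC32 of each address
 * select one of 64 bits, and the resulting bitmap is written 16 bits at a
 * time to GM_MC_ADDR_H1..H4.  IFF_PROMISC disables both the unicast and the
 * multicast filter.
 */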
527static void
 528msk_rxfilter(struct msk_if_softc *sc_if)
529{
530 struct msk_softc *sc;
531 struct ifnet *ifp;
532 struct ifmultiaddr *ifma;
533 uint32_t mchash[2];
534 uint32_t crc;
535 uint16_t mode;
536
537 sc = sc_if->msk_softc;
538 ifp = sc_if->msk_ifp;
539
540 bzero(mchash, sizeof(mchash));
541 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL);
542 if ((ifp->if_flags & IFF_PROMISC) != 0) {
543 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
544 } else if ((ifp->if_flags & IFF_ALLMULTI) != 0) {
545 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
546 mchash[0] = 0xffff;
547 mchash[1] = 0xffff;
 548	} else {
 549		mode |= GM_RXCR_UCF_ENA;
 550		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
551 if (ifma->ifma_addr->sa_family != AF_LINK)
552 continue;
553 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
554 ifma->ifma_addr), ETHER_ADDR_LEN);
555 /* Just want the 6 least significant bits. */
556 crc &= 0x3f;
557 /* Set the corresponding bit in the hash table. */
558 mchash[crc >> 5] |= 1 << (crc & 0x1f);
559 }
560 if (mchash[0] != 0 || mchash[1] != 0)
561 mode |= GM_RXCR_MCF_ENA;
562 }
563
564 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1,
565 mchash[0] & 0xffff);
566 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2,
567 (mchash[0] >> 16) & 0xffff);
568 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3,
569 mchash[1] & 0xffff);
570 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4,
571 (mchash[1] >> 16) & 0xffff);
572 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode);
573}
574
575static void
576msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp)
577{
578 struct msk_softc *sc;
579
580 sc = sc_if->msk_softc;
581 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
582 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
583 RX_VLAN_STRIP_ON);
584 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
585 TX_VLAN_TAG_ON);
586 } else {
587 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
588 RX_VLAN_STRIP_OFF);
589 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
590 TX_VLAN_TAG_OFF);
591 }
592}
593
594static int
595msk_init_rx_ring(struct msk_if_softc *sc_if)
596{
597 struct msk_ring_data *rd;
598 struct msk_rxdesc *rxd;
599 int i, prod;
600
601 sc_if->msk_cdata.msk_rx_cons = 0;
602 sc_if->msk_cdata.msk_rx_prod = 0;
603 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
604
605 rd = &sc_if->msk_rdata;
606 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);
607 prod = sc_if->msk_cdata.msk_rx_prod;
608 for (i = 0; i < MSK_RX_RING_CNT; i++) {
609 rxd = &sc_if->msk_cdata.msk_rxdesc[prod];
610 rxd->rx_m = NULL;
611 rxd->rx_le = &rd->msk_rx_ring[prod];
 612		if (msk_newbuf(sc_if, prod, 1) != 0)
613 return (ENOBUFS);
614 MSK_INC(prod, MSK_RX_RING_CNT);
615 }
616
617 /* Update prefetch unit. */
618 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1;
619 CSR_WRITE_2(sc_if->msk_softc,
620 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
621 sc_if->msk_cdata.msk_rx_prod);
622
623 return (0);
624}
625
626#ifdef MSK_JUMBO
627static int
628msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if)
629{
630 struct msk_ring_data *rd;
631 struct msk_rxdesc *rxd;
632 int i, prod;
633
634 MSK_IF_LOCK_ASSERT(sc_if);
635
636 sc_if->msk_cdata.msk_rx_cons = 0;
637 sc_if->msk_cdata.msk_rx_prod = 0;
638 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM;
639
640 rd = &sc_if->msk_rdata;
641 bzero(rd->msk_jumbo_rx_ring,
642 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT);
643 prod = sc_if->msk_cdata.msk_rx_prod;
644 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
645 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod];
646 rxd->rx_m = NULL;
647 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod];
648 if (msk_jumbo_newbuf(sc_if, prod) != 0)
649 return (ENOBUFS);
650 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT);
651 }
652
653 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
654 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
655 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
656
657 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1;
658 CSR_WRITE_2(sc_if->msk_softc,
659 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG),
660 sc_if->msk_cdata.msk_rx_prod);
661
662 return (0);
663}
664#endif
665
666static void
667msk_init_tx_ring(struct msk_if_softc *sc_if)
668{
669 struct msk_ring_data *rd;
670 struct msk_txdesc *txd;
671 int i;
672
673 sc_if->msk_cdata.msk_tx_prod = 0;
674 sc_if->msk_cdata.msk_tx_cons = 0;
675 sc_if->msk_cdata.msk_tx_cnt = 0;
676
677 rd = &sc_if->msk_rdata;
678 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);
679 for (i = 0; i < MSK_TX_RING_CNT; i++) {
680 txd = &sc_if->msk_cdata.msk_txdesc[i];
681 txd->tx_m = NULL;
682 txd->tx_le = &rd->msk_tx_ring[i];
683 }
684}
685
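/*
 * Hand the descriptor's current mbuf back to the hardware unchanged; used
 * on the Rx path when a replacement buffer cannot be allocated, so the ring
 * slot is never left empty (the received frame is simply dropped).
 */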
686static __inline void
687msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx)
688{
689 struct msk_rx_desc *rx_le;
690 struct msk_rxdesc *rxd;
691 struct mbuf *m;
692
693 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
694 m = rxd->rx_m;
695 rx_le = rxd->rx_le;
696 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
697}
698
699#ifdef MSK_JUMBO
700static __inline void
701msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx)
702{
703 struct msk_rx_desc *rx_le;
704 struct msk_rxdesc *rxd;
705 struct mbuf *m;
706
707 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
708 m = rxd->rx_m;
709 rx_le = rxd->rx_le;
710 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER);
711}
712#endif
713
714static int
 715msk_newbuf(struct msk_if_softc *sc_if, int idx, int init)
716{
717 struct msk_rx_desc *rx_le;
718 struct msk_rxdesc *rxd;
719 struct mbuf *m;
720 bus_dma_segment_t seg;
721 bus_dmamap_t map;
 722	int error, nseg;
 723
 724	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
725 if (m == NULL)
726 return (ENOBUFS);
727
728 m->m_len = m->m_pkthdr.len = MCLBYTES;
729 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
730 m_adj(m, ETHER_ALIGN);
 731
732 error = bus_dmamap_load_mbuf_segment(sc_if->msk_cdata.msk_rx_tag,
733 sc_if->msk_cdata.msk_rx_sparemap,
734 m, &seg, 1, &nseg, BUS_DMA_NOWAIT);
735 if (error) {
 736		m_freem(m);
737 if (init)
738 if_printf(&sc_if->arpcom.ac_if, "can't load RX mbuf\n");
739 return (error);
 740	}
741
742 rxd = &sc_if->msk_cdata.msk_rxdesc[idx];
743 if (rxd->rx_m != NULL) {
744 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap,
745 BUS_DMASYNC_POSTREAD);
746 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap);
747 }
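	/*
	 * The new mbuf was loaded into the spare map above, so a load failure
	 * leaves the old mapping untouched; on success the descriptor's map
	 * becomes the new spare and the freshly loaded map stays with the
	 * descriptor.
	 */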
749 map = rxd->rx_dmamap;
750 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap;
751 sc_if->msk_cdata.msk_rx_sparemap = map;
 752
753 rxd->rx_m = m;
754 rx_le = rxd->rx_le;
755 rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr));
 756	rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER);
757
758 return (0);
759}
760
761#ifdef MSK_JUMBO
762static int
763msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
764{
765 struct msk_rx_desc *rx_le;
766 struct msk_rxdesc *rxd;
767 struct mbuf *m;
768 bus_dma_segment_t segs[1];
769 bus_dmamap_t map;
770 int nsegs;
771 void *buf;
772
773 MGETHDR(m, M_DONTWAIT, MT_DATA);
774 if (m == NULL)
775 return (ENOBUFS);
776 buf = msk_jalloc(sc_if);
777 if (buf == NULL) {
778 m_freem(m);
779 return (ENOBUFS);
780 }
781 /* Attach the buffer to the mbuf. */
782 MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0,
783 EXT_NET_DRV);
784 if ((m->m_flags & M_EXT) == 0) {
785 m_freem(m);
786 return (ENOBUFS);
787 }
788 m->m_pkthdr.len = m->m_len = MSK_JLEN;
789 m_adj(m, ETHER_ALIGN);
790
791 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag,
792 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs,
793 BUS_DMA_NOWAIT) != 0) {
794 m_freem(m);
795 return (ENOBUFS);
796 }
797 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
798
799 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx];
800 if (rxd->rx_m != NULL) {
801 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
802 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
803 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
804 rxd->rx_dmamap);
805 }
806 map = rxd->rx_dmamap;
807 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap;
808 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map;
809 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap,
810 BUS_DMASYNC_PREREAD);
811 rxd->rx_m = m;
812 rx_le = rxd->rx_le;
813 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr));
814 rx_le->msk_control =
815 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER);
816
817 return (0);
818}
819#endif
820
821/*
822 * Set media options.
823 */
824static int
825msk_mediachange(struct ifnet *ifp)
826{
827 struct msk_if_softc *sc_if = ifp->if_softc;
828 struct mii_data *mii;
 829	int error;
830
831 mii = device_get_softc(sc_if->msk_miibus);
 832	error = mii_mediachg(mii);
 833
 834	return (error);
835}
836
837/*
838 * Report current media status.
839 */
840static void
841msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
842{
843 struct msk_if_softc *sc_if = ifp->if_softc;
844 struct mii_data *mii;
845
846 mii = device_get_softc(sc_if->msk_miibus);
847 mii_pollstat(mii);
848
849 ifmr->ifm_active = mii->mii_media_active;
850 ifmr->ifm_status = mii->mii_media_status;
851}
852
853static int
854msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
855{
856 struct msk_if_softc *sc_if;
857 struct ifreq *ifr;
858 struct mii_data *mii;
859 int error, mask;
860
861 sc_if = ifp->if_softc;
862 ifr = (struct ifreq *)data;
863 error = 0;
864
865 switch(command) {
866 case SIOCSIFMTU:
867#ifdef MSK_JUMBO
868 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) {
869 error = EINVAL;
870 break;
871 }
872 if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE &&
873 ifr->ifr_mtu > MSK_MAX_FRAMELEN) {
874 error = EINVAL;
875 break;
876 }
877 ifp->if_mtu = ifr->ifr_mtu;
878 if ((ifp->if_flags & IFF_RUNNING) != 0)
879 msk_init(sc_if);
880#else
881 error = EOPNOTSUPP;
882#endif
883 break;
884
885 case SIOCSIFFLAGS:
886 if (ifp->if_flags & IFF_UP) {
887 if (ifp->if_flags & IFF_RUNNING) {
888 if (((ifp->if_flags ^ sc_if->msk_if_flags)
889 & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
890 msk_rxfilter(sc_if);
891 } else {
892 if (sc_if->msk_detach == 0)
893 msk_init(sc_if);
894 }
895 } else {
896 if (ifp->if_flags & IFF_RUNNING)
897 msk_stop(sc_if);
898 }
899 sc_if->msk_if_flags = ifp->if_flags;
900 break;
901
902 case SIOCADDMULTI:
903 case SIOCDELMULTI:
904 if (ifp->if_flags & IFF_RUNNING)
 905			msk_rxfilter(sc_if);
906 break;
907
908 case SIOCGIFMEDIA:
909 case SIOCSIFMEDIA:
910 mii = device_get_softc(sc_if->msk_miibus);
911 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
912 break;
913
914 case SIOCSIFCAP:
915 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
916 if ((mask & IFCAP_TXCSUM) != 0) {
917 ifp->if_capenable ^= IFCAP_TXCSUM;
918 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 &&
919 (IFCAP_TXCSUM & ifp->if_capabilities) != 0)
920 ifp->if_hwassist |= MSK_CSUM_FEATURES;
921 else
922 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
923 }
924#ifdef notyet
925 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
926 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
927 msk_setvlan(sc_if, ifp);
928 }
929#endif
930
931 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
932 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
933 /*
934 * In Yukon EC Ultra, TSO & checksum offload is not
 935			 * supported for jumbo frames.
936 */
937 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
938 ifp->if_capenable &= ~IFCAP_TXCSUM;
939 }
940 break;
941
942 default:
943 error = ether_ioctl(ifp, command, data);
944 break;
945 }
946
947 return (error);
948}
949
950static int
951mskc_probe(device_t dev)
952{
953 const struct msk_product *mp;
954 uint16_t vendor, devid;
955
956 vendor = pci_get_vendor(dev);
957 devid = pci_get_device(dev);
958 for (mp = msk_products; mp->msk_name != NULL; ++mp) {
959 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) {
960 device_set_desc(dev, mp->msk_name);
961 return (0);
962 }
963 }
964 return (ENXIO);
965}
966
967static int
968mskc_setup_rambuffer(struct msk_softc *sc)
969{
970 int next;
971 int i;
972
973 /* Get adapter SRAM size. */
 974	sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4;
975 if (bootverbose) {
976 device_printf(sc->msk_dev,
977 "RAM buffer size : %dKB\n", sc->msk_ramsize);
978 }
979 if (sc->msk_ramsize == 0)
980 return (0);
981 sc->msk_pflags |= MSK_FLAG_RAMBUF;
982
 983	/*
 984	 * Give the receiver 2/3 of the memory and round down to a
 985	 * multiple of 1024.  The Tx/Rx RAM buffer sizes of Yukon II
 986	 * should be multiples of 1024.
 987	 */
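	/*
	 * Worked example (illustrative numbers): with 48KB of SRAM,
	 * msk_rxqsize = rounddown(48 * 1024 * 2 / 3, 1024) = 32768 bytes and
	 * msk_txqsize = 48 * 1024 - 32768 = 16384 bytes.
	 */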
988 sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024);
989 sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize;
990 for (i = 0, next = 0; i < sc->msk_num_port; i++) {
991 sc->msk_rxqstart[i] = next;
992 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1;
993 next = sc->msk_rxqend[i] + 1;
994 sc->msk_txqstart[i] = next;
995 sc->msk_txqend[i] = next + sc->msk_txqsize - 1;
996 next = sc->msk_txqend[i] + 1;
997 if (bootverbose) {
998 device_printf(sc->msk_dev,
999 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i,
1000 sc->msk_rxqsize / 1024, sc->msk_rxqstart[i],
1001 sc->msk_rxqend[i]);
1002 device_printf(sc->msk_dev,
1003 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i,
1004 sc->msk_txqsize / 1024, sc->msk_txqstart[i],
1005 sc->msk_txqend[i]);
1006 }
1007 }
1008
1009 return (0);
1010}
1011
1012static void
1013mskc_phy_power(struct msk_softc *sc, int mode)
1014{
 1015	uint32_t our, val;
1016 int i;
1017
1018 switch (mode) {
1019 case MSK_PHY_POWERUP:
1020 /* Switch power to VCC (WA for VAUX problem). */
1021 CSR_WRITE_1(sc, B0_POWER_CTRL,
1022 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
1023 /* Disable Core Clock Division, set Clock Select to 0. */
1024 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
1025
1026 val = 0;
1027 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1028 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1029 /* Enable bits are inverted. */
1030 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1031 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1032 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1033 }
1034 /*
1035 * Enable PCI & Core Clock, enable clock gating for both Links.
1036 */
1037 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1038
1039 our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
1040 our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
 1041		if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
1042 if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1043 /* Deassert Low Power for 1st PHY. */
 1044				our |= PCI_Y2_PHY1_COMA;
 1045				if (sc->msk_num_port > 1)
 1046					our |= PCI_Y2_PHY2_COMA;
 1047			}
 1048		}
1049 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U ||
1050 sc->msk_hw_id == CHIP_ID_YUKON_EX ||
1051 sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) {
1052 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4);
1053 val &= (PCI_FORCE_ASPM_REQUEST |
1054 PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY |
1055 PCI_ASPM_CLKRUN_REQUEST);
 1056			/* Set all bits to 0 except bits 15..12. */
1057 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val);
1058 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5);
1059 val &= PCI_CTL_TIM_VMAIN_AV_MSK;
1060 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val);
 1061			CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0);
 1062			CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON);
1063 /*
1064 * Disable status race, workaround for
1065 * Yukon EC Ultra & Yukon EX.
1066 */
1067 val = CSR_READ_4(sc, B2_GP_IO);
1068 val |= GLB_GPIO_STAT_RACE_DIS;
1069 CSR_WRITE_4(sc, B2_GP_IO, val);
1070 CSR_READ_4(sc, B2_GP_IO);
 1071		}
1072 /* Release PHY from PowerDown/COMA mode. */
1073 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our);
1074
1075 for (i = 0; i < sc->msk_num_port; i++) {
1076 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1077 GMLC_RST_SET);
1078 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL),
1079 GMLC_RST_CLR);
1080 }
1081 break;
1082 case MSK_PHY_POWERDOWN:
 1083		val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
1084 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD;
1085 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1086 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1087 val &= ~PCI_Y2_PHY1_COMA;
1088 if (sc->msk_num_port > 1)
1089 val &= ~PCI_Y2_PHY2_COMA;
1090 }
 1091		CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
1092
1093 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
1094 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
1095 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS;
1096 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1097 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) {
1098 /* Enable bits are inverted. */
1099 val = 0;
1100 }
1101 /*
1102 * Disable PCI & Core Clock, disable clock gating for
1103 * both Links.
1104 */
1105 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val);
1106 CSR_WRITE_1(sc, B0_POWER_CTRL,
1107 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
1108 break;
1109 default:
1110 break;
1111 }
1112}
1113
1114static void
1115mskc_reset(struct msk_softc *sc)
1116{
1117 bus_addr_t addr;
1118 uint16_t status;
1119 uint32_t val;
1120 int i;
1121
 1122	/* Disable ASF. */
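	/*
	 * ASF (Alert Standard Format) is remote-management firmware that runs
	 * on an embedded microcontroller of these chips and can interfere
	 * with normal driver operation, so it is stopped before the software
	 * reset below.
	 */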
1123 if (sc->msk_hw_id >= CHIP_ID_YUKON_XL &&
1124 sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) {
1125 if (sc->msk_hw_id == CHIP_ID_YUKON_EX ||
1126 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) {
1127 CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
1128 status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR);
1129 /* Clear AHB bridge & microcontroller reset. */
1130 status &= ~(Y2_ASF_HCU_CCSR_AHB_RST |
1131 Y2_ASF_HCU_CCSR_CPU_RST_MODE);
1132 /* Clear ASF microcontroller state. */
1133 status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK;
1134 status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK;
1135 CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status);
1136 CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0);
1137 } else {
1138 CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
1139 }
1140 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE);
1141 /*
1142 * Since we disabled ASF, S/W reset is required for
1143 * Power Management.
1144 */
1145 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1146 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
 1147	}
1148
1149 /* Clear all error bits in the PCI status register. */
1150 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
1151 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1152
1153 pci_write_config(sc->msk_dev, PCIR_STATUS, status |
1154 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
1155 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
1156 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR);
1157
1158 switch (sc->msk_bustype) {
1159 case MSK_PEX_BUS:
1160 /* Clear all PEX errors. */
1161 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
1162 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
1163 if ((val & PEX_RX_OV) != 0) {
1164 sc->msk_intrmask &= ~Y2_IS_HW_ERR;
1165 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
1166 }
1167 break;
1168 case MSK_PCI_BUS:
1169 case MSK_PCIX_BUS:
1170 /* Set Cache Line Size to 2(8bytes) if configured to 0. */
1171 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1);
1172 if (val == 0)
1173 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1);
1174 if (sc->msk_bustype == MSK_PCIX_BUS) {
1175 /* Set Cache Line Size opt. */
 1176			val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1);
 1177			val |= PCI_CLS_OPT;
 1178			CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val);
1179 }
1180 break;
1181 }
1182 /* Set PHY power state. */
1183 mskc_phy_power(sc, MSK_PHY_POWERUP);
1184
1185 /* Reset GPHY/GMAC Control */
1186 for (i = 0; i < sc->msk_num_port; i++) {
1187 /* GPHY Control reset. */
1188 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET);
1189 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR);
1190 /* GMAC Control reset. */
1191 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET);
1192 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR);
1193 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF);
1194 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
1195 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL),
1196 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
1197 GMC_BYP_RETR_ON);
1198 }
 1199	}
1200 if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) {
1201 /* Disable PCIe PHY powerdown(reg 0x80, bit7). */
1202 CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080);
1203 }
1204 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1205
1206 /* LED On. */
1207 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON);
1208
1209 /* Clear TWSI IRQ. */
1210 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ);
1211
1212 /* Turn off hardware timer. */
1213 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP);
1214 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ);
1215
1216 /* Turn off descriptor polling. */
1217 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP);
1218
1219 /* Turn off time stamps. */
1220 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP);
1221 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
1222
1223 if (sc->msk_hw_id == CHIP_ID_YUKON_XL ||
1224 sc->msk_hw_id == CHIP_ID_YUKON_EC ||
1225 sc->msk_hw_id == CHIP_ID_YUKON_FE) {
1226 /* Configure timeout values. */
1227 for (i = 0; i < sc->msk_num_port; i++) {
1228 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
1229 RI_RST_SET);
1230 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL),
1231 RI_RST_CLR);
1232 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1),
1233 MSK_RI_TO_53);
1234 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1),
1235 MSK_RI_TO_53);
1236 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1),
1237 MSK_RI_TO_53);
1238 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1),
1239 MSK_RI_TO_53);
1240 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1),
1241 MSK_RI_TO_53);
1242 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1),
1243 MSK_RI_TO_53);
1244 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2),
1245 MSK_RI_TO_53);
1246 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2),
1247 MSK_RI_TO_53);
1248 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2),
1249 MSK_RI_TO_53);
1250 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2),
1251 MSK_RI_TO_53);
1252 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2),
1253 MSK_RI_TO_53);
1254 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2),
1255 MSK_RI_TO_53);
1256 }
1257 }
1258
1259 /* Disable all interrupts. */
1260 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1261 CSR_READ_4(sc, B0_HWE_IMSK);
1262 CSR_WRITE_4(sc, B0_IMSK, 0);
1263 CSR_READ_4(sc, B0_IMSK);
1264
 1265	/*
 1266	 * On dual-port PCI-X cards, status can be received out of order due
 1267	 * to split transactions; limit outstanding split transactions below.
 1268	 */
 1269	if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) {
 1270		uint16_t pcix_cmd;
 1271
1272 pcix_cmd = pci_read_config(sc->msk_dev,
1273 sc->msk_pcixcap + PCIXR_COMMAND, 2);
 1274		/* Clear Max Outstanding Split Transactions. */
 1275		pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS;
 1276		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1277 pci_write_config(sc->msk_dev,
1278 sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2);
 1279		CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1280 }
1281 if (sc->msk_pciecap != 0) {
1282 /* Change Max. Read Request Size to 2048 bytes. */
1283 if (pcie_get_max_readrq(sc->msk_dev) ==
1284 PCIEM_DEVCTL_MAX_READRQ_512) {
1285 pcie_set_max_readrq(sc->msk_dev,
1286 PCIEM_DEVCTL_MAX_READRQ_2048);
1287 }
1288 }
1289
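	/*
	 * The status list is the ring through which the chip reports Rx/Tx
	 * completion events (consumed in mskc_handle_events()); the STAT_*
	 * timers and FIFO watermarks programmed below determine how many
	 * events are batched before a Y2_IS_STAT_BMU interrupt is raised.
	 */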
1290 /* Clear status list. */
1291 bzero(sc->msk_stat_ring,
1292 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT);
1293 sc->msk_stat_cons = 0;
1294 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET);
1295 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR);
1296 /* Set the status list base address. */
1297 addr = sc->msk_stat_ring_paddr;
1298 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr));
1299 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr));
1300 /* Set the status list last index. */
1301 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1);
1302 if (sc->msk_hw_id == CHIP_ID_YUKON_EC &&
1303 sc->msk_hw_rev == CHIP_REV_YU_EC_A1) {
1304 /* WA for dev. #4.3 */
1305 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
1306 /* WA for dev. #4.18 */
1307 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21);
1308 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07);
1309 } else {
1310 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a);
1311 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10);
1312 if (sc->msk_hw_id == CHIP_ID_YUKON_XL &&
1313 sc->msk_hw_rev == CHIP_REV_YU_XL_A0)
1314 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04);
1315 else
1316 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10);
1317 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190);
1318 }
1319 /*
1320 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI.
1321 */
1322 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000));
1323
1324 /* Enable status unit. */
1325 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON);
1326
1327 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START);
1328 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START);
1329 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START);
1330}
1331
1332static int
1333msk_probe(device_t dev)
1334{
1335 struct msk_softc *sc = device_get_softc(device_get_parent(dev));
1336 char desc[100];
1337
1338 /*
1339 * Not much to do here. We always know there will be
1340 * at least one GMAC present, and if there are two,
1341 * mskc_attach() will create a second device instance
1342 * for us.
1343 */
1344 ksnprintf(desc, sizeof(desc),
1345 "Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
1346 model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
1347 sc->msk_hw_rev);
1348 device_set_desc_copy(dev, desc);
1349
1350 return (0);
1351}
1352
1353static int
1354msk_attach(device_t dev)
1355{
1356 struct msk_softc *sc = device_get_softc(device_get_parent(dev));
1357 struct msk_if_softc *sc_if = device_get_softc(dev);
1358 struct ifnet *ifp = &sc_if->arpcom.ac_if;
1359 int i, port, error;
1360 uint8_t eaddr[ETHER_ADDR_LEN];
1361
1362 port = *(int *)device_get_ivars(dev);
1363 KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);
1364
1365 kfree(device_get_ivars(dev), M_DEVBUF);
1366 device_set_ivars(dev, NULL);
1367
1368 callout_init(&sc_if->msk_tick_ch);
1369 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
1370
1371 sc_if->msk_if_dev = dev;
1372 sc_if->msk_port = port;
1373 sc_if->msk_softc = sc;
1374 sc_if->msk_ifp = ifp;
 1375	sc_if->msk_flags = sc->msk_pflags;
1376 sc->msk_if[port] = sc_if;
1377
1378 /* Setup Tx/Rx queue register offsets. */
1379 if (port == MSK_PORT_A) {
1380 sc_if->msk_txq = Q_XA1;
1381 sc_if->msk_txsq = Q_XS1;
1382 sc_if->msk_rxq = Q_R1;
1383 } else {
1384 sc_if->msk_txq = Q_XA2;
1385 sc_if->msk_txsq = Q_XS2;
1386 sc_if->msk_rxq = Q_R2;
1387 }
1388
1389 error = msk_txrx_dma_alloc(sc_if);
1390 if (error)
1391 goto fail;
1392
1393 ifp->if_softc = sc_if;
1394 ifp->if_mtu = ETHERMTU;
1395 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1396 ifp->if_init = msk_init;
1397 ifp->if_ioctl = msk_ioctl;
1398 ifp->if_start = msk_start;
1399 ifp->if_watchdog = msk_watchdog;
1400 ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
1401 ifq_set_ready(&ifp->if_snd);
1402
1403#ifdef notyet
1404 /*
 1405	 * The IFCAP_RXCSUM capability is intentionally disabled as the
 1406	 * hardware has a serious bug in Rx checksum offload for all Yukon II
 1407	 * family hardware.  There seems to be a workaround that makes it work
 1408	 * sometimes, but the workaround also has to check OP code sequences
 1409	 * to verify whether the OP code is correct, and sometimes it must
 1410	 * compute the IP/TCP/UDP checksum in the driver to verify the
 1411	 * checksum computed by the hardware.  If software has to compute the
 1412	 * checksum anyway to verify the hardware's checksum, why have the
 1413	 * hardware compute it?  There is no reason to spend time making Rx
 1414	 * checksum offload work on Yukon II hardware.
1415 */
1416 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
1417 IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
1418 ifp->if_hwassist = MSK_CSUM_FEATURES;
1419 ifp->if_capenable = ifp->if_capabilities;
1420#endif
1421
1422 /*
1423 * Get station address for this interface. Note that
1424 * dual port cards actually come with three station
1425 * addresses: one for each port, plus an extra. The
1426 * extra one is used by the SysKonnect driver software
1427 * as a 'virtual' station address for when both ports
1428 * are operating in failover mode. Currently we don't
1429 * use this extra address.
1430 */
1431 for (i = 0; i < ETHER_ADDR_LEN; i++)
1432 eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);
1433
1434 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
1435
1436 /*
1437 * Do miibus setup.
1438 */
1439 error = mii_phy_probe(dev, &sc_if->msk_miibus,
1440 msk_mediachange, msk_mediastatus);
1441 if (error) {
1442 device_printf(sc_if->msk_if_dev, "no PHY found!\n");
1443 goto fail;
1444 }
1445
1446 /*
1447 * Call MI attach routine. Can't hold locks when calling into ether_*.
1448 */
1449 ether_ifattach(ifp, eaddr, &sc->msk_serializer);
1450#if 0
1451 /*
1452 * Tell the upper layer(s) we support long frames.
1453 * Must appear after the call to ether_ifattach() because
1454 * ether_ifattach() sets ifi_hdrlen to the default value.
1455 */
1456 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
1457#endif
1458
1459 return 0;
1460fail:
1461 msk_detach(dev);
1462 sc->msk_if[port] = NULL;
1463 return (error);
1464}
1465
1466/*
1467 * Attach the interface. Allocate softc structures, do ifmedia
1468 * setup and ethernet/BPF attach.
1469 */
1470static int
1471mskc_attach(device_t dev)
1472{
1473 struct msk_softc *sc;
 1474	int error, *port, cpuid;
1475
1476 sc = device_get_softc(dev);
1477 sc->msk_dev = dev;
1478 lwkt_serialize_init(&sc->msk_serializer);
1479
1480 /*
 1481	 * Initialize sysctl variables
1482 */
1483 sc->msk_process_limit = mskc_process_limit;
1484 sc->msk_intr_rate = mskc_intr_rate;
1485
1486#ifndef BURN_BRIDGES
1487 /*
1488 * Handle power management nonsense.
1489 */
1490 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
1491 uint32_t irq, bar0, bar1;
1492
1493 /* Save important PCI config data. */
1494 bar0 = pci_read_config(dev, PCIR_BAR(0), 4);
1495 bar1 = pci_read_config(dev, PCIR_BAR(1), 4);
1496 irq = pci_read_config(dev, PCIR_INTLINE, 4);
1497
1498 /* Reset the power state. */
1499 device_printf(dev, "chip is in D%d power mode "
1500 "-- setting to D0\n", pci_get_powerstate(dev));
1501
1502 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
1503
1504 /* Restore PCI config data. */
1505 pci_write_config(dev, PCIR_BAR(0), bar0, 4);
1506 pci_write_config(dev, PCIR_BAR(1), bar1, 4);
1507 pci_write_config(dev, PCIR_INTLINE, irq, 4);
1508 }
1509#endif /* BURN_BRIDGES */
1510
1511 /*
1512 * Map control/status registers.
1513 */
1514 pci_enable_busmaster(dev);
1515
1516 /*
1517 * Allocate I/O resource
1518 */
1519#ifdef MSK_USEIOSPACE
1520 sc->msk_res_type = SYS_RES_IOPORT;
1521 sc->msk_res_rid = PCIR_BAR(1);
1522#else
1523 sc->msk_res_type = SYS_RES_MEMORY;
1524 sc->msk_res_rid = PCIR_BAR(0);
1525#endif
1526 sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
1527 &sc->msk_res_rid, RF_ACTIVE);
1528 if (sc->msk_res == NULL) {
1529 if (sc->msk_res_type == SYS_RES_MEMORY) {
1530 sc->msk_res_type = SYS_RES_IOPORT;
1531 sc->msk_res_rid = PCIR_BAR(1);
1532 } else {
1533 sc->msk_res_type = SYS_RES_MEMORY;
1534 sc->msk_res_rid = PCIR_BAR(0);
1535 }
1536 sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type,
1537 &sc->msk_res_rid,
1538 RF_ACTIVE);
1539 if (sc->msk_res == NULL) {
1540 device_printf(dev, "couldn't allocate %s resources\n",
1541 sc->msk_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
1542 return (ENXIO);
1543 }
1544 }
1545 sc->msk_res_bt = rman_get_bustag(sc->msk_res);
1546 sc->msk_res_bh = rman_get_bushandle(sc->msk_res);
1547
1548 /*
1549 * Allocate IRQ
1550 */
1551 sc->msk_irq_rid = 0;
1552 sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1553 &sc->msk_irq_rid,
1554 RF_SHAREABLE | RF_ACTIVE);
1555 if (sc->msk_irq == NULL) {
1556 device_printf(dev, "couldn't allocate IRQ resources\n");
1557 error = ENXIO;
1558 goto fail;
1559 }
1560
1561 /* Enable all clocks before accessing any registers. */
1562 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
1563
1564 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR);
1565 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID);
1566 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f;
1567 /* Bail out if chip is not recognized. */
1568 if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
1569 sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
1570 sc->msk_hw_id == CHIP_ID_YUKON_SUPR ||
1571 sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
1572 device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
1573 sc->msk_hw_id, sc->msk_hw_rev);
1574 error = ENXIO;
1575 goto fail;
1576 }
1577
1578 /*
1579 * Create sysctl tree
1580 */
1581 sysctl_ctx_init(&sc->msk_sysctl_ctx);
1582 sc->msk_sysctl_tree = SYSCTL_ADD_NODE(&sc->msk_sysctl_ctx,
1583 SYSCTL_STATIC_CHILDREN(_hw),
1584 OID_AUTO,
1585 device_get_nameunit(dev),
1586 CTLFLAG_RD, 0, "");
1587 if (sc->msk_sysctl_tree == NULL) {
1588 device_printf(dev, "can't add sysctl node\n");
1589 error = ENXIO;
1590 goto fail;
 1591	}
1592
1593 SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
1594 SYSCTL_CHILDREN(sc->msk_sysctl_tree),
1595 OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
1596 &sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
1597 "I", "max number of Rx events to process");
1598 SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
1599 SYSCTL_CHILDREN(sc->msk_sysctl_tree),
1600 OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
1601 sc, 0, mskc_sysctl_intr_rate,
 1602	    "I", "max number of interrupts per second");
1603 SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
1604 SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
1605 "defrag_avoided", CTLFLAG_RW, &sc->msk_defrag_avoided,
1606 0, "# of avoided m_defrag on TX path");
1607 SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
1608 SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
1609 "leading_copied", CTLFLAG_RW, &sc->msk_leading_copied,
1610 0, "# of leading copies on TX path");
1611 SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
1612 SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
1613 "trailing_copied", CTLFLAG_RW, &sc->msk_trailing_copied,
1614 0, "# of trailing copies on TX path");
 1615
1616 sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
1617 if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
1618 sc->msk_coppertype = 0;
1619 else
1620 sc->msk_coppertype = 1;
1621 /* Check number of MACs. */
1622 sc->msk_num_port = 1;
1623 if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
1624 CFG_DUAL_MAC_MSK) {
1625 if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
1626 sc->msk_num_port++;
1627 }
1628
1629 /* Check bus type. */
 1630	if (pci_is_pcie(sc->msk_dev) == 0) {
 1631		sc->msk_bustype = MSK_PEX_BUS;
1632 sc->msk_pciecap = pci_get_pciecap_ptr(sc->msk_dev);
1633 } else if (pci_is_pcix(sc->msk_dev) == 0) {
 1634		sc->msk_bustype = MSK_PCIX_BUS;
1635 sc->msk_pcixcap = pci_get_pcixcap_ptr(sc->msk_dev);
1636 } else {
 1637		sc->msk_bustype = MSK_PCI_BUS;
 1638	}
1639
1640 switch (sc->msk_hw_id) {
1641 case CHIP_ID_YUKON_EC:
1642 case CHIP_ID_YUKON_EC_U:
1643 sc->msk_clock = 125; /* 125 Mhz */
1644 break;
1645 case CHIP_ID_YUKON_EX:
1646 sc->msk_clock = 125; /* 125 Mhz */
1647 break;
1648 case CHIP_ID_YUKON_FE:
1649 sc->msk_clock = 100; /* 100 Mhz */
 1650		sc->msk_pflags |= MSK_FLAG_FASTETHER;
 1651		break;
1652 case CHIP_ID_YUKON_FE_P:
1653 sc->msk_clock = 50; /* 50 Mhz */
1654 /* DESCV2 */
1655 sc->msk_pflags |= MSK_FLAG_FASTETHER;
1656 if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
1657 /*
1658 * XXX
 1659		 * FE+ A0 has a status LE writeback bug, so msk(4)
 1660		 * does not rely on the status word of received frames
 1661		 * in msk_rxeof(), which in turn disables all
 1662		 * hardware assistance bits reported by the status
 1663		 * word as well as validation of the received frame.
 1664		 * Just pass received frames to the upper stack with
 1665		 * minimal testing and let the upper stack handle them.
1666 */
1667 sc->msk_pflags |= MSK_FLAG_NORXCHK;
1668 }
1669 break;
1670 case CHIP_ID_YUKON_XL:
1671 sc->msk_clock = 156; /* 156 Mhz */
1672 break;
1673 case CHIP_ID_YUKON_UL_2:
1674 sc->msk_clock = 125; /* 125 Mhz */
1675 break;
1676 case CHIP_ID_YUKON_OPT:
1677 sc->msk_clock = 125; /* 125 MHz */
1678 break;
2d586421
SZ
1679 default:
 1680		sc->msk_clock = 156;	/* 156 MHz */
1681 break;
1682 }
1683
1684 error = mskc_status_dma_alloc(sc);
1685 if (error)
1686 goto fail;
1687
1688 /* Set base interrupt mask. */
1689 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU;
1690 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR |
1691 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP;
1692
1693 /* Reset the adapter. */
1694 mskc_reset(sc);
1695
1696 error = mskc_setup_rambuffer(sc);
1697 if (error)
1698 goto fail;
1699
1700 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
1701 if (sc->msk_devs[MSK_PORT_A] == NULL) {
1702 device_printf(dev, "failed to add child for PORT_A\n");
1703 error = ENXIO;
1704 goto fail;
1705 }
1706 port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
1707 *port = MSK_PORT_A;
1708 device_set_ivars(sc->msk_devs[MSK_PORT_A], port);
1709
1710 if (sc->msk_num_port > 1) {
1711 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
1712 if (sc->msk_devs[MSK_PORT_B] == NULL) {
1713 device_printf(dev, "failed to add child for PORT_B\n");
1714 error = ENXIO;
1715 goto fail;
1716 }
1717 port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK);
1718 *port = MSK_PORT_B;
1719 device_set_ivars(sc->msk_devs[MSK_PORT_B], port);
1720 }
1721
1722 bus_generic_attach(dev);
1723
1724 error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE,
1725 mskc_intr, sc, &sc->msk_intrhand,
1726 &sc->msk_serializer);
1727 if (error) {
1728 device_printf(dev, "couldn't set up interrupt handler\n");
1729 goto fail;
1730 }
9db4b353
SZ
1731
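	/*
	 * Both ports share the controller's single interrupt, so bind
	 * each port's ifnet to the CPU that runs that IRQ's ithread.
	 */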
1732 cpuid = ithread_cpuid(rman_get_start(sc->msk_irq));
1733 KKASSERT(cpuid >= 0 && cpuid < ncpus);
1734
1735 if (sc->msk_if[0] != NULL)
1736 sc->msk_if[0]->msk_ifp->if_cpuid = cpuid;
1737 if (sc->msk_if[1] != NULL)
1738 sc->msk_if[1]->msk_ifp->if_cpuid = cpuid;
2d586421
SZ
1739 return 0;
1740fail:
1741 mskc_detach(dev);
1742 return (error);
1743}
1744
1745/*
1746 * Shutdown hardware and free up resources. This can be called any
1747 * time after the mutex has been initialized. It is called in both
1748 * the error case in attach and the normal detach case so it needs
1749 * to be careful about only freeing resources that have actually been
1750 * allocated.
1751 */
1752static int
1753msk_detach(device_t dev)
1754{
1755 struct msk_if_softc *sc_if = device_get_softc(dev);
1756
1757 if (device_is_attached(dev)) {
1758 struct msk_softc *sc = sc_if->msk_softc;
1759 struct ifnet *ifp = &sc_if->arpcom.ac_if;
1760
1761 lwkt_serialize_enter(ifp->if_serializer);
1762
1763 if (sc->msk_intrhand != NULL) {
1764 if (sc->msk_if[MSK_PORT_A] != NULL)
1765 msk_stop(sc->msk_if[MSK_PORT_A]);
1766 if (sc->msk_if[MSK_PORT_B] != NULL)
1767 msk_stop(sc->msk_if[MSK_PORT_B]);
1768
1769 bus_teardown_intr(sc->msk_dev, sc->msk_irq,
1770 sc->msk_intrhand);
1771 sc->msk_intrhand = NULL;
1772 }
1773
1774 lwkt_serialize_exit(ifp->if_serializer);
1775
1776 ether_ifdetach(ifp);
1777 }
1778
1779 if (sc_if->msk_miibus != NULL)
1780 device_delete_child(dev, sc_if->msk_miibus);
1781
1782 msk_txrx_dma_free(sc_if);
1783 return (0);
1784}
1785
1786static int
1787mskc_detach(device_t dev)
1788{
1789 struct msk_softc *sc = device_get_softc(dev);
1790 int *port, i;
1791
1792#ifdef INVARIANTS
1793 if (device_is_attached(dev)) {
1794 KASSERT(sc->msk_intrhand == NULL,
1795 ("intr is not torn down yet\n"));
1796 }
1797#endif
1798
1799 for (i = 0; i < sc->msk_num_port; ++i) {
1800 if (sc->msk_devs[i] != NULL) {
1801 port = device_get_ivars(sc->msk_devs[i]);
1802 if (port != NULL) {
1803 kfree(port, M_DEVBUF);
1804 device_set_ivars(sc->msk_devs[i], NULL);
1805 }
1806 device_delete_child(dev, sc->msk_devs[i]);
1807 }
1808 }
1809
1810 /* Disable all interrupts. */
1811 CSR_WRITE_4(sc, B0_IMSK, 0);
1812 CSR_READ_4(sc, B0_IMSK);
1813 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
1814 CSR_READ_4(sc, B0_HWE_IMSK);
1815
1816 /* LED Off. */
1817 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF);
1818
1819 /* Put hardware reset. */
1820 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
1821
1822 mskc_status_dma_free(sc);
1823
1824 if (sc->msk_irq != NULL) {
1825 bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid,
1826 sc->msk_irq);
1827 }
1828 if (sc->msk_res != NULL) {
1829 bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid,
1830 sc->msk_res);
1831 }
1832
f59f1081
SZ
1833 if (sc->msk_sysctl_tree != NULL)
1834 sysctl_ctx_free(&sc->msk_sysctl_ctx);
1835
2d586421
SZ
1836 return (0);
1837}
1838
2d586421
SZ
1839/* Create status DMA region. */
1840static int
1841mskc_status_dma_alloc(struct msk_softc *sc)
1842{
c78f83cb 1843 bus_dmamem_t dmem;
2d586421
SZ
1844 int error;
1845
c78f83cb
SZ
1846 error = bus_dmamem_coherent(NULL/* XXX parent */, MSK_STAT_ALIGN, 0,
1847 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
1848 MSK_STAT_RING_SZ, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
2d586421
SZ
1849 if (error) {
1850 device_printf(sc->msk_dev,
c78f83cb
SZ
1851 "failed to create status coherent DMA memory\n");
1852 return error;
2d586421 1853 }
c78f83cb
SZ
1854 sc->msk_stat_tag = dmem.dmem_tag;
1855 sc->msk_stat_map = dmem.dmem_map;
1856 sc->msk_stat_ring = dmem.dmem_addr;
1857 sc->msk_stat_ring_paddr = dmem.dmem_busaddr;
2d586421
SZ
1858
1859 return (0);
1860}
1861
1862static void
1863mskc_status_dma_free(struct msk_softc *sc)
1864{
1865 /* Destroy status block. */
1866 if (sc->msk_stat_tag) {
1867 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map);
1868 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring,
1869 sc->msk_stat_map);
1870 bus_dma_tag_destroy(sc->msk_stat_tag);
1871 sc->msk_stat_tag = NULL;
1872 }
1873}
1874
1875static int
1876msk_txrx_dma_alloc(struct msk_if_softc *sc_if)
1877{
1878 int error, i, j;
1879#ifdef MSK_JUMBO
1880 struct msk_rxdesc *jrxd;
1881 struct msk_jpool_entry *entry;
1882 uint8_t *ptr;
1883#endif
2a9b20a4 1884 bus_size_t rxalign;
2d586421
SZ
1885
1886 /* Create parent DMA tag. */
1887 /*
1888 * XXX
 1889	 * It seems that Yukon II supports full 64-bit DMA operations, but
 1890	 * it needs two descriptors (list elements) per 64-bit DMA operation.
 1891	 * Since we don't know in advance whether a 32-bit or 64-bit DMA
 1892	 * address mapping will be used for each mbuf, we limit the DMA
 1893	 * space to the 32-bit address range. Otherwise we would have to
 1894	 * check which DMA address is used and chain another descriptor for
 1895	 * the 64-bit DMA operation, which also means the descriptor ring
 1896	 * size becomes variable. Limiting DMA addresses to the 32-bit
 1897	 * address space greatly simplifies descriptor handling and possibly
 1898	 * increases performance a bit due to more efficient handling of
 1899	 * descriptors. Apart from harassing the checksum offloading
 1900	 * mechanisms, it seems a really bad idea to use a separate
 1901	 * descriptor for 64-bit DMA just to save a little descriptor memory.
 1902	 * Anyway, I've never seen such an exotic scheme on Ethernet hardware.
1903 */
1904 error = bus_dma_tag_create(
1905 NULL, /* parent */
1906 1, 0, /* alignment, boundary */
1907 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1908 BUS_SPACE_MAXADDR, /* highaddr */
1909 NULL, NULL, /* filter, filterarg */
1910 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1911 0, /* nsegments */
1912 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1913 0, /* flags */
1914 &sc_if->msk_cdata.msk_parent_tag);
1915 if (error) {
1916 device_printf(sc_if->msk_if_dev,
1917 "failed to create parent DMA tag\n");
1918 return error;
1919 }
1920
1921 /* Create DMA stuffs for Tx ring. */
1922 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ,
1923 &sc_if->msk_cdata.msk_tx_ring_tag,
da44240f 1924 (void *)&sc_if->msk_rdata.msk_tx_ring,
2d586421
SZ
1925 &sc_if->msk_rdata.msk_tx_ring_paddr,
1926 &sc_if->msk_cdata.msk_tx_ring_map);
1927 if (error) {
1928 device_printf(sc_if->msk_if_dev,
1929 "failed to create TX ring DMA stuffs\n");
1930 return error;
1931 }
1932
1933 /* Create DMA stuffs for Rx ring. */
1934 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ,
1935 &sc_if->msk_cdata.msk_rx_ring_tag,
da44240f 1936 (void *)&sc_if->msk_rdata.msk_rx_ring,
2d586421
SZ
1937 &sc_if->msk_rdata.msk_rx_ring_paddr,
1938 &sc_if->msk_cdata.msk_rx_ring_map);
1939 if (error) {
1940 device_printf(sc_if->msk_if_dev,
1941 "failed to create RX ring DMA stuffs\n");
1942 return error;
1943 }
1944
1945 /* Create tag for Tx buffers. */
1946 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
1947 1, 0, /* alignment, boundary */
1948 BUS_SPACE_MAXADDR, /* lowaddr */
1949 BUS_SPACE_MAXADDR, /* highaddr */
1950 NULL, NULL, /* filter, filterarg */
ad3a1ee4 1951 MSK_JUMBO_FRAMELEN, /* maxsize */
2d586421 1952 MSK_MAXTXSEGS, /* nsegments */
ad3a1ee4
SZ
1953 MSK_MAXSGSIZE, /* maxsegsize */
1954 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK |
1955 BUS_DMA_ONEBPAGE, /* flags */
2d586421
SZ
1956 &sc_if->msk_cdata.msk_tx_tag);
1957 if (error) {
1958 device_printf(sc_if->msk_if_dev,
1959 "failed to create Tx DMA tag\n");
1960 return error;
1961 }
1962
1963 /* Create DMA maps for Tx buffers. */
1964 for (i = 0; i < MSK_TX_RING_CNT; i++) {
1965 struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i];
1966
ad3a1ee4
SZ
1967 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag,
1968 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
1969 &txd->tx_dmamap);
2d586421
SZ
1970 if (error) {
1971 device_printf(sc_if->msk_if_dev,
1972 "failed to create %dth Tx dmamap\n", i);
1973
1974 for (j = 0; j < i; ++j) {
1975 txd = &sc_if->msk_cdata.msk_txdesc[j];
1976 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
1977 txd->tx_dmamap);
1978 }
1979 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
1980 sc_if->msk_cdata.msk_tx_tag = NULL;
1981
1982 return error;
1983 }
1984 }
1985
2a9b20a4
SZ
1986 /*
 1987	 * Work around a hardware hang which seems to happen when the Rx
 1988	 * buffer is not aligned on a multiple of the FIFO word (8 bytes).
1989 */
1990 if (sc_if->msk_flags & MSK_FLAG_RAMBUF)
1991 rxalign = MSK_RX_BUF_ALIGN;
1992 else
1993 rxalign = 1;
1994
2d586421
SZ
1995 /* Create tag for Rx buffers. */
1996 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2a9b20a4 1997 rxalign, 0, /* alignment, boundary */
2d586421
SZ
1998 BUS_SPACE_MAXADDR, /* lowaddr */
1999 BUS_SPACE_MAXADDR, /* highaddr */
2000 NULL, NULL, /* filter, filterarg */
2001 MCLBYTES, /* maxsize */
2002 1, /* nsegments */
2003 MCLBYTES, /* maxsegsize */
2a9b20a4
SZ
2004 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED |
2005 BUS_DMA_WAITOK, /* flags */
2d586421
SZ
2006 &sc_if->msk_cdata.msk_rx_tag);
2007 if (error) {
2008 device_printf(sc_if->msk_if_dev,
2009 "failed to create Rx DMA tag\n");
2010 return error;
2011 }
2012
2013 /* Create DMA maps for Rx buffers. */
ad3a1ee4 2014 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, BUS_DMA_WAITOK,
2d586421
SZ
2015 &sc_if->msk_cdata.msk_rx_sparemap);
2016 if (error) {
2017 device_printf(sc_if->msk_if_dev,
2018 "failed to create spare Rx dmamap\n");
2019 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2020 sc_if->msk_cdata.msk_rx_tag = NULL;
2021 return error;
2022 }
2023 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2024 struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2025
ad3a1ee4
SZ
2026 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag,
2027 BUS_DMA_WAITOK, &rxd->rx_dmamap);
2d586421
SZ
2028 if (error) {
2029 device_printf(sc_if->msk_if_dev,
2030 "failed to create %dth Rx dmamap\n", i);
2031
2032 for (j = 0; j < i; ++j) {
2033 rxd = &sc_if->msk_cdata.msk_rxdesc[j];
2034 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2035 rxd->rx_dmamap);
2036 }
7f582564
SZ
2037 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2038 sc_if->msk_cdata.msk_rx_sparemap);
2d586421
SZ
2039 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2040 sc_if->msk_cdata.msk_rx_tag = NULL;
2041
2042 return error;
2043 }
2044 }
2045
2046#ifdef MSK_JUMBO
2047 SLIST_INIT(&sc_if->msk_jfree_listhead);
2048 SLIST_INIT(&sc_if->msk_jinuse_listhead);
2049
2050 /* Create tag for jumbo Rx ring. */
2051 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2052 MSK_RING_ALIGN, 0, /* alignment, boundary */
2053 BUS_SPACE_MAXADDR, /* lowaddr */
2054 BUS_SPACE_MAXADDR, /* highaddr */
2055 NULL, NULL, /* filter, filterarg */
2056 MSK_JUMBO_RX_RING_SZ, /* maxsize */
2057 1, /* nsegments */
2058 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */
2059 0, /* flags */
2060 NULL, NULL, /* lockfunc, lockarg */
2061 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2062 if (error != 0) {
2063 device_printf(sc_if->msk_if_dev,
2064 "failed to create jumbo Rx ring DMA tag\n");
2065 goto fail;
2066 }
2067
2068 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. */
2069 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2070 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring,
2071 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2072 &sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2073 if (error != 0) {
2074 device_printf(sc_if->msk_if_dev,
2075 "failed to allocate DMA'able memory for jumbo Rx ring\n");
2076 goto fail;
2077 }
2078
2079 ctx.msk_busaddr = 0;
2080 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2081 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
2082 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ,
2083 msk_dmamap_cb, &ctx, 0);
2084 if (error != 0) {
2085 device_printf(sc_if->msk_if_dev,
2086 "failed to load DMA'able memory for jumbo Rx ring\n");
2087 goto fail;
2088 }
2089 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr;
2090
2091 /* Create tag for jumbo buffer blocks. */
2092 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2093 PAGE_SIZE, 0, /* alignment, boundary */
2094 BUS_SPACE_MAXADDR, /* lowaddr */
2095 BUS_SPACE_MAXADDR, /* highaddr */
2096 NULL, NULL, /* filter, filterarg */
2097 MSK_JMEM, /* maxsize */
2098 1, /* nsegments */
2099 MSK_JMEM, /* maxsegsize */
2100 0, /* flags */
2101 NULL, NULL, /* lockfunc, lockarg */
2102 &sc_if->msk_cdata.msk_jumbo_tag);
2103 if (error != 0) {
2104 device_printf(sc_if->msk_if_dev,
2105 "failed to create jumbo Rx buffer block DMA tag\n");
2106 goto fail;
2107 }
2108
2109 /* Create tag for jumbo Rx buffers. */
2110 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */
2111 PAGE_SIZE, 0, /* alignment, boundary */
2112 BUS_SPACE_MAXADDR, /* lowaddr */
2113 BUS_SPACE_MAXADDR, /* highaddr */
2114 NULL, NULL, /* filter, filterarg */
2115 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */
2116 MSK_MAXRXSEGS, /* nsegments */
2117 MSK_JLEN, /* maxsegsize */
2118 0, /* flags */
2119 NULL, NULL, /* lockfunc, lockarg */
2120 &sc_if->msk_cdata.msk_jumbo_rx_tag);
2121 if (error != 0) {
2122 device_printf(sc_if->msk_if_dev,
2123 "failed to create jumbo Rx DMA tag\n");
2124 goto fail;
2125 }
2126
2127 /* Create DMA maps for jumbo Rx buffers. */
2128 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2129 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) {
2130 device_printf(sc_if->msk_if_dev,
2131 "failed to create spare jumbo Rx dmamap\n");
2132 goto fail;
2133 }
2134 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2135 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2136 jrxd->rx_m = NULL;
2137 jrxd->rx_dmamap = NULL;
2138 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0,
2139 &jrxd->rx_dmamap);
2140 if (error != 0) {
2141 device_printf(sc_if->msk_if_dev,
2142 "failed to create jumbo Rx dmamap\n");
2143 goto fail;
2144 }
2145 }
2146
2147 /* Allocate DMA'able memory and load the DMA map for jumbo buf. */
2148 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag,
2149 (void **)&sc_if->msk_rdata.msk_jumbo_buf,
2150 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2151 &sc_if->msk_cdata.msk_jumbo_map);
2152 if (error != 0) {
2153 device_printf(sc_if->msk_if_dev,
2154 "failed to allocate DMA'able memory for jumbo buf\n");
2155 goto fail;
2156 }
2157
2158 ctx.msk_busaddr = 0;
2159 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag,
2160 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf,
2161 MSK_JMEM, msk_dmamap_cb, &ctx, 0);
2162 if (error != 0) {
2163 device_printf(sc_if->msk_if_dev,
2164 "failed to load DMA'able memory for jumbobuf\n");
2165 goto fail;
2166 }
2167 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr;
2168
2169 /*
2170 * Now divide it up into 9K pieces and save the addresses
2171 * in an array.
2172 */
2173 ptr = sc_if->msk_rdata.msk_jumbo_buf;
2174 for (i = 0; i < MSK_JSLOTS; i++) {
2175 sc_if->msk_cdata.msk_jslots[i] = ptr;
2176 ptr += MSK_JLEN;
2177 entry = malloc(sizeof(struct msk_jpool_entry),
2178 M_DEVBUF, M_WAITOK);
2179 if (entry == NULL) {
2180 device_printf(sc_if->msk_if_dev,
2181 "no memory for jumbo buffers!\n");
2182 error = ENOMEM;
2183 goto fail;
2184 }
2185 entry->slot = i;
2186 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2187 jpool_entries);
2188 }
2189#endif
2190 return 0;
2191}
2192
2193static void
2194msk_txrx_dma_free(struct msk_if_softc *sc_if)
2195{
2196 struct msk_txdesc *txd;
2197 struct msk_rxdesc *rxd;
2198#ifdef MSK_JUMBO
2199 struct msk_rxdesc *jrxd;
2200 struct msk_jpool_entry *entry;
2201#endif
2202 int i;
2203
2204#ifdef MSK_JUMBO
2205 MSK_JLIST_LOCK(sc_if);
2206 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) {
2207 device_printf(sc_if->msk_if_dev,
2208 "asked to free buffer that is in use!\n");
2209 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2210 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry,
2211 jpool_entries);
2212 }
2213
2214 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) {
2215 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2216 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2217 free(entry, M_DEVBUF);
2218 }
2219 MSK_JLIST_UNLOCK(sc_if);
2220
2221 /* Destroy jumbo buffer block. */
2222 if (sc_if->msk_cdata.msk_jumbo_map)
2223 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag,
2224 sc_if->msk_cdata.msk_jumbo_map);
2225
2226 if (sc_if->msk_rdata.msk_jumbo_buf) {
2227 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag,
2228 sc_if->msk_rdata.msk_jumbo_buf,
2229 sc_if->msk_cdata.msk_jumbo_map);
2230 sc_if->msk_rdata.msk_jumbo_buf = NULL;
2231 sc_if->msk_cdata.msk_jumbo_map = NULL;
2232 }
2233
2234 /* Jumbo Rx ring. */
2235 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) {
2236 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map)
2237 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2238 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2239 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map &&
2240 sc_if->msk_rdata.msk_jumbo_rx_ring)
2241 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
2242 sc_if->msk_rdata.msk_jumbo_rx_ring,
2243 sc_if->msk_cdata.msk_jumbo_rx_ring_map);
2244 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL;
2245 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL;
2246 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag);
2247 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL;
2248 }
2249
2250 /* Jumbo Rx buffers. */
2251 if (sc_if->msk_cdata.msk_jumbo_rx_tag) {
2252 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
2253 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
2254 if (jrxd->rx_dmamap) {
2255 bus_dmamap_destroy(
2256 sc_if->msk_cdata.msk_jumbo_rx_tag,
2257 jrxd->rx_dmamap);
2258 jrxd->rx_dmamap = NULL;
2259 }
2260 }
2261 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) {
2262 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag,
2263 sc_if->msk_cdata.msk_jumbo_rx_sparemap);
2264 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0;
2265 }
2266 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag);
2267 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL;
2268 }
2269#endif
2270
2271 /* Tx ring. */
2272 msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag,
2273 sc_if->msk_rdata.msk_tx_ring,
2274 sc_if->msk_cdata.msk_tx_ring_map);
2275
2276 /* Rx ring. */
2277 msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag,
2278 sc_if->msk_rdata.msk_rx_ring,
2279 sc_if->msk_cdata.msk_rx_ring_map);
2280
2281 /* Tx buffers. */
2282 if (sc_if->msk_cdata.msk_tx_tag) {
2283 for (i = 0; i < MSK_TX_RING_CNT; i++) {
2284 txd = &sc_if->msk_cdata.msk_txdesc[i];
2285 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag,
2286 txd->tx_dmamap);
2287 }
2288 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag);
2289 sc_if->msk_cdata.msk_tx_tag = NULL;
2290 }
2291
2292 /* Rx buffers. */
2293 if (sc_if->msk_cdata.msk_rx_tag) {
2294 for (i = 0; i < MSK_RX_RING_CNT; i++) {
2295 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
2296 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2297 rxd->rx_dmamap);
2298 }
2299 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag,
2300 sc_if->msk_cdata.msk_rx_sparemap);
2301 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag);
2302 sc_if->msk_cdata.msk_rx_tag = NULL;
2303 }
2304
2305 if (sc_if->msk_cdata.msk_parent_tag) {
2306 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag);
2307 sc_if->msk_cdata.msk_parent_tag = NULL;
2308 }
2309}
2310
2311#ifdef MSK_JUMBO
2312/*
2313 * Allocate a jumbo buffer.
2314 */
2315static void *
2316msk_jalloc(struct msk_if_softc *sc_if)
2317{
2318 struct msk_jpool_entry *entry;
2319
2320 MSK_JLIST_LOCK(sc_if);
2321
2322 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead);
2323
2324 if (entry == NULL) {
2325 MSK_JLIST_UNLOCK(sc_if);
2326 return (NULL);
2327 }
2328
2329 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries);
2330 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries);
2331
2332 MSK_JLIST_UNLOCK(sc_if);
2333
2334 return (sc_if->msk_cdata.msk_jslots[entry->slot]);
2335}
2336
2337/*
2338 * Release a jumbo buffer.
2339 */
2340static void
2341msk_jfree(void *buf, void *args)
2342{
2343 struct msk_if_softc *sc_if;
2344 struct msk_jpool_entry *entry;
2345 int i;
2346
2347 /* Extract the softc struct pointer. */
2348 sc_if = (struct msk_if_softc *)args;
2349 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__));
2350
2351 MSK_JLIST_LOCK(sc_if);
2352 /* Calculate the slot this buffer belongs to. */
2353 i = ((vm_offset_t)buf
2354 - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN;
2355 KASSERT(i >= 0 && i < MSK_JSLOTS,
2356 ("%s: asked to free buffer that we don't manage!", __func__));
2357
2358 entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead);
2359 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__));
2360 entry->slot = i;
2361 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries);
2362 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries);
2363 if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead))
2364 wakeup(sc_if);
2365
2366 MSK_JLIST_UNLOCK(sc_if);
2367}
2368#endif
2369
2d586421
SZ
2370static int
2371msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
2372{
2373 struct msk_txdesc *txd, *txd_last;
2374 struct msk_tx_desc *tx_le;
2375 struct mbuf *m;
2376 bus_dmamap_t map;
2d586421
SZ
2377 bus_dma_segment_t txsegs[MSK_MAXTXSEGS];
2378 uint32_t control, prod, si;
2379 uint16_t offset, tcp_offset;
5bda51d4 2380 int error, i, nsegs, maxsegs, defrag;
def0e148
SZ
2381
2382 maxsegs = MSK_TX_RING_CNT - sc_if->msk_cdata.msk_tx_cnt -
2383 MSK_RESERVED_TX_DESC_CNT;
2384 KASSERT(maxsegs >= MSK_SPARE_TX_DESC_CNT,
2385 ("not enough spare TX desc\n"));
2386 if (maxsegs > MSK_MAXTXSEGS)
2387 maxsegs = MSK_MAXTXSEGS;
2d586421 2388
5bda51d4 2389 /*
e71dee4b 2390	 * Align the TX buffer to a 64-byte boundary. This greatly improves
5bda51d4
SZ
2391 * bulk data TX performance on my 88E8053 (+100Mbps) at least.
2392 * Try avoiding m_defrag(), if the mbufs are not chained together
2393 * by m_next (i.e. m->m_len == m->m_pkthdr.len).
5bda51d4
SZ
2394 */
2395
e71dee4b 2396#define MSK_TXBUF_ALIGN 64
5bda51d4
SZ
2397#define MSK_TXBUF_MASK (MSK_TXBUF_ALIGN - 1)
2398
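	/*
	 * Only single-mbuf packets (m_len == m_pkthdr.len) are adjusted in
	 * place below, using trailing space first and leading space as a
	 * fallback, and only if the mbuf is writable; everything else goes
	 * through m_defrag(). The counters bumped here back the
	 * defrag_avoided/leading_copied/trailing_copied sysctls added in
	 * mskc_attach().
	 */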
2399 defrag = 1;
2d586421 2400 m = *m_head;
5bda51d4
SZ
2401 if (m->m_len == m->m_pkthdr.len) {
2402 int space;
2403
2404 space = ((uintptr_t)m->m_data & MSK_TXBUF_MASK);
2405 if (space) {
2406 if (M_WRITABLE(m)) {
2407 if (M_TRAILINGSPACE(m) >= space) {
2408 /* e.g. TCP ACKs */
2409 bcopy(m->m_data, m->m_data + space,
2410 m->m_len);
2411 m->m_data += space;
2412 defrag = 0;
2413 sc_if->msk_softc->msk_trailing_copied++;
2414 } else {
2415 space = MSK_TXBUF_ALIGN - space;
2416 if (M_LEADINGSPACE(m) >= space) {
2417 /* e.g. Small UDP datagrams */
2418 bcopy(m->m_data,
2419 m->m_data - space,
2420 m->m_len);
2421 m->m_data -= space;
2422 defrag = 0;
2423 sc_if->msk_softc->
2424 msk_leading_copied++;
2425 }
2426 }
2427 }
2428 } else {
2429 /* e.g. on forwarding path */
2430 defrag = 0;
2431 }
2432 }
2433 if (defrag) {
2434 m = m_defrag(*m_head, MB_DONTWAIT);
2435 if (m == NULL) {
2436 m_freem(*m_head);
2437 *m_head = NULL;
2438 return ENOBUFS;
2439 }
2440 *m_head = m;
2441 } else {
2442 sc_if->msk_softc->msk_defrag_avoided++;
2443 }
2444
2445#undef MSK_TXBUF_MASK
2446#undef MSK_TXBUF_ALIGN
2447
2448 tcp_offset = offset = 0;
2d586421
SZ
2449 if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2450 /*
 2451		 * Since the mbuf carries no protocol-specific structure
 2452		 * information, we have to inspect the protocol headers here to
 2453		 * set up TSO and checksum offload. I don't know why Marvell
 2454		 * made such a decision in the chip design, because other GigE
 2455		 * hardware normally takes care of all these chores in
 2456		 * hardware. However, the TSO performance of Yukon II is good
 2457		 * enough that it's worth implementing.
2458 */
2459 struct ether_header *eh;
2460 struct ip *ip;
2461
2462 /* TODO check for M_WRITABLE(m) */
2463
2464 offset = sizeof(struct ether_header);
2465 m = m_pullup(m, offset);
2466 if (m == NULL) {
2467 *m_head = NULL;
2468 return (ENOBUFS);
2469 }
2470 eh = mtod(m, struct ether_header *);
2471 /* Check if hardware VLAN insertion is off. */
2472 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2473 offset = sizeof(struct ether_vlan_header);
2474 m = m_pullup(m, offset);
2475 if (m == NULL) {
2476 *m_head = NULL;
2477 return (ENOBUFS);
2478 }
2479 }
2480 m = m_pullup(m, offset + sizeof(struct ip));
2481 if (m == NULL) {
2482 *m_head = NULL;
2483 return (ENOBUFS);
2484 }
2485 ip = (struct ip *)(mtod(m, char *) + offset);
2486 offset += (ip->ip_hl << 2);
2487 tcp_offset = offset;
2488 /*
 2489		 * It seems that Yukon II has a Tx checksum offload bug for
 2490		 * small TCP packets less than 60 bytes in size
 2491		 * (e.g. TCP window probe packets, pure ACK packets).
 2492		 * The common workaround of padding the frame with zeros up to
 2493		 * the minimum Ethernet frame size didn't work at all.
 2494		 * Instead of disabling checksum offload completely, we
 2495		 * resort to a software checksum routine when we encounter short
 2496		 * TCP frames.
 2497		 * Short UDP packets appear to be handled correctly by
 2498		 * Yukon II.
2499 */
2500 if (m->m_pkthdr.len < MSK_MIN_FRAMELEN &&
2501 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) {
2502 uint16_t csum;
2503
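			/*
			 * in_cksum_skip() sums the frame starting at the TCP
			 * header; the stack has already seeded the checksum
			 * field with the pseudo-header sum, so the result is
			 * the final TCP checksum. Clearing CSUM_TCP below
			 * keeps the checksum offload LE from being generated
			 * for this frame.
			 */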
2504 csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset -
2505 (ip->ip_hl << 2), offset);
2506 *(uint16_t *)(m->m_data + offset +
2507 m->m_pkthdr.csum_data) = csum;
2508 m->m_pkthdr.csum_flags &= ~CSUM_TCP;
2509 }
2510 *m_head = m;
2511 }
2512
2513 prod = sc_if->msk_cdata.msk_tx_prod;
2514 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2515 txd_last = txd;
2516 map = txd->tx_dmamap;
2d586421 2517
def0e148
SZ
2518 error = bus_dmamap_load_mbuf_defrag(sc_if->msk_cdata.msk_tx_tag, map,
2519 m_head, txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
2520 if (error) {
2521 m_freem(*m_head);
2522 *m_head = NULL;
2523 return error;
2d586421 2524 }
def0e148 2525 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE);
2d586421 2526
def0e148 2527 m = *m_head;
2d586421
SZ
2528 control = 0;
2529 tx_le = NULL;
2530
2531#ifdef notyet
2532 /* Check if we have a VLAN tag to insert. */
2533 if ((m->m_flags & M_VLANTAG) != 0) {
2534 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2535 tx_le->msk_addr = htole32(0);
2536 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER |
2537 htons(m->m_pkthdr.ether_vtag));
2538 sc_if->msk_cdata.msk_tx_cnt++;
2539 MSK_INC(prod, MSK_TX_RING_CNT);
2540 control |= INS_VLAN;
2541 }
2542#endif
2543 /* Check if we have to handle checksum offload. */
2544 if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) {
2545 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2546 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data)
2547 & 0xffff) | ((uint32_t)tcp_offset << 16));
2548 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER));
2549 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
2550 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
2551 control |= UDPTCP;
2552 sc_if->msk_cdata.msk_tx_cnt++;
2553 MSK_INC(prod, MSK_TX_RING_CNT);
2554 }
2555
2556 si = prod;
2557 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2558 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr));
2559 tx_le->msk_control = htole32(txsegs[0].ds_len | control |
2560 OP_PACKET);
2561 sc_if->msk_cdata.msk_tx_cnt++;
2562 MSK_INC(prod, MSK_TX_RING_CNT);
2563
def0e148 2564 for (i = 1; i < nsegs; i++) {
2d586421
SZ
2565 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2566 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr));
2567 tx_le->msk_control = htole32(txsegs[i].ds_len | control |
2568 OP_BUFFER | HW_OWNER);
2569 sc_if->msk_cdata.msk_tx_cnt++;
2570 MSK_INC(prod, MSK_TX_RING_CNT);
2571 }
2572 /* Update producer index. */
2573 sc_if->msk_cdata.msk_tx_prod = prod;
2574
 2575	/* Set EOP on the last descriptor. */
2576 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT;
2577 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod];
2578 tx_le->msk_control |= htole32(EOP);
2579
2580 /* Turn the first descriptor ownership to hardware. */
2581 tx_le = &sc_if->msk_rdata.msk_tx_ring[si];
2582 tx_le->msk_control |= htole32(HW_OWNER);
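	/*
	 * The initial OP_PACKET LE is handed to hardware only after all of
	 * the following LEs have been written, so the chip never picks up a
	 * partially built chain.
	 */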
2583
2584 txd = &sc_if->msk_cdata.msk_txdesc[prod];
2585 map = txd_last->tx_dmamap;
2586 txd_last->tx_dmamap = txd->tx_dmamap;
2587 txd->tx_dmamap = map;
2588 txd->tx_m = m;
2589
2d586421
SZ
2590 return (0);
2591}
2592
2593static void
2594msk_start(struct ifnet *ifp)
2595{
2596 struct msk_if_softc *sc_if;
2597 struct mbuf *m_head;
2598 int enq;
2599
2600 sc_if = ifp->if_softc;
2601
2602 ASSERT_SERIALIZED(ifp->if_serializer);
2603
9db4b353
SZ
2604 if (!sc_if->msk_link) {
2605 ifq_purge(&ifp->if_snd);
2606 return;
2607 }
2608
2609 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
2d586421
SZ
2610 return;
2611
def0e148
SZ
2612 enq = 0;
2613 while (!ifq_is_empty(&ifp->if_snd)) {
2614 if (MSK_IS_OACTIVE(sc_if)) {
2615 ifp->if_flags |= IFF_OACTIVE;
2616 break;
2617 }
2618
2d586421
SZ
2619 m_head = ifq_dequeue(&ifp->if_snd, NULL);
2620 if (m_head == NULL)
2621 break;
2622
2623 /*
2624 * Pack the data into the transmit ring. If we
2625 * don't have room, set the OACTIVE flag and wait
2626 * for the NIC to drain the ring.
2627 */
2628 if (msk_encap(sc_if, &m_head) != 0) {
5bda51d4 2629 ifp->if_oerrors++;
def0e148
SZ
2630 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2631 continue;
2632 } else {
2633 ifp->if_flags |= IFF_OACTIVE;
2d586421 2634 break;
def0e148 2635 }
2d586421 2636 }
def0e148 2637 enq = 1;
2d586421 2638
2d586421
SZ
2639 /*
2640 * If there's a BPF listener, bounce a copy of this frame
2641 * to him.
2642 */
2643 BPF_MTAP(ifp, m_head);
2644 }
2645
def0e148 2646 if (enq) {
2d586421
SZ
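		/*
		 * Writing the new producer index to the Tx queue's prefetch
		 * unit PUT_IDX register is what tells the chip to fetch and
		 * process the LEs queued by msk_encap().
		 */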
2647 /* Transmit */
2648 CSR_WRITE_2(sc_if->msk_softc,
2649 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG),
2650 sc_if->msk_cdata.msk_tx_prod);
2651
2652 /* Set a timeout in case the chip goes out to lunch. */
2653 ifp->if_timer = MSK_TX_TIMEOUT;
2654 }
2655}
2656
2657static void
2658msk_watchdog(struct ifnet *ifp)
2659{
2660 struct msk_if_softc *sc_if = ifp->if_softc;
2661 uint32_t ridx;
2662 int idx;
2663
2664 ASSERT_SERIALIZED(ifp->if_serializer);
2665
2666 if (sc_if->msk_link == 0) {
2667 if (bootverbose)
2668 if_printf(sc_if->msk_ifp, "watchdog timeout "
2669 "(missed link)\n");
2670 ifp->if_oerrors++;
2671 msk_init(sc_if);
2672 return;
2673 }
2674
2675 /*
2676 * Reclaim first as there is a possibility of losing Tx completion
2677 * interrupts.
2678 */
2679 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX;
2680 idx = CSR_READ_2(sc_if->msk_softc, ridx);
2681 if (sc_if->msk_cdata.msk_tx_cons != idx) {
2682 msk_txeof(sc_if, idx);
2683 if (sc_if->msk_cdata.msk_tx_cnt == 0) {
2684 if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
2685 "-- recovering\n");
2686 if (!ifq_is_empty(&ifp->if_snd))
9db4b353 2687 if_devstart(ifp);
2d586421
SZ
2688 return;
2689 }
2690 }
2691
2692 if_printf(ifp, "watchdog timeout\n");
2693 ifp->if_oerrors++;
2694 msk_init(sc_if);
2695 if (!ifq_is_empty(&ifp->if_snd))
9db4b353 2696 if_devstart(ifp);
2d586421
SZ
2697}
2698
2699static int
2700mskc_shutdown(device_t dev)
2701{
2702 struct msk_softc *sc = device_get_softc(dev);
2703 int i;
2704
2705 lwkt_serialize_enter(&sc->msk_serializer);
2706
2707 for (i = 0; i < sc->msk_num_port; i++) {
2708 if (sc->msk_if[i] != NULL)
2709 msk_stop(sc->msk_if[i]);
2710 }
2711
2d586421
SZ
2712 /* Put hardware reset. */
2713 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2714
2715 lwkt_serialize_exit(&sc->msk_serializer);
2716 return (0);
2717}
2718
2719static int
2720mskc_suspend(device_t dev)
2721{
2722 struct msk_softc *sc = device_get_softc(dev);
2723 int i;
2724
2725 lwkt_serialize_enter(&sc->msk_serializer);
2726
2727 for (i = 0; i < sc->msk_num_port; i++) {
2728 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2729 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0))
2730 msk_stop(sc->msk_if[i]);
2731 }
2732
2733 /* Disable all interrupts. */
2734 CSR_WRITE_4(sc, B0_IMSK, 0);
2735 CSR_READ_4(sc, B0_IMSK);
2736 CSR_WRITE_4(sc, B0_HWE_IMSK, 0);
2737 CSR_READ_4(sc, B0_HWE_IMSK);
2738
2739 mskc_phy_power(sc, MSK_PHY_POWERDOWN);
2740
2741 /* Put hardware reset. */
2742 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET);
2743 sc->msk_suspended = 1;
2744
2745 lwkt_serialize_exit(&sc->msk_serializer);
2746
2747 return (0);
2748}
2749
2750static int
2751mskc_resume(device_t dev)
2752{
2753 struct msk_softc *sc = device_get_softc(dev);
2754 int i;
2755
2756 lwkt_serialize_enter(&sc->msk_serializer);
2757
60ad6a1f
SZ
2758 /* Enable all clocks before accessing any registers. */
2759 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0);
2d586421
SZ
2760 mskc_reset(sc);
2761 for (i = 0; i < sc->msk_num_port; i++) {
2762 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL &&
2763 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0))
2764 msk_init(sc->msk_if[i]);
2765 }
2766 sc->msk_suspended = 0;
2767
2768 lwkt_serialize_exit(&sc->msk_serializer);
2769
2770 return (0);
2771}
2772
2773static void
0ae155c2
SZ
2774msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len,
2775 struct mbuf_chain *chain)
2d586421
SZ
2776{
2777 struct mbuf *m;
2778 struct ifnet *ifp;
2779 struct msk_rxdesc *rxd;
2780 int cons, rxlen;
2781
2782 ifp = sc_if->msk_ifp;
2783
2784 cons = sc_if->msk_cdata.msk_rx_cons;
2785 do {
2786 rxlen = status >> 16;
2787 if ((status & GMR_FS_VLAN) != 0 &&
2788 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2789 rxlen -= EVL_ENCAPLEN;
080bd27e
SZ
2790 if (sc_if->msk_flags & MSK_FLAG_NORXCHK) {
2791 /*
 2792			 * For controllers that return a bogus status code,
 2793			 * just do a minimal check and let the upper stack
 2794			 * handle this frame.
2795 */
2796 if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) {
2797 ifp->if_ierrors++;
2798 msk_discard_rxbuf(sc_if, cons);
2799 break;
2800 }
2801 } else if (len > sc_if->msk_framesize ||
2d586421
SZ
2802 ((status & GMR_FS_ANY_ERR) != 0) ||
2803 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 2804			/* Don't count flow-control packets as errors. */
2805 if ((status & GMR_FS_GOOD_FC) == 0)
2806 ifp->if_ierrors++;
2807 msk_discard_rxbuf(sc_if, cons);
2808 break;
2809 }
2810 rxd = &sc_if->msk_cdata.msk_rxdesc[cons];
2811 m = rxd->rx_m;
2499c577 2812 if (msk_newbuf(sc_if, cons, 0) != 0) {
2d586421
SZ
2813 ifp->if_iqdrops++;
2814 /* Reuse old buffer. */
2815 msk_discard_rxbuf(sc_if, cons);
2816 break;
2817 }
2818 m->m_pkthdr.rcvif = ifp;
2819 m->m_pkthdr.len = m->m_len = len;
2820 ifp->if_ipackets++;
2821#ifdef notyet
2822 /* Check for VLAN tagged packets. */
2823 if ((status & GMR_FS_VLAN) != 0 &&
2824 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2825 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2826 m->m_flags |= M_VLANTAG;
2827 }
2828#endif
0ae155c2 2829
2eb0d069 2830 ether_input_chain(ifp, m, NULL, chain);
2d586421
SZ
2831 } while (0);
2832
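	/*
	 * Consumer and producer advance in lockstep: the ring slot is always
	 * refilled, either with a fresh cluster from msk_newbuf() or by
	 * recycling the old buffer via msk_discard_rxbuf().
	 */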
2833 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT);
2834 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT);
2835}
2836
2837#ifdef MSK_JUMBO
2838static void
2839msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len)
2840{
2841 struct mbuf *m;
2842 struct ifnet *ifp;
2843 struct msk_rxdesc *jrxd;
2844 int cons, rxlen;
2845
2846 ifp = sc_if->msk_ifp;
2847
2848 MSK_IF_LOCK_ASSERT(sc_if);
2849
2850 cons = sc_if->msk_cdata.msk_rx_cons;
2851 do {
2852 rxlen = status >> 16;
2853 if ((status & GMR_FS_VLAN) != 0 &&
2854 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
2855 rxlen -= ETHER_VLAN_ENCAP_LEN;
2856 if (len > sc_if->msk_framesize ||
2857 ((status & GMR_FS_ANY_ERR) != 0) ||
2858 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) {
 2859			/* Don't count flow-control packets as errors. */
2860 if ((status & GMR_FS_GOOD_FC) == 0)
2861 ifp->if_ierrors++;
2862 msk_discard_jumbo_rxbuf(sc_if, cons);
2863 break;
2864 }
2865 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons];
2866 m = jrxd->rx_m;
2867 if (msk_jumbo_newbuf(sc_if, cons) != 0) {
2868 ifp->if_iqdrops++;
2869 /* Reuse old buffer. */
2870 msk_discard_jumbo_rxbuf(sc_if, cons);
2871 break;
2872 }
2873 m->m_pkthdr.rcvif = ifp;
2874 m->m_pkthdr.len = m->m_len = len;
2875 ifp->if_ipackets++;
2876 /* Check for VLAN tagged packets. */
2877 if ((status & GMR_FS_VLAN) != 0 &&
2878 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
2879 m->m_pkthdr.ether_vtag = sc_if->msk_vtag;
2880 m->m_flags |= M_VLANTAG;
2881 }
2882 MSK_IF_UNLOCK(sc_if);
2883 (*ifp->if_input)(ifp, m);
2884 MSK_IF_LOCK(sc_if);
2885 } while (0);
2886
2887 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT);
2888 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT);
2889}
2890#endif
2891
2892static void
2893msk_txeof(struct msk_if_softc *sc_if, int idx)
2894{
2895 struct msk_txdesc *txd;
2896 struct msk_tx_desc *cur_tx;
2897 struct ifnet *ifp;
2898 uint32_t control;
2899 int cons, prog;
2900
2901 ifp = sc_if->msk_ifp;
2902
2d586421
SZ
2903 /*
2904 * Go through our tx ring and free mbufs for those
2905 * frames that have been sent.
2906 */
2907 cons = sc_if->msk_cdata.msk_tx_cons;
2908 prog = 0;
2909 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) {
2910 if (sc_if->msk_cdata.msk_tx_cnt <= 0)
2911 break;
2912 prog++;
2913 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons];
2914 control = le32toh(cur_tx->msk_control);
2915 sc_if->msk_cdata.msk_tx_cnt--;
2d586421
SZ
2916 if ((control & EOP) == 0)
2917 continue;
2918 txd = &sc_if->msk_cdata.msk_txdesc[cons];
2d586421
SZ
2919 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap);
2920
2921 ifp->if_opackets++;
2922 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!",
2923 __func__));
2924 m_freem(txd->tx_m);
2925 txd->tx_m = NULL;
2926 }
2927
2928 if (prog > 0) {
2929 sc_if->msk_cdata.msk_tx_cons = cons;
def0e148
SZ
2930 if (!MSK_IS_OACTIVE(sc_if))
2931 ifp->if_flags &= ~IFF_OACTIVE;
2d586421
SZ
2932 if (sc_if->msk_cdata.msk_tx_cnt == 0)
2933 ifp->if_timer = 0;
2934 /* No need to sync LEs as we didn't update LEs. */
2935 }
2936}
2937
2938static void
2939msk_tick(void *xsc_if)
2940{
2941 struct msk_if_softc *sc_if = xsc_if;
2942 struct ifnet *ifp = &sc_if->arpcom.ac_if;
2943 struct mii_data *mii;
2944
2945 lwkt_serialize_enter(ifp->if_serializer);
2946
2947 mii = device_get_softc(sc_if->msk_miibus);
2948
2949 mii_tick(mii);
cd237572
SZ
2950 if (!sc_if->msk_link)
2951 msk_miibus_statchg(sc_if->msk_if_dev);
2d586421
SZ
2952 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
2953
2954 lwkt_serialize_exit(ifp->if_serializer);
2955}
2956
2957static void
2958msk_intr_phy(struct msk_if_softc *sc_if)
2959{
2960 uint16_t status;
2961
2962 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
2963 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT);
2964 /* Handle FIFO Underrun/Overflow? */
2965 if (status & PHY_M_IS_FIFO_ERROR) {
2966 device_printf(sc_if->msk_if_dev,
2967 "PHY FIFO underrun/overflow.\n");
2968 }
2969}
2970
2971static void
2972msk_intr_gmac(struct msk_if_softc *sc_if)
2973{
2974 struct msk_softc *sc;
2975 uint8_t status;
2976
2977 sc = sc_if->msk_softc;
2978 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
2979
2980 /* GMAC Rx FIFO overrun. */
2981 if ((status & GM_IS_RX_FF_OR) != 0) {
2982 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
2983 GMF_CLI_RX_FO);
2d586421
SZ
2984 }
2985 /* GMAC Tx FIFO underrun. */
2986 if ((status & GM_IS_TX_FF_UR) != 0) {
2987 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
2988 GMF_CLI_TX_FU);
2989 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n");
2990 /*
2991 * XXX
 2992		 * In case of a Tx underrun, we may need to flush/reset the
 2993		 * Tx MAC, but that would also require resynchronization
 2994		 * with the status LEs. Reinitializing the status LEs would
 2995		 * affect the other port in a dual MAC configuration, so it
 2996		 * should be avoided as much as possible.
 2997		 * Due to lack of documentation it's all a vague guess, but
 2998		 * it needs more investigation.
2999 */
3000 }
3001}
3002
3003static void
3004msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status)
3005{
3006 struct msk_softc *sc;
3007
3008 sc = sc_if->msk_softc;
3009 if ((status & Y2_IS_PAR_RD1) != 0) {
3010 device_printf(sc_if->msk_if_dev,
3011 "RAM buffer read parity error\n");
3012 /* Clear IRQ. */
3013 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3014 RI_CLR_RD_PERR);
3015 }
3016 if ((status & Y2_IS_PAR_WR1) != 0) {
3017 device_printf(sc_if->msk_if_dev,
3018 "RAM buffer write parity error\n");
3019 /* Clear IRQ. */
3020 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL),
3021 RI_CLR_WR_PERR);
3022 }
3023 if ((status & Y2_IS_PAR_MAC1) != 0) {
3024 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n");
3025 /* Clear IRQ. */
3026 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3027 GMF_CLI_TX_PE);
3028 }
3029 if ((status & Y2_IS_PAR_RX1) != 0) {
3030 device_printf(sc_if->msk_if_dev, "Rx parity error\n");
3031 /* Clear IRQ. */
3032 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR);
3033 }
3034 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) {
3035 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n");
3036 /* Clear IRQ. */
3037 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP);
3038 }
3039}
3040
3041static void
3042mskc_intr_hwerr(struct msk_softc *sc)
3043{
3044 uint32_t status;
3045 uint32_t tlphead[4];
3046
3047 status = CSR_READ_4(sc, B0_HWE_ISRC);
3048 /* Time Stamp timer overflow. */
3049 if ((status & Y2_IS_TIST_OV) != 0)
3050 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
3051 if ((status & Y2_IS_PCI_NEXP) != 0) {
3052 /*
 3053		 * A PCI Express error occurred which is not described in the
 3054		 * PEX spec.
 3055		 * This error is also mapped to either the Master Abort
 3056		 * (Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and
 3057		 * can only be cleared there.
3058 */
3059 device_printf(sc->msk_dev,
3060 "PCI Express protocol violation error\n");
3061 }
3062
3063 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) {
3064 uint16_t v16;
3065
3066 if ((status & Y2_IS_MST_ERR) != 0)
3067 device_printf(sc->msk_dev,
3068 "unexpected IRQ Status error\n");
3069 else
3070 device_printf(sc->msk_dev,
3071 "unexpected IRQ Master error\n");
3072 /* Reset all bits in the PCI status register. */
3073 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2);
3074 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3075 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 |
3076 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT |
3077 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2);
3078 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3079 }
3080
3081 /* Check for PCI Express Uncorrectable Error. */
3082 if ((status & Y2_IS_PCI_EXP) != 0) {
3083 uint32_t v32;
3084
3085 /*
 3086		 * On the PCI Express bus, bridges are called root complexes (RC).
 3087		 * PCI Express errors are recognized by the root complex too,
 3088		 * which requests the system to handle the problem. After the
 3089		 * error occurrence it may be that no access to the adapter
 3090		 * can be performed any longer.
3091 */
3092
3093 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT);
3094 if ((v32 & PEX_UNSUP_REQ) != 0) {
3095 /* Ignore unsupported request error. */
3096 if (bootverbose) {
3097 device_printf(sc->msk_dev,
3098 "Uncorrectable PCI Express error\n");
3099 }
3100 }
3101 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) {
3102 int i;
3103
 3104			/* Get the TLP header from the Log Registers. */
3105 for (i = 0; i < 4; i++)
3106 tlphead[i] = CSR_PCI_READ_4(sc,
3107 PEX_HEADER_LOG + i * 4);
3108 /* Check for vendor defined broadcast message. */
3109 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) {
3110 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP;
3111 CSR_WRITE_4(sc, B0_HWE_IMSK,
3112 sc->msk_intrhwemask);
3113 CSR_READ_4(sc, B0_HWE_IMSK);
3114 }
3115 }
3116 /* Clear the interrupt. */
3117 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON);
3118 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff);
3119 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
3120 }
3121
3122 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL)
3123 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status);
3124 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL)
3125 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8);
3126}
3127
3128static __inline void
3129msk_rxput(struct msk_if_softc *sc_if)
3130{
3131 struct msk_softc *sc;
3132
3133 sc = sc_if->msk_softc;
3134#ifdef MSK_JUMBO
3135 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3136 bus_dmamap_sync(
3137 sc_if->msk_cdata.msk_jumbo_rx_ring_tag,
3138 sc_if->msk_cdata.msk_jumbo_rx_ring_map,
3139 BUS_DMASYNC_PREWRITE);
2d586421 3140 }
c78f83cb 3141#endif
2d586421
SZ
3142 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq,
3143 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod);
3144}
3145
3146static int
3147mskc_handle_events(struct msk_softc *sc)
3148{
3149 struct msk_if_softc *sc_if;
3150 int rxput[2];
3151 struct msk_stat_desc *sd;
3152 uint32_t control, status;
3153 int cons, idx, len, port, rxprog;
a75a1559 3154 struct mbuf_chain chain[MAXCPU];
2d586421
SZ
3155
3156 idx = CSR_READ_2(sc, STAT_PUT_IDX);
3157 if (idx == sc->msk_stat_cons)
3158 return (0);
3159
0ae155c2 3160 ether_input_chain_init(chain);
0ae155c2 3161
2d586421
SZ
3162 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0;
3163
3164 rxprog = 0;
3165 for (cons = sc->msk_stat_cons; cons != idx;) {
3166 sd = &sc->msk_stat_ring[cons];
3167 control = le32toh(sd->msk_control);
3168 if ((control & HW_OWNER) == 0)
3169 break;
3170 /*
3171 * Marvell's FreeBSD driver updates status LE after clearing
 3172		 * HW_OWNER. However, we don't have a way to sync a single LE
 3173		 * with the bus_dma(9) API; bus_dma(9) only provides a way to sync
 3174		 * an entire DMA map. So don't sync the LE until we have a better
3175 * way to sync LEs.
3176 */
3177 control &= ~HW_OWNER;
3178 sd->msk_control = htole32(control);
3179 status = le32toh(sd->msk_status);
3180 len = control & STLE_LEN_MASK;
3181 port = (control >> 16) & 0x01;
3182 sc_if = sc->msk_if[port];
3183 if (sc_if == NULL) {
3184 device_printf(sc->msk_dev, "invalid port opcode "
3185 "0x%08x\n", control & STLE_OP_MASK);
3186 continue;
3187 }
3188
3189 switch (control & STLE_OP_MASK) {
3190 case OP_RXVLAN:
3191 sc_if->msk_vtag = ntohs(len);
3192 break;
3193 case OP_RXCHKSVLAN:
3194 sc_if->msk_vtag = ntohs(len);
3195 break;
3196 case OP_RXSTAT:
f308b0ac
SZ
3197 if ((sc_if->msk_ifp->if_flags & IFF_RUNNING) == 0)
3198 break;
2d586421
SZ
3199#ifdef MSK_JUMBO
3200 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN))
3201 msk_jumbo_rxeof(sc_if, status, len);
3202 else
3203#endif
0ae155c2 3204 msk_rxeof(sc_if, status, len, chain);
2d586421
SZ
3205 rxprog++;
3206 /*
 3207			 * Because there is no way to sync a single Rx LE,
 3208			 * put the DMA sync operation off until the end of
 3209			 * event processing.
3210 */
3211 rxput[port]++;
3212 /* Update prefetch unit if we've passed water mark. */
3213 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) {
3214 msk_rxput(sc_if);
3215 rxput[port] = 0;
3216 }
3217 break;
3218 case OP_TXINDEXLE:
3219 if (sc->msk_if[MSK_PORT_A] != NULL) {
3220 msk_txeof(sc->msk_if[MSK_PORT_A],
3221 status & STLE_TXA1_MSKL);
3222 }
3223 if (sc->msk_if[MSK_PORT_B] != NULL) {
3224 msk_txeof(sc->msk_if[MSK_PORT_B],
3225 ((status & STLE_TXA2_MSKL) >>
3226 STLE_TXA2_SHIFTL) |
3227 ((len & STLE_TXA2_MSKH) <<
3228 STLE_TXA2_SHIFTH));
3229 }
3230 break;
3231 default:
3232 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n",
3233 control & STLE_OP_MASK);
3234 break;
3235 }
3236 MSK_INC(cons, MSK_STAT_RING_CNT);
3237 if (rxprog > sc->msk_process_limit)
3238 break;
3239 }
3240
0ae155c2
SZ
3241 if (rxprog > 0)
3242 ether_input_dispatch(chain);
0ae155c2 3243
2d586421
SZ
3244 sc->msk_stat_cons = cons;
3245 /* XXX We should sync status LEs here. See above notes. */
3246
3247 if (rxput[MSK_PORT_A] > 0)
3248 msk_rxput(sc->msk_if[MSK_PORT_A]);
3249 if (rxput[MSK_PORT_B] > 0)
3250 msk_rxput(sc->msk_if[MSK_PORT_B]);
3251
3252 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX));
3253}
3254
3255/* Legacy interrupt handler for shared interrupt. */
3256static void
3257mskc_intr(void *xsc)
3258{
3259 struct msk_softc *sc;
3260 struct msk_if_softc *sc_if0, *sc_if1;
3261 struct ifnet *ifp0, *ifp1;
3262 uint32_t status;
3263
3264 sc = xsc;
3265 ASSERT_SERIALIZED(&sc->msk_serializer);
3266
3267 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */
3268 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2);
3269 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 ||
3270 (status & sc->msk_intrmask) == 0) {
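		/*
		 * Not our interrupt, or the adapter is suspended: re-arm
		 * (the ISRC2 read above masked further interrupts) and let
		 * any other handler on a shared line run.
		 */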
3271 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3272 return;
3273 }
3274
3275 sc_if0 = sc->msk_if[MSK_PORT_A];
3276 sc_if1 = sc->msk_if[MSK_PORT_B];
3277 ifp0 = ifp1 = NULL;
3278 if (sc_if0 != NULL)
3279 ifp0 = sc_if0->msk_ifp;
3280 if (sc_if1 != NULL)
3281 ifp1 = sc_if1->msk_ifp;
3282
3283 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL)
3284 msk_intr_phy(sc_if0);
3285 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL)
3286 msk_intr_phy(sc_if1);
3287 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL)
3288 msk_intr_gmac(sc_if0);
3289 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL)
3290 msk_intr_gmac(sc_if1);
3291 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) {
3292 device_printf(sc->msk_dev, "Rx descriptor error\n");
3293 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2);
3294 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3295 CSR_READ_4(sc, B0_IMSK);
3296 }
3297 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) {
3298 device_printf(sc->msk_dev, "Tx descriptor error\n");
3299 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2);
3300 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3301 CSR_READ_4(sc, B0_IMSK);
3302 }
3303 if ((status & Y2_IS_HW_ERR) != 0)
3304 mskc_intr_hwerr(sc);
3305
3306 while (mskc_handle_events(sc) != 0)
3307 ;
3308 if ((status & Y2_IS_STAT_BMU) != 0)
3309 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ);
3310
3311 /* Reenable interrupts. */
3312 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2);
3313
3314 if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 &&
3315 !ifq_is_empty(&ifp0->if_snd))
9db4b353 3316 if_devstart(ifp0);
2d586421
SZ
3317 if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 &&
3318 !ifq_is_empty(&ifp1->if_snd))
9db4b353 3319 if_devstart(ifp1);
2d586421
SZ
3320}
3321
3322static void
d9e919c4
SZ
3323msk_set_tx_stfwd(struct msk_if_softc *sc_if)
3324{
3325 struct msk_softc *sc = sc_if->msk_softc;
3326 struct ifnet *ifp = sc_if->msk_ifp;
3327
7b7b65f4
SZ
3328 if ((sc->msk_hw_id == CHIP_ID_YUKON_EX &&
3329 sc->msk_hw_rev != CHIP_REV_YU_EX_A0) ||
3330 sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) {
3331 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3332 TX_STFW_ENA);
3333 } else {
d9e919c4
SZ
3334 if (ifp->if_mtu > ETHERMTU) {
3335 /* Set Tx GMAC FIFO Almost Empty Threshold. */
3336 CSR_WRITE_4(sc,
3337 MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR),
3338 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR);
3339 /* Disable Store & Forward mode for Tx. */
7b7b65f4
SZ
3340 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3341 TX_STFW_DIS);
d9e919c4 3342 } else {
7b7b65f4
SZ
3343 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T),
3344 TX_STFW_ENA);
d9e919c4 3345 }
d9e919c4
SZ
3346 }
3347}
3348
3349static void
2d586421
SZ
3350msk_init(void *xsc)
3351{
3352 struct msk_if_softc *sc_if = xsc;
3353 struct msk_softc *sc = sc_if->msk_softc;
3354 struct ifnet *ifp = sc_if->msk_ifp;
3355 struct mii_data *mii;
3356 uint16_t eaddr[ETHER_ADDR_LEN / 2];
3357 uint16_t gmac;
080bd27e 3358 uint32_t reg;
2d586421
SZ
3359 int error, i;
3360
3361 ASSERT_SERIALIZED(ifp->if_serializer);
3362
3363 mii = device_get_softc(sc_if->msk_miibus);
3364
3365 error = 0;
3366 /* Cancel pending I/O and free all Rx/Tx buffers. */
3367 msk_stop(sc_if);
3368
3369 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;
3370 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN &&
3371 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) {
3372 /*
3373 * In Yukon EC Ultra, TSO & checksum offload is not
3374 * supported for jumbo frame.
3375 */
3376 ifp->if_hwassist &= ~MSK_CSUM_FEATURES;
3377 ifp->if_capenable &= ~IFCAP_TXCSUM;
3378 }
3379
55223f24
SZ
3380 /* GMAC Control reset. */
3381 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET);
3382 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR);
3383 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF);
d9e919c4
SZ
3384 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
3385 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL),
3386 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON |
3387 GMC_BYP_RETR_ON);
3388 }
55223f24 3389
2d586421 3390 /*
55223f24
SZ
3391 * Initialize GMAC first such that speed/duplex/flow-control
3392 * parameters are renegotiated when interface is brought up.
2d586421 3393 */
55223f24 3394 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0);
2d586421
SZ
3395
3396 /* Dummy read the Interrupt Source Register. */
3397 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC));
3398
3399 /* Set MIB Clear Counter Mode. */
3400 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR);
3401 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR);
3402 /* Read all MIB Counters with Clear Mode set. */
3403 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
3404 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i);
3405 /* Clear MIB Clear Counter Mode. */
3406 gmac &= ~GM_PAR_MIB_CLR;
3407 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac);
3408
3409 /* Disable FCS. */
3410 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS);
3411
3412 /* Setup Transmit Control Register. */
3413 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
3414
3415 /* Setup Transmit Flow Control Register. */
3416 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff);
3417
3418 /* Setup Transmit Parameter Register. */
3419 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM,
3420 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
3421 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
3422
3423 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) |
3424 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
3425
3426 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN)
3427 gmac |= GM_SMOD_JUMBO_ENA;
3428 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac);
3429
3430 /* Set station address. */
3431 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
3432 for (i = 0; i < ETHER_ADDR_LEN /2; i++)
3433 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4,
3434 eaddr[i]);
3435 for (i = 0; i < ETHER_ADDR_LEN /2; i++)
3436 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4,
3437 eaddr[i]);
3438
3439 /* Disable interrupts for counter overflows. */
3440 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0);
3441 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0);
3442 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0);
3443
3444 /* Configure Rx MAC FIFO. */
3445 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3446 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR);
080bd27e 3447 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
d9e919c4
SZ
3448 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P ||
3449 sc->msk_hw_id == CHIP_ID_YUKON_EX)
080bd27e
SZ
3450 reg |= GMF_RX_OVER_ON;
3451 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg);
2d586421 3452
dc7303ff
SZ
3453 /* Set receive filter. */
3454 msk_rxfilter(sc_if);
2d586421 3455
10cc281d
SZ
3456 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) {
3457 /* Clear flush mask - HW bug. */
3458 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0);
3459 } else {
3460 /* Flush Rx MAC FIFO on any flow control or error. */
3461 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK),
3462 GMR_FS_ANY_ERR);
3463 }
2d586421 3464
8510fba4
SZ
3465 /*
3466	 * Set Rx FIFO flush threshold to 64 bytes + 1 FIFO word
3467 * due to hardware hang on receipt of pause frames.
3468 */
080bd27e
SZ
3469 reg = RX_GMF_FL_THR_DEF + 1;
3470	/* Another magic value for Yukon FE+, taken from Linux. */
3471 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3472 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0)
3473 reg = 0x178;
3474 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg);
3475
2d586421
SZ
3476
3477 /* Configure Tx MAC FIFO. */
3478 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3479 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR);
3480 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON);
3481
3482 /* Configure hardware VLAN tag insertion/stripping. */
3483 msk_setvlan(sc_if, ifp);
3484
2a9b20a4 3485 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) {
2d586421 3486		/* Set Rx Pause threshold. */
ab5e50d3 3487 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR),
2d586421 3488 MSK_ECU_LLPP);
ab5e50d3 3489 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR),
2d586421 3490 MSK_ECU_ULPP);
d9e919c4
SZ
3491 /* Configure store-and-forward for Tx. */
3492 msk_set_tx_stfwd(sc_if);
2d586421
SZ
3493 }
3494
080bd27e
SZ
3495 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P &&
3496 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
3497 /* Disable dynamic watermark - from Linux. */
3498 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA));
3499 reg &= ~0x03;
3500 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg);
3501 }
3502
2d586421
SZ
3503 /*
3504 * Disable Force Sync bit and Alloc bit in Tx RAM interface
3505	 * arbiter as we don't use the Sync Tx queue.
3506 */
3507 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL),
3508 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
3509 /* Enable the RAM Interface Arbiter. */
3510 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB);
3511
3512 /* Setup RAM buffer. */
3513 msk_set_rambuffer(sc_if);
3514
3515 /* Disable Tx sync Queue. */
3516 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET);
3517
3518 /* Setup Tx Queue Bus Memory Interface. */
3519 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET);
3520 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT);
3521 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON);
3522 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM);
d9e919c4
SZ
3523 switch (sc->msk_hw_id) {
3524 case CHIP_ID_YUKON_EC_U:
3525 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) {
3526 /* Fix for Yukon-EC Ultra: set BMU FIFO level */
3527 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL),
3528 MSK_ECU_TXFF_LEV);
3529 }
3530 break;
3531 case CHIP_ID_YUKON_EX:
3532 /*
3533		 * Yukon Extreme seems to have a silicon bug in its
3534		 * automatic Tx checksum calculation capability.
3535 */
3536 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) {
3537 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F),
3538 F_TX_CHK_AUTO_OFF);
3539 }
3540 break;
3541 }
2d586421
SZ
3542
3543 /* Setup Rx Queue Bus Memory Interface. */
3544 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET);
3545 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT);
3546 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON);
3547 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM);
3548 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U &&
3549 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) {
3550 /* MAC Rx RAM Read is controlled by hardware. */
3551 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS);
3552 }
3553
3554 msk_set_prefetch(sc, sc_if->msk_txq,
3555 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1);
3556 msk_init_tx_ring(sc_if);
3557
3558 /* Disable Rx checksum offload and RSS hash. */
3559 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3560 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH);
3561#ifdef MSK_JUMBO
3562 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) {
3563 msk_set_prefetch(sc, sc_if->msk_rxq,
3564 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr,
3565 MSK_JUMBO_RX_RING_CNT - 1);
3566 error = msk_init_jumbo_rx_ring(sc_if);
3567 } else
3568#endif
3569 {
3570 msk_set_prefetch(sc, sc_if->msk_rxq,
3571 sc_if->msk_rdata.msk_rx_ring_paddr,
3572 MSK_RX_RING_CNT - 1);
3573 error = msk_init_rx_ring(sc_if);
3574 }
3575 if (error != 0) {
3576 device_printf(sc_if->msk_if_dev,
3577 "initialization failed: no memory for Rx buffers\n");
3578 msk_stop(sc_if);
3579 return;
3580 }
441c9e87
SZ
3581 if (sc->msk_hw_id == CHIP_ID_YUKON_EX) {
3582 /* Disable flushing of non-ASF packets. */
3583 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T),
3584 GMF_RX_MACSEC_FLUSH_OFF);
3585 }
2d586421
SZ
3586
3587 /* Configure interrupt handling. */
3588 if (sc_if->msk_port == MSK_PORT_A) {
3589 sc->msk_intrmask |= Y2_IS_PORT_A;
3590 sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
3591 } else {
3592 sc->msk_intrmask |= Y2_IS_PORT_B;
3593 sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
3594 }
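	/* Update the interrupt masks; the reads below ensure the writes complete. */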
3595 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3596 CSR_READ_4(sc, B0_HWE_IMSK);
3597 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3598 CSR_READ_4(sc, B0_IMSK);
3599
3600 sc_if->msk_link = 0;
3601 mii_mediachg(mii);
3602
f59f1081
SZ
3603 mskc_set_imtimer(sc);
3604
2d586421
SZ
3605 ifp->if_flags |= IFF_RUNNING;
3606 ifp->if_flags &= ~IFF_OACTIVE;
3607
3608 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
3609}
3610
3611static void
3612msk_set_rambuffer(struct msk_if_softc *sc_if)
3613{
3614 struct msk_softc *sc;
3615 int ltpp, utpp;
3616
2a9b20a4
SZ
3617 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
3618 return;
3619
2d586421
SZ
3620 sc = sc_if->msk_softc;
3621
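	/*
	 * The RAM buffer start/end and read/write pointer registers below
	 * are programmed in 8-byte units, hence the divisions by 8.
	 */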
3622 /* Setup Rx Queue. */
3623 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
3624 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
3625 sc->msk_rxqstart[sc_if->msk_port] / 8);
3626 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
3627 sc->msk_rxqend[sc_if->msk_port] / 8);
3628 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
3629 sc->msk_rxqstart[sc_if->msk_port] / 8);
3630 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
3631 sc->msk_rxqstart[sc_if->msk_port] / 8);
3632
3633 utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3634 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
3635 ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
3636 sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
3637 if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
3638 ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
3639 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
3640 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
3641	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */
3642
3643 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
3644 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));
3645
3646 /* Setup Tx Queue. */
3647 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
3648 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
3649 sc->msk_txqstart[sc_if->msk_port] / 8);
3650 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
3651 sc->msk_txqend[sc_if->msk_port] / 8);
3652 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
3653 sc->msk_txqstart[sc_if->msk_port] / 8);
3654 CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
3655 sc->msk_txqstart[sc_if->msk_port] / 8);
3656 /* Enable Store & Forward for Tx side. */
3657 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
3658 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
3659 CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
3660}
3661
3662static void
3663msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
3664 uint32_t count)
3665{
3666
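	/*
	 * Program the queue's list-element prefetch unit: point it at the
	 * descriptor ring in host memory and set the last valid index.
	 */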
3667 /* Reset the prefetch unit. */
3668 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3669 PREF_UNIT_RST_SET);
3670 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3671 PREF_UNIT_RST_CLR);
3672 /* Set LE base address. */
3673 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
3674 MSK_ADDR_LO(addr));
3675 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
3676 MSK_ADDR_HI(addr));
3677 /* Set the list last index. */
3678 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
3679 count);
3680 /* Turn on prefetch unit. */
3681 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
3682 PREF_UNIT_OP_ON);
3683 /* Dummy read to ensure write. */
3684 CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
3685}
3686
3687static void
3688msk_stop(struct msk_if_softc *sc_if)
3689{
3690 struct msk_softc *sc = sc_if->msk_softc;
3691 struct ifnet *ifp = sc_if->msk_ifp;
3692 struct msk_txdesc *txd;
3693 struct msk_rxdesc *rxd;
3694#ifdef MSK_JUMBO
3695 struct msk_rxdesc *jrxd;
3696#endif
3697 uint32_t val;
3698 int i;
3699
3700 ASSERT_SERIALIZED(ifp->if_serializer);
3701
3702 callout_stop(&sc_if->msk_tick_ch);
3703 ifp->if_timer = 0;
3704
3705 /* Disable interrupts. */
3706 if (sc_if->msk_port == MSK_PORT_A) {
3707 sc->msk_intrmask &= ~Y2_IS_PORT_A;
3708 sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
3709 } else {
3710 sc->msk_intrmask &= ~Y2_IS_PORT_B;
3711 sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
3712 }
3713 CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
3714 CSR_READ_4(sc, B0_HWE_IMSK);
3715 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
3716 CSR_READ_4(sc, B0_IMSK);
3717
3718 /* Disable Tx/Rx MAC. */
3719 val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3720 val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
3721 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
3722	/* Read back to ensure the write completed. */
3723 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
3724
3725 /* Stop Tx BMU. */
3726 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
3727 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
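	/*
	 * Poll until the BMU reports stopped or idle, re-issuing the stop
	 * command as long as neither bit reads back set.
	 */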
3728 for (i = 0; i < MSK_TIMEOUT; i++) {
3729 if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
3730 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3731 BMU_STOP);
69853fa0 3732 val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
2d586421
SZ
3733 } else
3734 break;
3735 DELAY(1);
3736 }
3737 if (i == MSK_TIMEOUT)
3738 device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
3739 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
3740 RB_RST_SET | RB_DIS_OP_MD);
3741
3742	/* Disable all GMAC interrupts. */
3743 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
3744 /* Disable PHY interrupt. */
3745 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);
3746
3747 /* Disable the RAM Interface Arbiter. */
3748 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);
3749
3750	/* Reset the PCI FIFO of the async Tx queue. */
3751 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
3752 BMU_RST_SET | BMU_FIFO_RST);
3753
3754 /* Reset the Tx prefetch units. */
3755 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
3756 PREF_UNIT_RST_SET);
3757
3758 /* Reset the RAM Buffer async Tx queue. */
3759 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);
3760
3761 /* Reset Tx MAC FIFO. */
3762 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
3763 /* Set Pause Off. */
3764 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);
3765
3766 /*
3767	 * The Rx Stop command does not work on Yukon-2 if the BMU has not
3768	 * reached the end of a packet, and since we cannot be sure that no
3769	 * data is incoming, the BMU must be reset while it is not in the
3770	 * middle of a DMA transfer. Because the Rx path may still be
3771	 * active, the Rx RAM buffer is stopped first so that any incoming
3772	 * data cannot trigger a DMA. After the RAM buffer has been
3773	 * stopped, the BMU is polled until any DMA in progress has ended,
3774	 * and only then is it reset.
3775 */
3776
3777 /* Disable the RAM Buffer receive queue. */
3778 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
3779 for (i = 0; i < MSK_TIMEOUT; i++) {
3780 if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
3781 CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
3782 break;
3783 DELAY(1);
3784 }
3785 if (i == MSK_TIMEOUT)
3786 device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
3787 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
3788 BMU_RST_SET | BMU_FIFO_RST);
3789 /* Reset the Rx prefetch unit. */
3790 CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
3791 PREF_UNIT_RST_SET);
3792 /* Reset the RAM Buffer receive queue. */
3793 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
3794 /* Reset Rx MAC FIFO. */
3795 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);
3796
3797 /* Free Rx and Tx mbufs still in the queues. */
3798 for (i = 0; i < MSK_RX_RING_CNT; i++) {
3799 rxd = &sc_if->msk_cdata.msk_rxdesc[i];
3800 if (rxd->rx_m != NULL) {
2d586421
SZ
3801 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
3802 rxd->rx_dmamap);
3803 m_freem(rxd->rx_m);
3804 rxd->rx_m = NULL;
3805 }
3806 }
3807#ifdef MSK_JUMBO
3808 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
3809 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
3810 if (jrxd->rx_m != NULL) {
3811 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
3812 jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
3813 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
3814 jrxd->rx_dmamap);
3815 m_freem(jrxd->rx_m);
3816 jrxd->rx_m = NULL;
3817 }
3818 }
3819#endif
3820 for (i = 0; i < MSK_TX_RING_CNT; i++) {
3821 txd = &sc_if->msk_cdata.msk_txdesc[i];
3822 if (txd->tx_m != NULL) {
2d586421
SZ
3823 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
3824 txd->tx_dmamap);
3825 m_freem(txd->tx_m);
3826 txd->tx_m = NULL;
3827 }
3828 }
3829
3830 /*
3831 * Mark the interface down.
3832 */
3833 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3834 sc_if->msk_link = 0;
3835}
3836
2d586421 3837static int
f59f1081 3838mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
2d586421 3839{
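	/* Clamp the process limit tunable to [MSK_PROC_MIN, MSK_PROC_MAX]. */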
f59f1081
SZ
3840 return sysctl_int_range(oidp, arg1, arg2, req,
3841 MSK_PROC_MIN, MSK_PROC_MAX);
2d586421
SZ
3842}
3843
f59f1081
SZ
3844static int
3845mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
3846{
3847 struct msk_softc *sc = arg1;
3848 struct lwkt_serialize *serializer = &sc->msk_serializer;
3849 int error = 0, v;
3850
3851 lwkt_serialize_enter(serializer);
3852
3853 v = sc->msk_intr_rate;
3854 error = sysctl_handle_int(oidp, &v, 0, req);
3855 if (error || req->newptr == NULL)
3856 goto back;
3857 if (v < 0) {
3858 error = EINVAL;
3859 goto back;
3860 }
3861
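	/*
	 * Commit the new rate; reprogram the interrupt moderation timer
	 * only if at least one port is currently running.
	 */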
3862 if (sc->msk_intr_rate != v) {
3863 int flag = 0, i;
3864
3865 sc->msk_intr_rate = v;
3866 for (i = 0; i < 2; ++i) {
3867 if (sc->msk_if[i] != NULL) {
3868 flag |= sc->msk_if[i]->
3869 arpcom.ac_if.if_flags & IFF_RUNNING;
3870 }
3871 }
3872 if (flag)
3873 mskc_set_imtimer(sc);
3874 }
3875back:
3876 lwkt_serialize_exit(serializer);
3877 return error;
3878}
2d586421
SZ
3879
3880static int
3881msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
3882 void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
3883{
3884 struct msk_if_softc *sc_if = device_get_softc(dev);
c78f83cb 3885 bus_dmamem_t dmem;