/*
 * Copyright (c) 2013 Tsubai Masanari
 * Copyright (c) 2013 Bryan Venteicher <bryanv@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $
 * $FreeBSD: head/sys/dev/vmware/vmxnet3/if_vmx.c 318867 2017-05-25 10:49:56Z avg $
 */
/* Driver for VMware vmxnet3 virtual ethernet devices. */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <net/ethernet.h>
#include <net/if_var.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/vlan/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <sys/in_cksum.h>
#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#define VMXNET3_LEGACY_TX 1	/* XXX we need this at the moment */
#include "if_vmxreg.h"
#include "if_vmxvar.h"

#include "opt_inet6.h"
#ifdef VMXNET3_FAILPOINTS
#include <sys/fail.h>
static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0,
    "vmxnet3 fail points");
#define VMXNET3_FP	_debug_fail_point_vmxnet3
#endif
static int	vmxnet3_probe(device_t);
static int	vmxnet3_attach(device_t);
static int	vmxnet3_detach(device_t);
static int	vmxnet3_shutdown(device_t);

static int	vmxnet3_alloc_resources(struct vmxnet3_softc *);
static void	vmxnet3_free_resources(struct vmxnet3_softc *);
static int	vmxnet3_check_version(struct vmxnet3_softc *);
static void	vmxnet3_initial_config(struct vmxnet3_softc *);
static void	vmxnet3_check_multiqueue(struct vmxnet3_softc *);

static int	vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int,
		    struct vmxnet3_interrupt *);
static int	vmxnet3_alloc_intr_resources(struct vmxnet3_softc *);
static int	vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *);
static int	vmxnet3_setup_interrupts(struct vmxnet3_softc *);
static int	vmxnet3_alloc_interrupts(struct vmxnet3_softc *);

static void	vmxnet3_free_interrupt(struct vmxnet3_softc *,
		    struct vmxnet3_interrupt *);
static void	vmxnet3_free_interrupts(struct vmxnet3_softc *);

#ifndef VMXNET3_LEGACY_TX
static int	vmxnet3_alloc_taskqueue(struct vmxnet3_softc *);
static void	vmxnet3_start_taskqueue(struct vmxnet3_softc *);
static void	vmxnet3_drain_taskqueue(struct vmxnet3_softc *);
static void	vmxnet3_free_taskqueue(struct vmxnet3_softc *);
#endif
static int	vmxnet3_init_rxq(struct vmxnet3_softc *, int);
static int	vmxnet3_init_txq(struct vmxnet3_softc *, int);
static int	vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *);
static void	vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *);
static void	vmxnet3_destroy_txq(struct vmxnet3_txqueue *);
static void	vmxnet3_free_rxtx_queues(struct vmxnet3_softc *);

static int	vmxnet3_alloc_shared_data(struct vmxnet3_softc *);
static void	vmxnet3_free_shared_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_txq_data(struct vmxnet3_softc *);
static void	vmxnet3_free_txq_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_rxq_data(struct vmxnet3_softc *);
static void	vmxnet3_free_rxq_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_queue_data(struct vmxnet3_softc *);
static void	vmxnet3_free_queue_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_mcast_table(struct vmxnet3_softc *);
static void	vmxnet3_init_shared_data(struct vmxnet3_softc *);
static void	vmxnet3_init_hwassist(struct vmxnet3_softc *);
static void	vmxnet3_reinit_interface(struct vmxnet3_softc *);
static void	vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *);
static void	vmxnet3_reinit_shared_data(struct vmxnet3_softc *);
static int	vmxnet3_alloc_data(struct vmxnet3_softc *);
static void	vmxnet3_free_data(struct vmxnet3_softc *);
static int	vmxnet3_setup_interface(struct vmxnet3_softc *);
static void	vmxnet3_evintr(struct vmxnet3_softc *);
static void	vmxnet3_txq_eof(struct vmxnet3_txqueue *);
static void	vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *);
static int	vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *);
static void	vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *,
		    struct vmxnet3_rxring *, int);
static void	vmxnet3_rxq_eof(struct vmxnet3_rxqueue *);
static void	vmxnet3_legacy_intr(void *);
static void	vmxnet3_txq_intr(void *);
static void	vmxnet3_rxq_intr(void *);
static void	vmxnet3_event_intr(void *);

static void	vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static void	vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static void	vmxnet3_stop(struct vmxnet3_softc *);

static void	vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
static int	vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
static int	vmxnet3_reinit_queues(struct vmxnet3_softc *);
static int	vmxnet3_enable_device(struct vmxnet3_softc *);
static void	vmxnet3_reinit_rxfilters(struct vmxnet3_softc *);
static int	vmxnet3_reinit(struct vmxnet3_softc *);
static void	vmxnet3_init_locked(struct vmxnet3_softc *);
static void	vmxnet3_init(void *);
static int	vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *, struct mbuf *,
		    int *, int *, int *);
static int	vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **,
		    bus_dmamap_t, bus_dma_segment_t [], int *);
static void	vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t);
static int	vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **);
#ifdef VMXNET3_LEGACY_TX
static void	vmxnet3_start_locked(struct ifnet *);
static void	vmxnet3_start(struct ifnet *, struct ifaltq_subque *);
#else
static int	vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *,
		    struct mbuf *);
static int	vmxnet3_txq_mq_start(struct ifnet *, struct mbuf *);
static void	vmxnet3_txq_tq_deferred(void *, int);
#endif
static void	vmxnet3_txq_start(struct vmxnet3_txqueue *);
static void	vmxnet3_tx_start_all(struct vmxnet3_softc *);

static void	vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int,
		    uint16_t);
static void	vmxnet3_register_vlan(void *, struct ifnet *, uint16_t);
static void	vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t);
static void	vmxnet3_set_rxfilter(struct vmxnet3_softc *);
static int	vmxnet3_change_mtu(struct vmxnet3_softc *, int);
static int	vmxnet3_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
#ifndef VMXNET3_LEGACY_TX
static void	vmxnet3_qflush(struct ifnet *);
#endif
static int	vmxnet3_watchdog(struct vmxnet3_txqueue *);
static void	vmxnet3_refresh_host_stats(struct vmxnet3_softc *);
static void	vmxnet3_txq_accum_stats(struct vmxnet3_txqueue *,
		    struct vmxnet3_txq_stats *);
static void	vmxnet3_rxq_accum_stats(struct vmxnet3_rxqueue *,
		    struct vmxnet3_rxq_stats *);
static void	vmxnet3_tick(void *);
static void	vmxnet3_link_status(struct vmxnet3_softc *);
static void	vmxnet3_media_status(struct ifnet *, struct ifmediareq *);
static int	vmxnet3_media_change(struct ifnet *);
static void	vmxnet3_set_lladdr(struct vmxnet3_softc *);
static void	vmxnet3_get_lladdr(struct vmxnet3_softc *);

static void	vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *,
		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void	vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *,
		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void	vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *,
		    struct sysctl_ctx_list *, struct sysctl_oid_list *);
static void	vmxnet3_setup_sysctl(struct vmxnet3_softc *);
static void	vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t,
		    uint32_t);
static uint32_t	vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t);
static void	vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t,
		    uint32_t);
static void	vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t);
static uint32_t	vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t);

static void	vmxnet3_enable_intr(struct vmxnet3_softc *, int);
static void	vmxnet3_disable_intr(struct vmxnet3_softc *, int);
static void	vmxnet3_enable_all_intrs(struct vmxnet3_softc *);
static void	vmxnet3_disable_all_intrs(struct vmxnet3_softc *);

static int	vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t,
		    bus_size_t, struct vmxnet3_dma_alloc *);
static void	vmxnet3_dma_free(struct vmxnet3_softc *,
		    struct vmxnet3_dma_alloc *);
static int	vmxnet3_tunable_int(struct vmxnet3_softc *,
		    const char *, int);

typedef enum {
	VMXNET3_BARRIER_RD,
	VMXNET3_BARRIER_WR,
	VMXNET3_BARRIER_RDWR,
} vmxnet3_barrier_t;

static void	vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t);
static int vmxnet3_mq_disable = 0;
TUNABLE_INT("hw.vmx.mq_disable", &vmxnet3_mq_disable);
static int vmxnet3_default_txnqueue = VMXNET3_DEF_TX_QUEUES;
TUNABLE_INT("hw.vmx.txnqueue", &vmxnet3_default_txnqueue);
static int vmxnet3_default_rxnqueue = VMXNET3_DEF_RX_QUEUES;
TUNABLE_INT("hw.vmx.rxnqueue", &vmxnet3_default_rxnqueue);
static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC;
TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc);
static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC;
TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc);
static device_method_t vmxnet3_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe, vmxnet3_probe),
	DEVMETHOD(device_attach, vmxnet3_attach),
	DEVMETHOD(device_detach, vmxnet3_detach),
	DEVMETHOD(device_shutdown, vmxnet3_shutdown),

	DEVMETHOD_END
};

static driver_t vmxnet3_driver = {
	"vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc)
};

static devclass_t vmxnet3_devclass;
DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, NULL, NULL);

MODULE_DEPEND(vmx, pci, 1, 1, 1);
MODULE_DEPEND(vmx, ether, 1, 1, 1);
#define VMXNET3_VMWARE_VENDOR_ID	0x15AD
#define VMXNET3_VMWARE_DEVICE_ID	0x07B0

vmxnet3_probe(device_t dev)
	if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID &&
	    pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) {
		device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter");
		return (BUS_PROBE_DEFAULT);
	}
vmxnet3_attach(device_t dev)
	struct vmxnet3_softc *sc;

	sc = device_get_softc(dev);

	pci_enable_busmaster(dev);

	VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev));
	callout_init_lk(&sc->vmx_tick, &sc->vmx_lock);

	vmxnet3_initial_config(sc);

	error = vmxnet3_alloc_resources(sc);

	error = vmxnet3_check_version(sc);

	error = vmxnet3_alloc_rxtx_queues(sc);

#ifndef VMXNET3_LEGACY_TX
	error = vmxnet3_alloc_taskqueue(sc);

	error = vmxnet3_alloc_interrupts(sc);

	vmxnet3_check_multiqueue(sc);

	error = vmxnet3_alloc_data(sc);

	error = vmxnet3_setup_interface(sc);

	error = vmxnet3_setup_interrupts(sc);
		ether_ifdetach(sc->vmx_ifp);
		device_printf(dev, "could not set up interrupt\n");

	vmxnet3_setup_sysctl(sc);
#ifndef VMXNET3_LEGACY_TX
	vmxnet3_start_taskqueue(sc);
vmxnet3_detach(device_t dev)
	struct vmxnet3_softc *sc;

	sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		VMXNET3_CORE_LOCK(sc);
		VMXNET3_CORE_UNLOCK(sc);

		callout_terminate(&sc->vmx_tick);
#ifndef VMXNET3_LEGACY_TX
		vmxnet3_drain_taskqueue(sc);

	if (sc->vmx_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach);
		sc->vmx_vlan_attach = NULL;
	}
	if (sc->vmx_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_detach);
		sc->vmx_vlan_detach = NULL;
	}

#ifndef VMXNET3_LEGACY_TX
	vmxnet3_free_taskqueue(sc);
	vmxnet3_free_interrupts(sc);

	ifmedia_removeall(&sc->vmx_media);

	vmxnet3_free_data(sc);
	vmxnet3_free_resources(sc);
	vmxnet3_free_rxtx_queues(sc);

	VMXNET3_CORE_LOCK_DESTROY(sc);
vmxnet3_shutdown(device_t dev)

vmxnet3_alloc_resources(struct vmxnet3_softc *sc)
	sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	if (sc->vmx_res0 == NULL) {
		    "could not map BAR0 memory\n");

	sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0);
	sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0);

	sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	if (sc->vmx_res1 == NULL) {
		    "could not map BAR1 memory\n");

	sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1);
	sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1);

	if (pci_find_extcap(dev, PCIY_MSIX, NULL) == 0) {
		sc->vmx_msix_res = bus_alloc_resource_any(dev,
		    SYS_RES_MEMORY, &rid, RF_ACTIVE);

		if (sc->vmx_msix_res == NULL)
			sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX;
vmxnet3_free_resources(struct vmxnet3_softc *sc)
	if (sc->vmx_res0 != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0);

	if (sc->vmx_res1 != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1);

	if (sc->vmx_msix_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid,
		sc->vmx_msix_res = NULL;
vmxnet3_check_version(struct vmxnet3_softc *sc)
	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS);
	if ((version & 0x01) == 0) {
		device_printf(dev, "unsupported hardware version %#x\n",

	vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1);

	version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS);
	if ((version & 0x01) == 0) {
		device_printf(dev, "unsupported UPT version %#x\n", version);

	vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1);
trunc_powerof2(int val)
	return (1U << (fls(val) - 1));
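/*
 * Illustration (added commentary, not from the original source): fls()
 * returns the 1-based index of the most significant set bit, so the
 * expression above rounds val down to a power of two, e.g.
 *
 *	trunc_powerof2(1) == 1
 *	trunc_powerof2(5) == 4
 *	trunc_powerof2(8) == 8
 *
 * Callers must pass val >= 1, since fls(0) would yield a shift of -1.
 */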
vmxnet3_initial_config(struct vmxnet3_softc *sc)
	nqueue = vmxnet3_tunable_int(sc, "txnqueue", vmxnet3_default_txnqueue);
	if (nqueue > VMXNET3_MAX_TX_QUEUES || nqueue < 1)
		nqueue = VMXNET3_DEF_TX_QUEUES;

	sc->vmx_max_ntxqueues = trunc_powerof2(nqueue);

	nqueue = vmxnet3_tunable_int(sc, "rxnqueue", vmxnet3_default_rxnqueue);
	if (nqueue > VMXNET3_MAX_RX_QUEUES || nqueue < 1)
		nqueue = VMXNET3_DEF_RX_QUEUES;

	sc->vmx_max_nrxqueues = trunc_powerof2(nqueue);

	if (vmxnet3_tunable_int(sc, "mq_disable", vmxnet3_mq_disable)) {
		sc->vmx_max_nrxqueues = 1;
		sc->vmx_max_ntxqueues = 1;
	}

	ndesc = vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc);
	if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC)
		ndesc = VMXNET3_DEF_TX_NDESC;
	if (ndesc & VMXNET3_MASK_TX_NDESC)
		ndesc &= ~VMXNET3_MASK_TX_NDESC;
	sc->vmx_ntxdescs = ndesc;

	ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc);
	if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC)
		ndesc = VMXNET3_DEF_RX_NDESC;
	if (ndesc & VMXNET3_MASK_RX_NDESC)
		ndesc &= ~VMXNET3_MASK_RX_NDESC;
	sc->vmx_nrxdescs = ndesc;
	sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS;
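/*
 * Sketch of the clamping above (added commentary): the
 * VMXNET3_MASK_*_NDESC constants mark low bits that must be clear, so
 * "ndesc &= ~mask" rounds a tunable down to the multiple the device
 * requires. Assuming a hypothetical mask of 0x1f (multiples of 32), a
 * request for 1000 descriptors becomes 1000 & ~0x1f == 992.
 */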
vmxnet3_check_multiqueue(struct vmxnet3_softc *sc)
	if (sc->vmx_intr_type != VMXNET3_IT_MSIX)

	/* BMV: Just use the maximum configured for now. */
	sc->vmx_nrxqueues = sc->vmx_max_nrxqueues;
	sc->vmx_ntxqueues = sc->vmx_max_ntxqueues;

	if (sc->vmx_nrxqueues > 1)
		sc->vmx_flags |= VMXNET3_FLAG_RSS;

	sc->vmx_ntxqueues = 1;
	sc->vmx_nrxqueues = 1;
vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc)
	int nmsix, cnt, required;

	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX)

	/* Allocate an additional vector for the events interrupt. */
	required = sc->vmx_max_nrxqueues + sc->vmx_max_ntxqueues + 1;

	nmsix = pci_msix_count(dev);
	if (nmsix < required)

	if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) {
		sc->vmx_nintrs = required;

	pci_release_msi(dev);

	/* BMV TODO Fallback to sharing MSIX vectors if possible. */
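/*
 * Worked example (added commentary): the vector budget above is one
 * MSI-X vector per Tx queue, one per Rx queue, plus one dedicated to
 * events, so a configuration of 4 Tx and 4 Rx queues needs
 * 4 + 4 + 1 = 9 vectors. When the host offers fewer, the caller in
 * vmxnet3_alloc_interrupts() falls back to MSI and then to a legacy
 * interrupt rather than sharing vectors.
 */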
vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
	int nmsi, cnt, required;

	nmsi = pci_msi_count(dev);

	if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) {

	pci_release_msi(dev);

vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc)
	sc->vmx_irq_type = pci_alloc_1intr(sc->vmx_dev, enable, &rid,
	sc->vmx_irq_flags = irq_flags;

vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc)

vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags,
    struct vmxnet3_interrupt *intr)
	struct resource *irq;

	irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid,

	intr->vmxi_irq = irq;
	intr->vmxi_rid = rid;
vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc)
	int i, rid, flags, error;

	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY)
		flags |= RF_SHAREABLE;

	for (i = 0; i < sc->vmx_nintrs; i++, rid++) {
		error = vmxnet3_alloc_interrupt(sc, rid, flags,

vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc)
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_interrupt *intr;

	intr = &sc->vmx_intrs[0];

	for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) {
		txq = &sc->vmx_txq[i];
		error = bus_setup_intr(dev, intr->vmxi_irq, INTR_MPSAFE,
		    vmxnet3_txq_intr, txq, &intr->vmxi_handler, NULL);
		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
		txq->vxtxq_intr_idx = intr->vmxi_rid - 1;
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) {
		rxq = &sc->vmx_rxq[i];
		error = bus_setup_intr(dev, intr->vmxi_irq, INTR_MPSAFE,
		    vmxnet3_rxq_intr, rxq, &intr->vmxi_handler, NULL);
		bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler,
		rxq->vxrxq_intr_idx = intr->vmxi_rid - 1;
	}

	error = bus_setup_intr(dev, intr->vmxi_irq, INTR_MPSAFE,
	    vmxnet3_event_intr, sc, &intr->vmxi_handler, NULL);
	bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "event");
	sc->vmx_event_intr_idx = intr->vmxi_rid - 1;
vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc)
	struct vmxnet3_interrupt *intr;

	intr = &sc->vmx_intrs[0];
	error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq,
	    INTR_MPSAFE, vmxnet3_legacy_intr, sc,
	    &intr->vmxi_handler, NULL);

	for (i = 0; i < sc->vmx_ntxqueues; i++)
		sc->vmx_txq[i].vxtxq_intr_idx = 0;
	for (i = 0; i < sc->vmx_nrxqueues; i++)
		sc->vmx_rxq[i].vxrxq_intr_idx = 0;
	sc->vmx_event_intr_idx = 0;

vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc)
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;

	sc->vmx_ds->evintr = sc->vmx_event_intr_idx;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		txs->intr_idx = txq->vxtxq_intr_idx;
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		rxs->intr_idx = rxq->vxrxq_intr_idx;
vmxnet3_setup_interrupts(struct vmxnet3_softc *sc)
	error = vmxnet3_alloc_intr_resources(sc);

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_MSIX:
		error = vmxnet3_setup_msix_interrupts(sc);
		device_printf(sc->vmx_dev, "VMXNET3_IT_MSIX unsupported\n");
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_setup_legacy_interrupt(sc);
		panic("%s: invalid interrupt type %d", __func__,

	vmxnet3_set_interrupt_idx(sc);

vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);

	sc->vmx_intr_type = config & 0x03;
	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_AUTO:
		sc->vmx_intr_type = VMXNET3_IT_MSIX;
	case VMXNET3_IT_MSIX:
		error = vmxnet3_alloc_msix_interrupts(sc);
		sc->vmx_intr_type = VMXNET3_IT_MSI;
		error = vmxnet3_alloc_msi_interrupts(sc);
		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_alloc_legacy_interrupts(sc);
		sc->vmx_intr_type = -1;
		device_printf(dev, "cannot allocate any interrupt resources\n");
vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc)
	config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG);

	sc->vmx_intr_type = config & 0x03;
	sc->vmx_intr_mask_mode = (config >> 2) & 0x03;

	switch (sc->vmx_intr_type) {
	case VMXNET3_IT_AUTO:
		sc->vmx_intr_type = VMXNET3_IT_MSI;
		error = vmxnet3_alloc_msi_interrupts(sc);
		sc->vmx_intr_type = VMXNET3_IT_LEGACY;
	case VMXNET3_IT_LEGACY:
		error = vmxnet3_alloc_legacy_interrupts(sc);
	case VMXNET3_IT_MSIX:
		sc->vmx_intr_type = -1;
		device_printf(dev, "cannot allocate any interrupt resources\n");
vmxnet3_free_interrupt(struct vmxnet3_softc *sc,
    struct vmxnet3_interrupt *intr)
	if (intr->vmxi_handler != NULL) {
		bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler);
		intr->vmxi_handler = NULL;
	}

	if (intr->vmxi_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid,
		intr->vmxi_irq = NULL;

vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
	for (i = 0; i < sc->vmx_nintrs; i++)
		vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);

	if (sc->vmx_intr_type == VMXNET3_IT_MSI ||
	    sc->vmx_intr_type == VMXNET3_IT_MSIX)
		pci_release_msi(sc->vmx_dev);

vmxnet3_free_interrupts(struct vmxnet3_softc *sc)
	for (i = 0; i < sc->vmx_nintrs; i++)
		vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]);

	if (sc->vmx_irq_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(sc->vmx_dev);
#ifndef VMXNET3_LEGACY_TX
vmxnet3_alloc_taskqueue(struct vmxnet3_softc *sc)
	sc->vmx_tq = taskqueue_create(device_get_nameunit(dev), M_NOWAIT,
	    taskqueue_thread_enqueue, &sc->vmx_tq);
	if (sc->vmx_tq == NULL)

vmxnet3_start_taskqueue(struct vmxnet3_softc *sc)
	/*
	 * The taskqueue is typically not frequently used, so a dedicated
	 * thread for each queue is unnecessary.
	 */
	nthreads = MAX(1, sc->vmx_ntxqueues / 2);

	/*
	 * Most drivers just ignore the return value - it only fails
	 * with ENOMEM so an error is not likely. It is hard for us
	 * to recover from an error here.
	 */
	error = taskqueue_start_threads(&sc->vmx_tq, nthreads, PI_NET,
	    "%s taskq", device_get_nameunit(dev));
	if (error)
		device_printf(dev, "failed to start taskqueue: %d\n", error);

vmxnet3_drain_taskqueue(struct vmxnet3_softc *sc)
	struct vmxnet3_txqueue *txq;

	if (sc->vmx_tq != NULL) {
		for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
			txq = &sc->vmx_txq[i];
			taskqueue_drain(sc->vmx_tq, &txq->vxtxq_defrtask);
		}
	}

vmxnet3_free_taskqueue(struct vmxnet3_softc *sc)
	if (sc->vmx_tq != NULL) {
		taskqueue_free(sc->vmx_tq);
vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q)
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;

	rxq = &sc->vmx_rxq[q];

	ksnprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d",
	    device_get_nameunit(sc->vmx_dev), q);
	lockinit(&rxq->vxrxq_lock, rxq->vxrxq_name, 0, 0);

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];
		rxr->vxrxr_ndesc = sc->vmx_nrxdescs;
		rxr->vxrxr_rxbuf = kmalloc(rxr->vxrxr_ndesc *
		    sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_INTWAIT | M_ZERO);
		if (rxr->vxrxr_rxbuf == NULL)

		rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs;
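/*
 * Note (added commentary): each Rx queue owns VMXNET3_RXRINGS_PERQ
 * (two) command rings, and the loop above grows the queue's single
 * completion ring by vmx_nrxdescs once per command ring, so the
 * completion ring can hold a write-back for every descriptor the
 * queue can post.
 */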
vmxnet3_init_txq(struct vmxnet3_softc *sc, int q)
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;

	txq = &sc->vmx_txq[q];
	txr = &txq->vxtxq_cmd_ring;

	ksnprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d",
	    device_get_nameunit(sc->vmx_dev), q);
	lockinit(&txq->vxtxq_lock, txq->vxtxq_name, 0, 0);

	txr->vxtxr_ndesc = sc->vmx_ntxdescs;
	txr->vxtxr_txbuf = kmalloc(txr->vxtxr_ndesc *
	    sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_INTWAIT | M_ZERO);
	if (txr->vxtxr_txbuf == NULL)

	txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs;

#ifndef VMXNET3_LEGACY_TX
	TASK_INIT(&txq->vxtxq_defrtask, 0, vmxnet3_txq_tq_deferred, txq);

	txq->vxtxq_br = buf_ring_alloc(VMXNET3_DEF_BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &txq->vxtxq_lock);
	if (txq->vxtxq_br == NULL)
vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc)
	/*
	 * Only attempt to create multiple queues if MSIX is available. MSIX is
	 * disabled by default because it's apparently broken for devices passed
	 * through by at least ESXi 5.1. The hw.pci.honor_msi_blacklist tunable
	 * must be set to zero for MSIX. This check prevents us from allocating
	 * queue structures that we will not use.
	 */
	if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) {
		sc->vmx_max_nrxqueues = 1;
		sc->vmx_max_ntxqueues = 1;
	}

	sc->vmx_rxq = kmalloc(sizeof(struct vmxnet3_rxqueue) *
	    sc->vmx_max_nrxqueues, M_DEVBUF, M_INTWAIT | M_ZERO);
	sc->vmx_txq = kmalloc(sizeof(struct vmxnet3_txqueue) *
	    sc->vmx_max_ntxqueues, M_DEVBUF, M_INTWAIT | M_ZERO);
	if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL)

	for (i = 0; i < sc->vmx_max_nrxqueues; i++) {
		error = vmxnet3_init_rxq(sc, i);

	for (i = 0; i < sc->vmx_max_ntxqueues; i++) {
		error = vmxnet3_init_txq(sc, i);
vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq)
	struct vmxnet3_rxring *rxr;

	rxq->vxrxq_sc = NULL;

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];

		if (rxr->vxrxr_rxbuf != NULL) {
			kfree(rxr->vxrxr_rxbuf, M_DEVBUF);
			rxr->vxrxr_rxbuf = NULL;
		}
	}

	if (mtx_initialized(&rxq->vxrxq_lock) != 0)
		lockuninit(&rxq->vxrxq_lock);

vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq)
	struct vmxnet3_txring *txr;

	txr = &txq->vxtxq_cmd_ring;

	txq->vxtxq_sc = NULL;

#ifndef VMXNET3_LEGACY_TX
	if (txq->vxtxq_br != NULL) {
		buf_ring_free(txq->vxtxq_br, M_DEVBUF);
		txq->vxtxq_br = NULL;
	}
#endif

	if (txr->vxtxr_txbuf != NULL) {
		kfree(txr->vxtxr_txbuf, M_DEVBUF);
		txr->vxtxr_txbuf = NULL;
	}

	if (mtx_initialized(&txq->vxtxq_lock) != 0)
		lockuninit(&txq->vxtxq_lock);
vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc)
	if (sc->vmx_rxq != NULL) {
		for (i = 0; i < sc->vmx_max_nrxqueues; i++)
			vmxnet3_destroy_rxq(&sc->vmx_rxq[i]);
		kfree(sc->vmx_rxq, M_DEVBUF);

	if (sc->vmx_txq != NULL) {
		for (i = 0; i < sc->vmx_max_ntxqueues; i++)
			vmxnet3_destroy_txq(&sc->vmx_txq[i]);
		kfree(sc->vmx_txq, M_DEVBUF);
vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc)
	size = sizeof(struct vmxnet3_driver_shared);
	error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma);
		device_printf(dev, "cannot alloc shared memory\n");
	sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr;

	size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) +
	    sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared);
	error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma);
		device_printf(dev, "cannot alloc queue shared memory\n");
	sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva;
		kva += sizeof(struct vmxnet3_txq_shared);
	}
	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva;
		kva += sizeof(struct vmxnet3_rxq_shared);
	}

	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		size = sizeof(struct vmxnet3_rss_shared);
		error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma);
			device_printf(dev, "cannot alloc rss shared memory\n");
		    (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr;
vmxnet3_free_shared_data(struct vmxnet3_softc *sc)
	if (sc->vmx_rss != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_rss_dma);

	if (sc->vmx_qs != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_qs_dma);

	if (sc->vmx_ds != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_ds_dma);
vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc)
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	size_t descsz, compsz;

	for (q = 0; q < sc->vmx_ntxqueues; q++) {
		txq = &sc->vmx_txq[q];
		txr = &txq->vxtxq_cmd_ring;
		txc = &txq->vxtxq_comp_ring;

		descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc);
		compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc);

		error = bus_dma_tag_create(bus_get_dma_tag(dev),
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    VMXNET3_TX_MAXSIZE,		/* maxsize */
		    VMXNET3_TX_MAXSEGS,		/* nsegments */
		    VMXNET3_TX_MAXSEGSIZE,	/* maxsegsize */
			    "unable to create Tx buffer tag for queue %d\n", q);

		error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma);
			device_printf(dev, "cannot alloc Tx descriptors for "
			    "queue %d error %d\n", q, error);
		    (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr;

		error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma);
			device_printf(dev, "cannot alloc Tx comp descriptors "
			    "for queue %d error %d\n", q, error);
		    (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr;

		for (i = 0; i < txr->vxtxr_ndesc; i++) {
			error = bus_dmamap_create(txr->vxtxr_txtag, 0,
			    &txr->vxtxr_txbuf[i].vtxb_dmamap);
				device_printf(dev, "unable to create Tx buf "
				    "dmamap for queue %d idx %d\n", q, i);
vmxnet3_free_txq_data(struct vmxnet3_softc *sc)
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	struct vmxnet3_txbuf *txb;

	for (q = 0; q < sc->vmx_ntxqueues; q++) {
		txq = &sc->vmx_txq[q];
		txr = &txq->vxtxq_cmd_ring;
		txc = &txq->vxtxq_comp_ring;

		for (i = 0; i < txr->vxtxr_ndesc; i++) {
			txb = &txr->vxtxr_txbuf[i];
			if (txb->vtxb_dmamap != NULL) {
				bus_dmamap_destroy(txr->vxtxr_txtag,
				txb->vtxb_dmamap = NULL;

		if (txc->vxcr_u.txcd != NULL) {
			vmxnet3_dma_free(sc, &txc->vxcr_dma);
			txc->vxcr_u.txcd = NULL;
		}

		if (txr->vxtxr_txd != NULL) {
			vmxnet3_dma_free(sc, &txr->vxtxr_dma);
			txr->vxtxr_txd = NULL;
		}

		if (txr->vxtxr_txtag != NULL) {
			bus_dma_tag_destroy(txr->vxtxr_txtag);
			txr->vxtxr_txtag = NULL;
vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc)
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		rxq = &sc->vmx_rxq[q];
		rxc = &rxq->vxrxq_comp_ring;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			descsz = rxr->vxrxr_ndesc *
			    sizeof(struct vmxnet3_rxdesc);
			compsz += rxr->vxrxr_ndesc *
			    sizeof(struct vmxnet3_rxcompdesc);

			error = bus_dma_tag_create(bus_get_dma_tag(dev),
			    1, 0,		/* alignment, boundary */
			    BUS_SPACE_MAXADDR,	/* lowaddr */
			    BUS_SPACE_MAXADDR,	/* highaddr */
			    MJUMPAGESIZE,	/* maxsize */
			    MJUMPAGESIZE,	/* maxsegsize */
				    "unable to create Rx buffer tag for "

			error = vmxnet3_dma_malloc(sc, descsz, 512,
				device_printf(dev, "cannot allocate Rx "
				    "descriptors for queue %d/%d error %d\n",
			    (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr;

		error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma);
			device_printf(dev, "cannot alloc Rx comp descriptors "
			    "for queue %d error %d\n", q, error);
		    (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
			    &rxr->vxrxr_spare_dmap);
				device_printf(dev, "unable to create spare "
				    "dmamap for queue %d/%d error %d\n",

			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
				error = bus_dmamap_create(rxr->vxrxr_rxtag, 0,
				    &rxr->vxrxr_rxbuf[j].vrxb_dmamap);
					device_printf(dev, "unable to create "
					    "dmamap for queue %d/%d slot %d "
vmxnet3_free_rxq_data(struct vmxnet3_softc *sc)
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxbuf *rxb;

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		rxq = &sc->vmx_rxq[q];
		rxc = &rxq->vxrxq_comp_ring;

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			if (rxr->vxrxr_spare_dmap != NULL) {
				bus_dmamap_destroy(rxr->vxrxr_rxtag,
				    rxr->vxrxr_spare_dmap);
				rxr->vxrxr_spare_dmap = NULL;
			}

			for (j = 0; j < rxr->vxrxr_ndesc; j++) {
				rxb = &rxr->vxrxr_rxbuf[j];
				if (rxb->vrxb_dmamap != NULL) {
					bus_dmamap_destroy(rxr->vxrxr_rxtag,
					rxb->vrxb_dmamap = NULL;

		if (rxc->vxcr_u.rxcd != NULL) {
			vmxnet3_dma_free(sc, &rxc->vxcr_dma);
			rxc->vxcr_u.rxcd = NULL;
		}

		for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
			rxr = &rxq->vxrxq_cmd_ring[i];

			if (rxr->vxrxr_rxd != NULL) {
				vmxnet3_dma_free(sc, &rxr->vxrxr_dma);
				rxr->vxrxr_rxd = NULL;
			}

			if (rxr->vxrxr_rxtag != NULL) {
				bus_dma_tag_destroy(rxr->vxrxr_rxtag);
				rxr->vxrxr_rxtag = NULL;
vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc)
	error = vmxnet3_alloc_txq_data(sc);

	error = vmxnet3_alloc_rxq_data(sc);

vmxnet3_free_queue_data(struct vmxnet3_softc *sc)
	if (sc->vmx_rxq != NULL)
		vmxnet3_free_rxq_data(sc);

	if (sc->vmx_txq != NULL)
		vmxnet3_free_txq_data(sc);
vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc)
	error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN,
	    32, &sc->vmx_mcast_dma);
		device_printf(sc->vmx_dev, "unable to alloc multicast table\n");
	sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr;

vmxnet3_free_mcast_table(struct vmxnet3_softc *sc)
	if (sc->vmx_mcast != NULL) {
		vmxnet3_dma_free(sc, &sc->vmx_mcast_dma);
		sc->vmx_mcast = NULL;
vmxnet3_init_shared_data(struct vmxnet3_softc *sc)
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txq_shared *txs;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_rxq_shared *rxs;

	/*
	 * Initialize fields of the shared data that remain the same across
	 * reinits. Note the shared data is zeroed when allocated.
	 */
	ds->magic = VMXNET3_REV1_MAGIC;

	ds->version = VMXNET3_DRIVER_VERSION;
	ds->guest = VMXNET3_GOS_FREEBSD |

	ds->vmxnet3_revision = 1;
	ds->upt_version = 1;

	ds->driver_data = vtophys(sc);
	ds->driver_data_len = sizeof(struct vmxnet3_softc);
	ds->queue_shared = sc->vmx_qs_dma.dma_paddr;
	ds->queue_shared_len = sc->vmx_qs_dma.dma_size;
	ds->nrxsg_max = sc->vmx_max_rxsegs;

	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		ds->rss.version = 1;
		ds->rss.paddr = sc->vmx_rss_dma.dma_paddr;
		ds->rss.len = sc->vmx_rss_dma.dma_size;
	}

	/* Interrupt control. */
	ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO;
	ds->nintr = sc->vmx_nintrs;
	ds->evintr = sc->vmx_event_intr_idx;
	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;

	for (i = 0; i < sc->vmx_nintrs; i++)
		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;

	/* Receive filter. */
	ds->mcast_table = sc->vmx_mcast_dma.dma_paddr;
	ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		txs = txq->vxtxq_ts;

		txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr;
		txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc;
		txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr;
		txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc;
		txs->driver_data = vtophys(txq);
		txs->driver_data_len = sizeof(struct vmxnet3_txqueue);
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		rxs = rxq->vxrxq_rs;

		rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr;
		rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc;
		rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr;
		rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc;
		rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr;
		rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc;
		rxs->driver_data = vtophys(rxq);
		rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue);
vmxnet3_init_hwassist(struct vmxnet3_softc *sc)
	struct ifnet *ifp = sc->vmx_ifp;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		hwassist |= VMXNET3_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
		hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6;

	if (ifp->if_capenable & IFCAP_TSO4)
		hwassist |= CSUM_IP_TSO;
	if (ifp->if_capenable & IFCAP_TSO6)
		hwassist |= CSUM_IP6_TSO;

	ifp->if_hwassist = hwassist;

vmxnet3_reinit_interface(struct vmxnet3_softc *sc)
	/* Use the current MAC address. */
	bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN);
	vmxnet3_set_lladdr(sc);

	vmxnet3_init_hwassist(sc);
vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc)
	/*
	 * Use the same key as the Linux driver until FreeBSD can do
	 * RSS (presumably Toeplitz) in software.
	 */
	static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = {
		0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
		0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
		0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
		0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
		0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
	};
	struct vmxnet3_driver_shared *ds;
	struct vmxnet3_rss_shared *rss;

	    UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 |
	    UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6;
	rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
	rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE;
	rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE;
	memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE);

	for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++)
		rss->ind_table[i] = i % sc->vmx_nrxqueues;
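/*
 * Illustration (added commentary): the loop above stripes the
 * indirection table across the Rx queues round-robin, e.g. with four
 * queues
 *
 *	ind_table[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... }
 *
 * so a flow's Toeplitz hash, reduced modulo the table size, selects
 * the queue that receives it.
 */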
vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc)
	struct vmxnet3_driver_shared *ds;

	ds->mtu = ifp->if_mtu;
	ds->ntxqueue = sc->vmx_ntxqueues;
	ds->nrxqueue = sc->vmx_nrxqueues;

	ds->upt_features = 0;
	if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
		ds->upt_features |= UPT1_F_CSUM;
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		ds->upt_features |= UPT1_F_VLAN;
	if (ifp->if_capenable & IFCAP_LRO)
		ds->upt_features |= UPT1_F_LRO;

	if (sc->vmx_flags & VMXNET3_FLAG_RSS) {
		ds->upt_features |= UPT1_F_RSS;
		vmxnet3_reinit_rss_shared_data(sc);
	}

	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr);
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH,
	    (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32);
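/*
 * Example (added commentary): the 64-bit physical address of the
 * shared data is programmed as two 32-bit halves, the low word through
 * VMXNET3_BAR1_DSL and the high word through VMXNET3_BAR1_DSH; a
 * paddr of 0x123456000 writes 0x23456000 to DSL and 0x1 to DSH.
 */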
vmxnet3_alloc_data(struct vmxnet3_softc *sc)
	error = vmxnet3_alloc_shared_data(sc);

	error = vmxnet3_alloc_queue_data(sc);

	error = vmxnet3_alloc_mcast_table(sc);

	vmxnet3_init_shared_data(sc);

vmxnet3_free_data(struct vmxnet3_softc *sc)
	vmxnet3_free_mcast_table(sc);
	vmxnet3_free_queue_data(sc);
	vmxnet3_free_shared_data(sc);
vmxnet3_setup_interface(struct vmxnet3_softc *sc)
	ifp = sc->vmx_ifp = if_alloc(IFT_ETHER);
		device_printf(dev, "cannot allocate ifnet structure\n");

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = IF_Gbps(10ULL);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vmxnet3_init;
	ifp->if_ioctl = vmxnet3_ioctl;
	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
	ifp->if_hw_tsomaxsegcount = VMXNET3_TX_MAXSEGS;
	ifp->if_hw_tsomaxsegsize = VMXNET3_TX_MAXSEGSIZE;

#ifdef VMXNET3_LEGACY_TX
	ifp->if_start = vmxnet3_start;
	ifq_set_maxlen(&ifp->if_snd, sc->vmx_ntxdescs - 1);
	ifq_set_ready(&ifp->if_snd);
#else
	ifp->if_transmit = vmxnet3_txq_mq_start;
	ifp->if_qflush = vmxnet3_qflush;
#endif

	vmxnet3_get_lladdr(sc);
	ether_ifattach(ifp, sc->vmx_lladdr, NULL);

	ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM;
	ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6;
	ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6;
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	ifp->if_capenable = ifp->if_capabilities;

#if 0 /* XXX LRO / VLAN_HWFILTER */
	/* These capabilities are not enabled by default. */
	ifp->if_capabilities |= /* IFCAP_LRO | */ IFCAP_VLAN_HWFILTER;
#endif

	sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_config,
	    vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);

	ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change,
	    vmxnet3_media_status);
	ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO);
vmxnet3_evintr(struct vmxnet3_softc *sc)
	struct vmxnet3_txq_shared *ts;
	struct vmxnet3_rxq_shared *rs;

	VMXNET3_CORE_LOCK(sc);

	event = sc->vmx_ds->event;
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event);

	if (event & VMXNET3_EVENT_LINK) {
		vmxnet3_link_status(sc);
		if (sc->vmx_link_active != 0)
			vmxnet3_tx_start_all(sc);
	}

	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
		vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS);
		ts = sc->vmx_txq[0].vxtxq_ts;
		if (ts->stopped != 0)
			device_printf(dev, "Tx queue error %#x\n", ts->error);
		rs = sc->vmx_rxq[0].vxrxq_rs;
		if (rs->stopped != 0)
			device_printf(dev, "Rx queue error %#x\n", rs->error);
		device_printf(dev, "Rx/Tx queue error event ... resetting\n");

	if (event & VMXNET3_EVENT_DIC)
		device_printf(dev, "device implementation change event\n");
	if (event & VMXNET3_EVENT_DEBUG)
		device_printf(dev, "debug event\n");

	ifp->if_flags &= ~IFF_RUNNING;
	vmxnet3_init_locked(sc);

	VMXNET3_CORE_UNLOCK(sc);
vmxnet3_txq_eof(struct vmxnet3_txqueue *txq)
	struct vmxnet3_softc *sc;
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;
	struct vmxnet3_txcompdesc *txcd;
	struct vmxnet3_txbuf *txb;

	txr = &txq->vxtxq_cmd_ring;
	txc = &txq->vxtxq_comp_ring;

	VMXNET3_TXQ_LOCK_ASSERT(txq);

		txcd = &txc->vxcr_u.txcd[txc->vxcr_next];
		if (txcd->gen != txc->vxcr_gen)
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++txc->vxcr_next == txc->vxcr_ndesc) {

		sop = txr->vxtxr_next;
		txb = &txr->vxtxr_txbuf[sop];

		if ((m = txb->vtxb_m) != NULL) {
			bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);

			txq->vxtxq_stats.vmtxs_opackets++;
			txq->vxtxq_stats.vmtxs_obytes += m->m_pkthdr.len;
			if (m->m_flags & M_MCAST)
				txq->vxtxq_stats.vmtxs_omcasts++;

		txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc;

	if (txr->vxtxr_head == txr->vxtxr_next)
		txq->vxtxq_watchdog = 0;
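/*
 * Added summary of the completion protocol used above: each completion
 * descriptor carries a generation bit, and the driver's expected value
 * (vxcr_gen) is toggled every time vxcr_next wraps around the ring. A
 * descriptor whose gen field still holds the old value has not yet
 * been written back by the device, which is why the loop terminates on
 * a generation mismatch rather than on an explicit count.
 */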
vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr)
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxbuf *rxb;
	bus_dma_segment_t segs[1];
	int idx, clsize, btype, flags, nsegs, error;

	tag = rxr->vxrxr_rxtag;
	dmap = rxr->vxrxr_spare_dmap;
	idx = rxr->vxrxr_fill;
	rxd = &rxr->vxrxr_rxd[idx];
	rxb = &rxr->vxrxr_rxbuf[idx];

#ifdef VMXNET3_FAILPOINTS
	KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS);
	if (rxr->vxrxr_rid != 0)
		KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS);
#endif

	if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) {
		btype = VMXNET3_BTYPE_HEAD;
		clsize = MJUMPAGESIZE;
		btype = VMXNET3_BTYPE_BODY;
	}

	m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize);
		sc->vmx_stats.vmst_mgetcl_failed++;

	if (btype == VMXNET3_BTYPE_HEAD) {
		m->m_len = m->m_pkthdr.len = clsize;
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(tag, dmap, m, &segs[0], 1, &nsegs,
		sc->vmx_stats.vmst_mbuf_load_failed++;
	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));
	if (btype == VMXNET3_BTYPE_BODY)
		m->m_flags &= ~M_PKTHDR;

	if (rxb->vrxb_m != NULL) {
		bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(tag, rxb->vrxb_dmamap);
	}

	rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap;
	rxb->vrxb_dmamap = dmap;

	rxd->addr = segs[0].ds_addr;
	rxd->len = segs[0].ds_len;
	rxd->gen = rxr->vxrxr_gen;

	vmxnet3_rxr_increment_fill(rxr);
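/*
 * Design note (added commentary): the new mbuf is loaded into the
 * ring's single spare DMA map first, and the spare is swapped with the
 * slot's map only after the load succeeds. On a failed allocation or
 * load the old mbuf, its map, and the descriptor are all left intact,
 * so a transient shortage never costs the ring a buffer.
 */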
vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq,
    struct vmxnet3_rxring *rxr, int idx)
	struct vmxnet3_rxdesc *rxd;

	rxd = &rxr->vxrxr_rxd[idx];
	rxd->gen = rxr->vxrxr_gen;
	vmxnet3_rxr_increment_fill(rxr);

vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq)
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxcompdesc *rxcd;

	rxc = &rxq->vxrxq_comp_ring;

		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
		if (rxcd->gen != rxc->vxcr_gen)
			break;		/* Not expected. */
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {

		idx = rxcd->rxd_idx;
		if (rxcd->qid < sc->vmx_nrxqueues)
			rxr = &rxq->vxrxq_cmd_ring[0];
		else
			rxr = &rxq->vxrxq_cmd_ring[1];
		vmxnet3_rxq_eof_discard(rxq, rxr, idx);
vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
		m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (rxcd->ipcsum_ok)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
	}

	if (!rxcd->fragment) {
		if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) {
			m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
			m->m_pkthdr.csum_data = 0xFFFF;
vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
    struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m)
	struct vmxnet3_softc *sc;

		rxq->vxrxq_stats.vmrxs_ierrors++;

	switch (rxcd->rss_type) {
	case VMXNET3_RCD_RSS_TYPE_IPV4:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV4);
		break;
	case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4);
		break;
	case VMXNET3_RCD_RSS_TYPE_IPV6:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6);
		break;
	case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
		m->m_pkthdr.flowid = rxcd->rss_hash;
		M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV6);
		break;
	default: /* VMXNET3_RCD_RSS_TYPE_NONE */
		m->m_pkthdr.flowid = rxq->vxrxq_id;
		M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
		break;

	m->m_pkthdr.flowid = rxq->vxrxq_id;
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);

		vmxnet3_rx_csum(rxcd, m);
		m->m_flags |= M_VLANTAG;
		m->m_pkthdr.ether_vlantag = rxcd->vtag;
	}

	rxq->vxrxq_stats.vmrxs_ipackets++;
	rxq->vxrxq_stats.vmrxs_ibytes += m->m_pkthdr.len;

	VMXNET3_RXQ_UNLOCK(rxq);
	(*ifp->if_input)(ifp, m, NULL, -1);
	VMXNET3_RXQ_LOCK(rxq);
vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq)
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	struct vmxnet3_rxdesc *rxd;
	struct vmxnet3_rxcompdesc *rxcd;
	struct mbuf *m, *m_head, *m_tail;

	rxc = &rxq->vxrxq_comp_ring;

	VMXNET3_RXQ_LOCK_ASSERT(rxq);

	if ((ifp->if_flags & IFF_RUNNING) == 0)

	m_head = rxq->vxrxq_mhead;
	rxq->vxrxq_mhead = NULL;
	m_tail = rxq->vxrxq_mtail;
	rxq->vxrxq_mtail = NULL;
	KKASSERT(m_head == NULL || m_tail != NULL);

		rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next];
		if (rxcd->gen != rxc->vxcr_gen) {
			rxq->vxrxq_mhead = m_head;
			rxq->vxrxq_mtail = m_tail;
		}
		vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);

		if (++rxc->vxcr_next == rxc->vxcr_ndesc) {

		idx = rxcd->rxd_idx;
		if (rxcd->qid < sc->vmx_nrxqueues)
			rxr = &rxq->vxrxq_cmd_ring[0];
		else
			rxr = &rxq->vxrxq_cmd_ring[1];
		rxd = &rxr->vxrxr_rxd[idx];

		m = rxr->vxrxr_rxbuf[idx].vrxb_m;
		KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf",
		    __func__, rxcd->qid, idx));

		/*
		 * The host may skip descriptors. We detect this when this
		 * descriptor does not match the previous fill index. Catch
		 * up with the host now.
		 */
		if (__predict_false(rxr->vxrxr_fill != idx)) {
			while (rxr->vxrxr_fill != idx) {
				rxr->vxrxr_rxd[rxr->vxrxr_fill].gen =
				    rxr->vxrxr_gen;
				vmxnet3_rxr_increment_fill(rxr);
			}
		}
			KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD,
			    ("%s: start of frame w/o head buffer", __func__));
			KASSERT(rxr == &rxq->vxrxq_cmd_ring[0],
			    ("%s: start of frame not in ring 0", __func__));
			KASSERT((idx % sc->vmx_rx_max_chain) == 0,
			    ("%s: start of frame at unexpected index %d (%d)",
			    __func__, idx, sc->vmx_rx_max_chain));
			KASSERT(m_head == NULL,
			    ("%s: duplicate start of frame?", __func__));
			/* Just ignore this descriptor. */
			vmxnet3_rxq_eof_discard(rxq, rxr, idx);

			if (vmxnet3_newbuf(sc, rxr) != 0) {
				rxq->vxrxq_stats.vmrxs_iqdrops++;
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
					vmxnet3_rxq_discard_chain(rxq);

			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = length;
			m->m_pkthdr.csum_flags = 0;
			m_head = m_tail = m;

			KASSERT(rxd->btype == VMXNET3_BTYPE_BODY,
			    ("%s: non start of frame w/o body buffer", __func__));

			if (m_head == NULL && m_tail == NULL) {
				/*
				 * This is a continuation of a packet that we
				 * started to drop, but could not drop entirely
				 * because this segment was still owned by the
				 * host. So, drop the remainder now.
				 */
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
					vmxnet3_rxq_discard_chain(rxq);

			KASSERT(m_head != NULL,
			    ("%s: frame not started?", __func__));

			if (vmxnet3_newbuf(sc, rxr) != 0) {
				rxq->vxrxq_stats.vmrxs_iqdrops++;
				vmxnet3_rxq_eof_discard(rxq, rxr, idx);
					vmxnet3_rxq_discard_chain(rxq);
				m_head = m_tail = NULL;

			m_head->m_pkthdr.len += length;

			vmxnet3_rxq_input(rxq, rxcd, m_head);
			m_head = m_tail = NULL;

			/* Must recheck after dropping the Rx lock. */
			if ((ifp->if_flags & IFF_RUNNING) == 0)

	if (__predict_false(rxq->vxrxq_rs->update_rxhead)) {
		int qid = rxcd->qid;

		idx = (idx + 1) % rxr->vxrxr_ndesc;
		if (qid >= sc->vmx_nrxqueues) {
			qid -= sc->vmx_nrxqueues;
			r = VMXNET3_BAR0_RXH2(qid);
			r = VMXNET3_BAR0_RXH1(qid);
		vmxnet3_write_bar0(sc, r, idx);
	}
vmxnet3_legacy_intr(void *xsc)
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_txqueue *txq;

	rxq = &sc->vmx_rxq[0];
	txq = &sc->vmx_txq[0];

	if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) {
		if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0)

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_all_intrs(sc);

	if (sc->vmx_ds->event != 0)
		vmxnet3_evintr(sc);

	VMXNET3_RXQ_LOCK(rxq);
	vmxnet3_rxq_eof(rxq);
	VMXNET3_RXQ_UNLOCK(rxq);

	VMXNET3_TXQ_LOCK(txq);
	vmxnet3_txq_eof(txq);
	vmxnet3_txq_start(txq);
	VMXNET3_TXQ_UNLOCK(txq);

	vmxnet3_enable_all_intrs(sc);
vmxnet3_txq_intr(void *xtxq)
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx);

	VMXNET3_TXQ_LOCK(txq);
	vmxnet3_txq_eof(txq);
	vmxnet3_txq_start(txq);
	VMXNET3_TXQ_UNLOCK(txq);

	vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx);

vmxnet3_rxq_intr(void *xrxq)
	struct vmxnet3_softc *sc;
	struct vmxnet3_rxqueue *rxq;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx);

	VMXNET3_RXQ_LOCK(rxq);
	vmxnet3_rxq_eof(rxq);
	VMXNET3_RXQ_UNLOCK(rxq);

	vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx);

vmxnet3_event_intr(void *xsc)
	struct vmxnet3_softc *sc;

	if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE)
		vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx);

	if (sc->vmx_ds->event != 0)

	vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx);
vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
	struct vmxnet3_txring *txr;
	struct vmxnet3_txbuf *txb;

	txr = &txq->vxtxq_cmd_ring;

	for (i = 0; i < txr->vxtxr_ndesc; i++) {
		txb = &txr->vxtxr_txbuf[i];

		if (txb->vtxb_m == NULL)
			continue;

		bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap);
		m_freem(txb->vtxb_m);

vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_rxbuf *rxb;

	if (rxq->vxrxq_mhead != NULL) {
		m_freem(rxq->vxrxq_mhead);
		rxq->vxrxq_mhead = NULL;
		rxq->vxrxq_mtail = NULL;
	}

	for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) {
		rxr = &rxq->vxrxq_cmd_ring[i];

		for (j = 0; j < rxr->vxrxr_ndesc; j++) {
			rxb = &rxr->vxrxr_rxbuf[j];

			if (rxb->vrxb_m == NULL)
				continue;

			bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap);
			m_freem(rxb->vrxb_m);
vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc)
	struct vmxnet3_rxqueue *rxq;
	struct vmxnet3_txqueue *txq;

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		rxq = &sc->vmx_rxq[i];
		VMXNET3_RXQ_LOCK(rxq);
		VMXNET3_RXQ_UNLOCK(rxq);
	}

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];
		VMXNET3_TXQ_LOCK(txq);
		VMXNET3_TXQ_UNLOCK(txq);
vmxnet3_stop(struct vmxnet3_softc *sc)
	VMXNET3_CORE_LOCK_ASSERT(sc);

	ifp->if_flags &= ~IFF_RUNNING;
	sc->vmx_link_active = 0;
	callout_stop(&sc->vmx_tick);

	/* Disable interrupts. */
	vmxnet3_disable_all_intrs(sc);
	vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE);

	vmxnet3_stop_rendezvous(sc);

	for (q = 0; q < sc->vmx_ntxqueues; q++)
		vmxnet3_txstop(sc, &sc->vmx_txq[q]);
	for (q = 0; q < sc->vmx_nrxqueues; q++)
		vmxnet3_rxstop(sc, &sc->vmx_rxq[q]);

	vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET);
vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq)
	struct vmxnet3_txring *txr;
	struct vmxnet3_comp_ring *txc;

	txr = &txq->vxtxq_cmd_ring;
	txr->vxtxr_head = 0;
	txr->vxtxr_next = 0;
	txr->vxtxr_gen = VMXNET3_INIT_GEN;
	bzero(txr->vxtxr_txd,
	    txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc));

	txc = &txq->vxtxq_comp_ring;
	txc->vxcr_next = 0;
	txc->vxcr_gen = VMXNET3_INIT_GEN;
	bzero(txc->vxcr_u.txcd,
	    txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc));
vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq)
	struct vmxnet3_rxring *rxr;
	struct vmxnet3_comp_ring *rxc;
	int i, populate, idx, frame_size, error;

	frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) +

	/*
	 * If the MTU causes us to exceed what a regular sized cluster can
	 * handle, we allocate a second MJUMPAGESIZE cluster after it in
	 * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters.
	 *
	 * Keep rx_max_chain a divisor of the maximum Rx ring size to make
	 * our life easier. We do not support changing the ring size after
	 */
	if (frame_size <= MCLBYTES)
		sc->vmx_rx_max_chain = 1;
	else
		sc->vmx_rx_max_chain = 2;
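/*
 * Example (added commentary, assuming the usual MCLBYTES = 2K cluster
 * size): with a 1500-byte MTU the frame fits one cluster and
 * vmx_rx_max_chain stays 1; with a 4000-byte MTU it does not, so
 * ring 0 pairs each 2K head buffer with a page-sized body buffer and
 * the chain grows to 2.
 */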
2569 * Only populate ring 1 if the configuration will take advantage
2570 * of it. That is either when LRO is enabled or the frame size
2571 * exceeds what ring 0 can contain.
2574 if ((ifp->if_capenable & IFCAP_LRO) == 0 &&
2578 frame_size <= MCLBYTES + MJUMPAGESIZE)
2581 populate = VMXNET3_RXRINGS_PERQ;
2583 for (i = 0; i < populate; i++) {
2584 rxr = &rxq->vxrxq_cmd_ring[i];
2585 rxr->vxrxr_fill = 0;
2586 rxr->vxrxr_gen = VMXNET3_INIT_GEN;
2587 bzero(rxr->vxrxr_rxd,
2588 rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2590 for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) {
2591 error = vmxnet3_newbuf(sc, rxr);
2597 for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) {
2598 rxr = &rxq->vxrxq_cmd_ring[i];
2599 rxr->vxrxr_fill = 0;
2601 bzero(rxr->vxrxr_rxd,
2602 rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc));
2605 rxc = &rxq->vxrxq_comp_ring;
2607 rxc->vxcr_gen = VMXNET3_INIT_GEN;
2608 bzero(rxc->vxcr_u.rxcd,
2609 rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc));
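/*
 * Illustrative sizing (comment only, not part of the code above): with
 * the standard 1500-byte MTU, frame_size = ETHER_ALIGN (2) +
 * sizeof(struct ether_vlan_header) (18) + 1500 = 1520, which fits a
 * regular MCLBYTES (2048) cluster, so vmx_rx_max_chain is 1 and ring 1
 * is left unpopulated unless LRO is enabled. With a 9000-byte jumbo
 * MTU, frame_size = 9020 exceeds MCLBYTES, so a ring 0 entry chains an
 * MJUMPAGESIZE cluster (PAGE_SIZE, typically 4096) after the first
 * cluster; since 9020 also exceeds MCLBYTES + MJUMPAGESIZE, ring 1 is
 * populated to hold the remainder of each frame.
 */
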
static int
vmxnet3_reinit_queues(struct vmxnet3_softc *sc)
{
	device_t dev;
	int q, error;

	dev = sc->vmx_dev;

	for (q = 0; q < sc->vmx_ntxqueues; q++)
		vmxnet3_txinit(sc, &sc->vmx_txq[q]);

	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]);
		if (error) {
			device_printf(dev, "cannot populate Rx queue %d\n", q);
			return (error);
		}
	}

	return (0);
}

static int
vmxnet3_enable_device(struct vmxnet3_softc *sc)
{
	int q;

	if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) {
		device_printf(sc->vmx_dev, "device enable command failed!\n");
		return (1);
	}

	/* Reset the Rx queue heads. */
	for (q = 0; q < sc->vmx_nrxqueues; q++) {
		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0);
		vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0);
	}

	return (0);
}

static void
vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vmx_ifp;

	vmxnet3_set_rxfilter(sc);

#if 0 /* VLAN_HWFILTER */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
		bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter,
		    sizeof(sc->vmx_ds->vlan_filter));
	else
#endif
		bzero(sc->vmx_ds->vlan_filter,
		    sizeof(sc->vmx_ds->vlan_filter));
	vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
}

static int
vmxnet3_reinit(struct vmxnet3_softc *sc)
{

	vmxnet3_reinit_interface(sc);
	vmxnet3_reinit_shared_data(sc);

	if (vmxnet3_reinit_queues(sc) != 0)
		return (ENXIO);

	if (vmxnet3_enable_device(sc) != 0)
		return (ENXIO);

	vmxnet3_reinit_rxfilters(sc);

	return (0);
}

static void
vmxnet3_init_locked(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vmx_ifp;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	vmxnet3_stop(sc);

	if (vmxnet3_reinit(sc) != 0) {
		vmxnet3_stop(sc);
		return;
	}

	ifp->if_flags |= IFF_RUNNING;
	vmxnet3_link_status(sc);

	vmxnet3_enable_all_intrs(sc);
	callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
}

static void
vmxnet3_init(void *xsc)
{
	struct vmxnet3_softc *sc;

	sc = xsc;

	VMXNET3_CORE_LOCK(sc);
	vmxnet3_init_locked(sc);
	VMXNET3_CORE_UNLOCK(sc);
}

/*
 * BMV: Much of this can go away once we finally have offsets in
 * the mbuf packet header. Bug andre@.
 */
static int
vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m,
    int *etype, int *proto, int *start)
{
	struct ether_vlan_header *evh;
	int offset;
#ifdef INET
	struct ip *ip = NULL;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif

	evh = mtod(m, struct ether_vlan_header *);
	if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		/* BMV: We should handle nested VLAN tags too. */
		*etype = ntohs(evh->evl_proto);
		offset = sizeof(struct ether_vlan_header);
	} else {
		*etype = ntohs(evh->evl_encap_proto);
		offset = sizeof(struct ether_header);
	}

	switch (*etype) {
#ifdef INET
	case ETHERTYPE_IP:
		if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
			m = m_pullup(m, offset + sizeof(struct ip));
			if (m == NULL)
				return (EINVAL);
		}

		ip = (struct ip *)(mtod(m, uint8_t *) + offset);
		*proto = ip->ip_p;
		*start = offset + (ip->ip_hl << 2);
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		if (__predict_false(m->m_len <
		    offset + sizeof(struct ip6_hdr))) {
			m = m_pullup(m, offset + sizeof(struct ip6_hdr));
			if (m == NULL)
				return (EINVAL);
		}

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + offset);
		*proto = -1;
		*start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
		/* Assert the network stack sent us a valid packet. */
		KASSERT(*start > offset,
		    ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
		    *start, offset, *proto));
		break;
#endif
	default:
		return (EINVAL);
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		struct tcphdr *tcp;

		if (__predict_false(*proto != IPPROTO_TCP)) {
			/* Likely failed to correctly parse the mbuf. */
			return (EINVAL);
		}

		if (m->m_len < *start + sizeof(struct tcphdr)) {
			m = m_pullup(m, *start + sizeof(struct tcphdr));
			if (m == NULL)
				return (EINVAL);
		}

		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + *start);
		*start += (tcp->th_off << 2);

		txq->vxtxq_stats.vmtxs_tso++;
	} else {
		txq->vxtxq_stats.vmtxs_csum++;
	}

	return (0);
}

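/*
 * Worked example (comment only): for an untagged TCP/IPv4 frame with a
 * 20-byte IP header, the parse above yields *etype = ETHERTYPE_IP,
 * *proto = IPPROTO_TCP and *start = 14 + 20 = 34, the byte offset of
 * the TCP header. For TSO, *start is then advanced past the TCP header
 * (th_off << 2), e.g. to 54 with no TCP options, so it covers the full
 * header length the device must replicate in each segment.
 */
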
static int
vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0,
    bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs)
{
	struct vmxnet3_txring *txr;
	struct mbuf *m;
	bus_dma_tag_t tag;
	int error;

	txr = &txq->vxtxq_cmd_ring;
	m = *m0;
	tag = txr->vxtxr_txtag;

	error = bus_dmamap_load_mbuf_segment(tag, dmap, m, segs, 1, nsegs,
	    BUS_DMA_NOWAIT);
	if (error == 0 || error != EFBIG)
		return (error);

	m = m_defrag(m, M_NOWAIT);
	if (m != NULL) {
		*m0 = m;
		error = bus_dmamap_load_mbuf_segment(tag, dmap, m, segs,
		    1, nsegs, BUS_DMA_NOWAIT);
	} else
		error = ENOBUFS;

	if (error) {
		m_freem(*m0);
		*m0 = NULL;
		txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++;
	} else
		txq->vxtxq_sc->vmx_stats.vmst_defragged++;

	return (error);
}

static void
vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap)
{
	struct vmxnet3_txring *txr;

	txr = &txq->vxtxq_cmd_ring;
	bus_dmamap_unload(txr->vxtxr_txtag, dmap);
}

static int
vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txring *txr;
	struct vmxnet3_txdesc *txd, *sop;
	struct mbuf *m;
	bus_dmamap_t dmap;
	bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS];
	int i, gen, nsegs, etype, proto, start, error;

	sc = txq->vxtxq_sc;
	start = 0;
	txd = NULL;
	txr = &txq->vxtxq_cmd_ring;
	dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap;

	error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs);
	if (error)
		return (error);

	m = *m0;
	M_ASSERTPKTHDR(m);
	KASSERT(nsegs <= VMXNET3_TX_MAXSEGS,
	    ("%s: mbuf %p with too many segments %d", __func__, m, nsegs));

	if (VMXNET3_TXRING_AVAIL(txr) < nsegs) {
		txq->vxtxq_stats.vmtxs_full++;
		vmxnet3_txq_unload_mbuf(txq, dmap);
		return (ENOSPC);
	} else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) {
		error = vmxnet3_txq_offload_ctx(txq, m, &etype, &proto, &start);
		if (error) {
			txq->vxtxq_stats.vmtxs_offload_failed++;
			vmxnet3_txq_unload_mbuf(txq, dmap);
			m_freem(m);
			*m0 = NULL;
			return (error);
		}
	}

	txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m;
	sop = &txr->vxtxr_txd[txr->vxtxr_head];
	gen = txr->vxtxr_gen ^ 1;	/* Still owned by the cpu */

	for (i = 0; i < nsegs; i++) {
		txd = &txr->vxtxr_txd[txr->vxtxr_head];

		txd->addr = segs[i].ds_addr;
		txd->len = segs[i].ds_len;
		txd->gen = gen;
		txd->dtype = 0;
		txd->offload_mode = VMXNET3_OM_NONE;
		txd->offload_pos = 0;
		txd->hlen = 0;
		txd->eop = 0;
		txd->compreq = 0;
		txd->vtag_mode = 0;
		txd->vtag = 0;

		if (++txr->vxtxr_head == txr->vxtxr_ndesc) {
			txr->vxtxr_head = 0;
			txr->vxtxr_gen ^= 1;
		}
		gen = txr->vxtxr_gen;
	}
	txd->eop = 1;
	txd->compreq = 1;

	if (m->m_flags & M_VLANTAG) {
		sop->vtag_mode = 1;
		sop->vtag = m->m_pkthdr.ether_vlantag;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		sop->offload_mode = VMXNET3_OM_TSO;
		sop->hlen = start;
		sop->offload_pos = m->m_pkthdr.tso_segsz;
	} else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD |
	    VMXNET3_CSUM_OFFLOAD_IPV6)) {
		sop->offload_mode = VMXNET3_OM_CSUM;
		sop->hlen = start;
		sop->offload_pos = start + m->m_pkthdr.csum_data;
	}

	/* Finally, change the ownership. */
	vmxnet3_barrier(sc, VMXNET3_BARRIER_WR);
	sop->gen ^= 1;

	txq->vxtxq_ts->npending += nsegs;
	if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) {
		txq->vxtxq_ts->npending = 0;
		vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id),
		    txr->vxtxr_head);
	}

	return (0);
}

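/*
 * Note on the ownership handoff above: every descriptor of the packet
 * is initially written with the inverted generation bit, so the device
 * ignores the chain while it is being built. Only after the write
 * barrier is the start-of-packet descriptor's gen bit flipped to the
 * ring's current generation, which publishes the whole chain to the
 * device atomically.
 */
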
#ifdef VMXNET3_LEGACY_TX

static void
vmxnet3_start_locked(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;
	struct vmxnet3_txring *txr;
	struct mbuf *m_head;
	int tx, avail;

	sc = ifp->if_softc;
	txq = &sc->vmx_txq[0];
	txr = &txq->vxtxq_cmd_ring;
	tx = 0;

	VMXNET3_TXQ_LOCK_ASSERT(txq);

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    sc->vmx_link_active == 0)
		return;

	while (!ifq_is_empty(&ifp->if_snd)) {
		if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2)
			break;

		m_head = ifq_dequeue(&ifp->if_snd);
		if (m_head == NULL)
			break;

		/* Assume worst case if this mbuf is the head of a chain. */
		if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
			ifq_prepend(&ifp->if_snd, m_head);
			break;
		}

		if (vmxnet3_txq_encap(txq, &m_head) != 0) {
			if (m_head != NULL)
				ifq_prepend(&ifp->if_snd, m_head);
			break;
		}

		tx++;
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (tx > 0)
		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;
}

static void
vmxnet3_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;

	sc = ifp->if_softc;
	txq = &sc->vmx_txq[0];

	VMXNET3_TXQ_LOCK(txq);
	vmxnet3_start_locked(ifp);
	VMXNET3_TXQ_UNLOCK(txq);
}

#else /* !VMXNET3_LEGACY_TX */

static int
vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *txq, struct mbuf *m)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txring *txr;
	struct buf_ring *br;
	struct ifnet *ifp;
	int tx, avail, error;

	sc = txq->vxtxq_sc;
	br = txq->vxtxq_br;
	ifp = sc->vmx_ifp;
	txr = &txq->vxtxq_cmd_ring;
	tx = 0;
	error = 0;

	VMXNET3_TXQ_LOCK_ASSERT(txq);

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    sc->vmx_link_active == 0) {
		if (m != NULL)
			error = drbr_enqueue(ifp, br, m);
		return (error);
	}

	if (m != NULL) {
		error = drbr_enqueue(ifp, br, m);
		if (error)
			return (error);
	}

	while ((avail = VMXNET3_TXRING_AVAIL(txr)) >= 2) {
		m = drbr_peek(ifp, br);
		if (m == NULL)
			break;

		/* Assume worst case if this mbuf is the head of a chain. */
		if (m->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) {
			drbr_putback(ifp, br, m);
			break;
		}

		if (vmxnet3_txq_encap(txq, &m) != 0) {
			if (m != NULL)
				drbr_putback(ifp, br, m);
			else
				drbr_advance(ifp, br);
			break;
		}
		drbr_advance(ifp, br);

		tx++;
		ETHER_BPF_MTAP(ifp, m);
	}

	if (tx > 0)
		txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT;

	return (0);
}

static int
vmxnet3_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;
	int i, ntxq, error;

	sc = ifp->if_softc;
	ntxq = sc->vmx_ntxqueues;

	/* check if flowid is set */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		i = m->m_pkthdr.flowid % ntxq;
	else
		i = curcpu % ntxq;

	txq = &sc->vmx_txq[i];

	if (VMXNET3_TXQ_TRYLOCK(txq) != 0) {
		error = vmxnet3_txq_mq_start_locked(txq, m);
		VMXNET3_TXQ_UNLOCK(txq);
	} else {
		error = drbr_enqueue(ifp, txq->vxtxq_br, m);
		taskqueue_enqueue(sc->vmx_tq, &txq->vxtxq_defrtask);
	}

	return (error);
}

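/*
 * Example (comment only): with vmx_ntxqueues = 4, an mbuf carrying
 * flowid 77 maps to Tx queue 77 % 4 = 1, keeping all packets of one
 * flow on a single ring and therefore in order; mbufs without a hash
 * fall back to an index derived from the current CPU.
 */
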
static void
vmxnet3_txq_tq_deferred(void *xtxq, int pending)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;

	txq = xtxq;
	sc = txq->vxtxq_sc;

	VMXNET3_TXQ_LOCK(txq);
	if (!drbr_empty(sc->vmx_ifp, txq->vxtxq_br))
		vmxnet3_txq_mq_start_locked(txq, NULL);
	VMXNET3_TXQ_UNLOCK(txq);
}

#endif /* VMXNET3_LEGACY_TX */

static void
vmxnet3_txq_start(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;

	sc = txq->vxtxq_sc;
	ifp = sc->vmx_ifp;

#ifdef VMXNET3_LEGACY_TX
	if (!ifq_is_empty(&ifp->if_snd))
		vmxnet3_start_locked(ifp);
#else
	if (!drbr_empty(ifp, txq->vxtxq_br))
		vmxnet3_txq_mq_start_locked(txq, NULL);
#endif
}

static void
vmxnet3_tx_start_all(struct vmxnet3_softc *sc)
{
	struct vmxnet3_txqueue *txq;
	int i;

	VMXNET3_CORE_LOCK_ASSERT(sc);

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];

		VMXNET3_TXQ_LOCK(txq);
		vmxnet3_txq_start(txq);
		VMXNET3_TXQ_UNLOCK(txq);
	}
}

static void
vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag)
{
	struct ifnet *ifp;
	int idx, bit;

	ifp = sc->vmx_ifp;
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	if (tag == 0 || tag > 4095)
		return;

	VMXNET3_CORE_LOCK(sc);

	/* Update our private VLAN bitvector. */
	if (add)
		sc->vmx_vlan_filter[idx] |= (1 << bit);
	else
		sc->vmx_vlan_filter[idx] &= ~(1 << bit);

#if 0 /* VLAN_HWFILTER */
	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		if (add)
			sc->vmx_ds->vlan_filter[idx] |= (1 << bit);
		else
			sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit);
		vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER);
	}
#endif

	VMXNET3_CORE_UNLOCK(sc);
}

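/*
 * Worked example (comment only): the 4096 possible VLAN IDs are kept
 * as a bit vector of 128 32-bit words. For tag 1000, idx =
 * (1000 >> 5) & 0x7F = 31 and bit = 1000 & 0x1F = 8, so membership of
 * VLAN 1000 is tracked by bit 8 of word 31.
 */
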
static void
vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc == arg)
		vmxnet3_update_vlan_filter(arg, 1, tag);
}

static void
vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc == arg)
		vmxnet3_update_vlan_filter(arg, 0, tag);
}

static void
vmxnet3_set_rxfilter(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	struct vmxnet3_driver_shared *ds;
	struct ifmultiaddr *ifma;
	u_int mode;

	ifp = sc->vmx_ifp;
	ds = sc->vmx_ds;

	mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST;
	if (ifp->if_flags & IFF_PROMISC)
		mode |= VMXNET3_RXMODE_PROMISC;
	if (ifp->if_flags & IFF_ALLMULTI)
		mode |= VMXNET3_RXMODE_ALLMULTI;
	else {
		int cnt = 0, overflow = 0;

		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			else if (cnt == VMXNET3_MULTICAST_MAX) {
				overflow = 1;
				break;
			}

			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN);
			cnt++;
		}

		if (overflow != 0) {
			cnt = 0;
			mode |= VMXNET3_RXMODE_ALLMULTI;
		} else if (cnt > 0)
			mode |= VMXNET3_RXMODE_MCAST;
		ds->mcast_tablelen = cnt * ETHER_ADDR_LEN;
	}

	ds->rxmode = mode;

	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER);
	vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE);
}

static int
vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu)
{
	struct ifnet *ifp;

	ifp = sc->vmx_ifp;

	if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU)
		return (EINVAL);

	ifp->if_mtu = mtu;

	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~IFF_RUNNING;
		vmxnet3_init_locked(sc);
	}

	return (0);
}

static int
vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct vmxnet3_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifp->if_mtu != ifr->ifr_mtu) {
			VMXNET3_CORE_LOCK(sc);
			error = vmxnet3_change_mtu(sc, ifr->ifr_mtu);
			VMXNET3_CORE_UNLOCK(sc);
		}
		break;

	case SIOCSIFFLAGS:
		VMXNET3_CORE_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->vmx_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					vmxnet3_set_rxfilter(sc);
				}
			} else
				vmxnet3_init_locked(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vmxnet3_stop(sc);
		}
		sc->vmx_if_flags = ifp->if_flags;
		VMXNET3_CORE_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VMXNET3_CORE_LOCK(sc);
		if (ifp->if_flags & IFF_RUNNING)
			vmxnet3_set_rxfilter(sc);
		VMXNET3_CORE_UNLOCK(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd);
		break;

	case SIOCSIFCAP:
		VMXNET3_CORE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_TXCSUM)
			ifp->if_capenable ^= IFCAP_TXCSUM;
		if (mask & IFCAP_TXCSUM_IPV6)
			ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
		if (mask & IFCAP_TSO4)
			ifp->if_capenable ^= IFCAP_TSO4;
		if (mask & IFCAP_TSO6)
			ifp->if_capenable ^= IFCAP_TSO6;

		if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | /* IFCAP_LRO | */
		    IFCAP_VLAN_HWTAGGING /* | IFCAP_VLAN_HWFILTER */)) {
			/* Changing these features requires us to reinit. */
			reinit = 1;

			if (mask & IFCAP_RXCSUM)
				ifp->if_capenable ^= IFCAP_RXCSUM;
			if (mask & IFCAP_RXCSUM_IPV6)
				ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
#if 0 /* XXX LRO */
			if (mask & IFCAP_LRO)
				ifp->if_capenable ^= IFCAP_LRO;
#endif
			if (mask & IFCAP_VLAN_HWTAGGING)
				ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
#if 0 /* XXX VLAN_HWFILTER */
			if (mask & IFCAP_VLAN_HWFILTER)
				ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
#endif
		} else
			reinit = 0;

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			vmxnet3_init_locked(sc);
		} else {
			vmxnet3_init_hwassist(sc);
		}

		VMXNET3_CORE_UNLOCK(sc);

		VLAN_CAPABILITIES(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc);

	return (error);
}

#ifndef VMXNET3_LEGACY_TX
static void
vmxnet3_qflush(struct ifnet *ifp)
{
	struct vmxnet3_softc *sc;
	struct vmxnet3_txqueue *txq;
	struct mbuf *m;
	int i;

	sc = ifp->if_softc;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		txq = &sc->vmx_txq[i];

		VMXNET3_TXQ_LOCK(txq);
		while ((m = buf_ring_dequeue_sc(txq->vxtxq_br)) != NULL)
			m_freem(m);
		VMXNET3_TXQ_UNLOCK(txq);
	}

	if_qflush(ifp);
}
#endif

static int
vmxnet3_watchdog(struct vmxnet3_txqueue *txq)
{
	struct vmxnet3_softc *sc;

	sc = txq->vxtxq_sc;

	VMXNET3_TXQ_LOCK(txq);
	if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) {
		VMXNET3_TXQ_UNLOCK(txq);
		return (0);
	}
	VMXNET3_TXQ_UNLOCK(txq);

	if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n",
	    txq->vxtxq_id);
	return (1);
}

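/*
 * The watchdog is a simple countdown: vxtxq_watchdog is re-armed to
 * VMXNET3_WATCHDOG_TIMEOUT whenever a queue transmits and is
 * decremented here once per second from vmxnet3_tick(). A timeout is
 * reported only when the decrement reaches zero, i.e. the queue had
 * pending work but made no progress for the entire interval; an idle
 * queue (counter already zero) never fires.
 */
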
static void
vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc)
{

	vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS);
}

static void
vmxnet3_txq_accum_stats(struct vmxnet3_txqueue *txq,
    struct vmxnet3_txq_stats *accum)
{
	struct vmxnet3_txq_stats *st;

	st = &txq->vxtxq_stats;

	accum->vmtxs_opackets += st->vmtxs_opackets;
	accum->vmtxs_obytes += st->vmtxs_obytes;
	accum->vmtxs_omcasts += st->vmtxs_omcasts;
	accum->vmtxs_csum += st->vmtxs_csum;
	accum->vmtxs_tso += st->vmtxs_tso;
	accum->vmtxs_full += st->vmtxs_full;
	accum->vmtxs_offload_failed += st->vmtxs_offload_failed;
}

static void
vmxnet3_rxq_accum_stats(struct vmxnet3_rxqueue *rxq,
    struct vmxnet3_rxq_stats *accum)
{
	struct vmxnet3_rxq_stats *st;

	st = &rxq->vxrxq_stats;

	accum->vmrxs_ipackets += st->vmrxs_ipackets;
	accum->vmrxs_ibytes += st->vmrxs_ibytes;
	accum->vmrxs_iqdrops += st->vmrxs_iqdrops;
	accum->vmrxs_ierrors += st->vmrxs_ierrors;
}

static void
vmxnet3_accumulate_stats(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	struct vmxnet3_statistics *st;
	struct vmxnet3_txq_stats txaccum;
	struct vmxnet3_rxq_stats rxaccum;
	int i;

	ifp = sc->vmx_ifp;
	st = &sc->vmx_stats;

	bzero(&txaccum, sizeof(struct vmxnet3_txq_stats));
	bzero(&rxaccum, sizeof(struct vmxnet3_rxq_stats));

	for (i = 0; i < sc->vmx_ntxqueues; i++)
		vmxnet3_txq_accum_stats(&sc->vmx_txq[i], &txaccum);
	for (i = 0; i < sc->vmx_nrxqueues; i++)
		vmxnet3_rxq_accum_stats(&sc->vmx_rxq[i], &rxaccum);

	/*
	 * With the exception of if_ierrors, these ifnet statistics are
	 * only updated in the driver, so just set them to our accumulated
	 * values. if_ierrors is updated in ether_input() for malformed
	 * frames that we should have already discarded.
	 */
	ifp->if_ipackets = rxaccum.vmrxs_ipackets;
	ifp->if_iqdrops = rxaccum.vmrxs_iqdrops;
	ifp->if_ierrors = rxaccum.vmrxs_ierrors;
	ifp->if_opackets = txaccum.vmtxs_opackets;
#ifndef VMXNET3_LEGACY_TX
	ifp->if_obytes = txaccum.vmtxs_obytes;
	ifp->if_omcasts = txaccum.vmtxs_omcasts;
#endif
}

static void
vmxnet3_tick(void *xsc)
{
	struct vmxnet3_softc *sc;
	struct ifnet *ifp;
	int i, timedout;

	sc = xsc;
	ifp = sc->vmx_ifp;
	timedout = 0;

	VMXNET3_CORE_LOCK_ASSERT(sc);

	vmxnet3_accumulate_stats(sc);
	vmxnet3_refresh_host_stats(sc);

	for (i = 0; i < sc->vmx_ntxqueues; i++)
		timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]);

	if (timedout != 0) {
		ifp->if_flags &= ~IFF_RUNNING;
		vmxnet3_init_locked(sc);
	} else
		callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc);
}

static int
vmxnet3_link_is_up(struct vmxnet3_softc *sc)
{
	uint32_t status;

	/* Also update the link speed while here. */
	status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK);
	sc->vmx_link_speed = status >> 16;
	return !!(status & 0x1);
}

static void
vmxnet3_link_status(struct vmxnet3_softc *sc)
{
	struct ifnet *ifp;
	int link;

	ifp = sc->vmx_ifp;
	link = vmxnet3_link_is_up(sc);

	if (link != 0 && sc->vmx_link_active == 0) {
		sc->vmx_link_active = 1;
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (link == 0 && sc->vmx_link_active != 0) {
		sc->vmx_link_active = 0;
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vmxnet3_softc *sc;

	sc = ifp->if_softc;

	ifmr->ifm_active = IFM_ETHER | IFM_AUTO;
	ifmr->ifm_status = IFM_AVALID;

	VMXNET3_CORE_LOCK(sc);
	if (vmxnet3_link_is_up(sc) != 0)
		ifmr->ifm_status |= IFM_ACTIVE;
	else
		ifmr->ifm_active |= IFM_NONE;
	VMXNET3_CORE_UNLOCK(sc);
}

static int
vmxnet3_media_change(struct ifnet *ifp)
{

	/* Ignore. */
	return (0);
}

static void
vmxnet3_set_lladdr(struct vmxnet3_softc *sc)
{
	uint32_t ml, mh;

	ml  = sc->vmx_lladdr[0];
	ml |= sc->vmx_lladdr[1] << 8;
	ml |= sc->vmx_lladdr[2] << 16;
	ml |= sc->vmx_lladdr[3] << 24;
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml);

	mh  = sc->vmx_lladdr[4];
	mh |= sc->vmx_lladdr[5] << 8;
	vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh);
}

static void
vmxnet3_get_lladdr(struct vmxnet3_softc *sc)
{
	uint32_t ml, mh;

	ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL);
	mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH);

	sc->vmx_lladdr[0] = ml;
	sc->vmx_lladdr[1] = ml >> 8;
	sc->vmx_lladdr[2] = ml >> 16;
	sc->vmx_lladdr[3] = ml >> 24;
	sc->vmx_lladdr[4] = mh;
	sc->vmx_lladdr[5] = mh >> 8;
}

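/*
 * Illustrative example (comment only): the device keeps the station
 * address split across two little-endian 32-bit registers. For the MAC
 * 00:0c:29:aa:bb:cc, MACL holds bytes 0-3 as 0xaa290c00 and MACH holds
 * bytes 4-5 as 0x0000ccbb.
 */
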
static void
vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
	struct sysctl_oid *node, *txsnode;
	struct sysctl_oid_list *list, *txslist;
	struct vmxnet3_txq_stats *stats;
	struct UPT1_TxStats *txstats;
	char namebuf[16];

	stats = &txq->vxtxq_stats;
	txstats = &txq->vxtxq_ts->stats;

	ksnprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
	    NULL, "Transmit Queue");
	txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
	    &stats->vmtxs_opackets, 0, "Transmit packets");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
	    &stats->vmtxs_obytes, 0, "Transmit bytes");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
	    &stats->vmtxs_omcasts, 0, "Transmit multicasts");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
	    &stats->vmtxs_csum, 0, "Transmit checksum offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
	    &stats->vmtxs_tso, 0, "Transmit TCP segmentation offloaded");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD,
	    &stats->vmtxs_full, 0, "Transmit ring full");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD,
	    &stats->vmtxs_offload_failed, 0, "Transmit checksum offload failed");

	/*
	 * Add statistics reported by the host. These are updated once
	 * per second.
	 */
	txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
	    NULL, "Host Statistics");
	txslist = SYSCTL_CHILDREN(txsnode);

	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD,
	    &txstats->TSO_packets, 0, "TSO packets");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD,
	    &txstats->TSO_bytes, 0, "TSO bytes");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
	    &txstats->ucast_packets, 0, "Unicast packets");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
	    &txstats->ucast_bytes, 0, "Unicast bytes");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
	    &txstats->mcast_packets, 0, "Multicast packets");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
	    &txstats->mcast_bytes, 0, "Multicast bytes");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD,
	    &txstats->error, 0, "Errors");
	SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD,
	    &txstats->discard, 0, "Discards");
}

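/*
 * The nodes created above hang off the device's sysctl tree, so,
 * assuming unit 0, the per-queue counters can be inspected with e.g.
 * "sysctl dev.vmx.0.txq0.opackets" and the host-maintained mirror with
 * "sysctl dev.vmx.0.txq0.hstats.ucast_packets".
 */
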
static void
vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
	struct sysctl_oid *node, *rxsnode;
	struct sysctl_oid_list *list, *rxslist;
	struct vmxnet3_rxq_stats *stats;
	struct UPT1_RxStats *rxstats;
	char namebuf[16];

	stats = &rxq->vxrxq_stats;
	rxstats = &rxq->vxrxq_rs->stats;

	ksnprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id);
	node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD,
	    NULL, "Receive Queue");
	rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
	    &stats->vmrxs_ipackets, 0, "Receive packets");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
	    &stats->vmrxs_ibytes, 0, "Receive bytes");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
	    &stats->vmrxs_iqdrops, 0, "Receive drops");
	SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
	    &stats->vmrxs_ierrors, 0, "Receive errors");

	/*
	 * Add statistics reported by the host. These are updated once
	 * per second.
	 */
	rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD,
	    NULL, "Host Statistics");
	rxslist = SYSCTL_CHILDREN(rxsnode);

	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD,
	    &rxstats->LRO_packets, 0, "LRO packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD,
	    &rxstats->LRO_bytes, 0, "LRO bytes");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD,
	    &rxstats->ucast_packets, 0, "Unicast packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD,
	    &rxstats->ucast_bytes, 0, "Unicast bytes");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD,
	    &rxstats->mcast_packets, 0, "Multicast packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD,
	    &rxstats->mcast_bytes, 0, "Multicast bytes");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD,
	    &rxstats->bcast_packets, 0, "Broadcast packets");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD,
	    &rxstats->bcast_bytes, 0, "Broadcast bytes");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD,
	    &rxstats->nobuffer, 0, "No buffer");
	SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD,
	    &rxstats->error, 0, "Errors");
}

static void
vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
	struct sysctl_oid *node;
	struct sysctl_oid_list *list;
	int i;

	for (i = 0; i < sc->vmx_ntxqueues; i++) {
		struct vmxnet3_txqueue *txq = &sc->vmx_txq[i];

		node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO,
		    "debug", CTLFLAG_RD, NULL, "");
		list = SYSCTL_CHILDREN(node);

		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_head, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_next, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD,
		    &txq->vxtxq_cmd_ring.vxtxr_gen, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
		    &txq->vxtxq_comp_ring.vxcr_next, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
		    &txq->vxtxq_comp_ring.vxcr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
		    &txq->vxtxq_comp_ring.vxcr_gen, 0, "");
	}

	for (i = 0; i < sc->vmx_nrxqueues; i++) {
		struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i];

		node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO,
		    "debug", CTLFLAG_RD, NULL, "");
		list = SYSCTL_CHILDREN(node);

		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD,
		    &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD,
		    &rxq->vxrxq_comp_ring.vxcr_next, 0, "");
		SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD,
		    &rxq->vxrxq_comp_ring.vxcr_ndesc, 0, "");
		SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD,
		    &rxq->vxrxq_comp_ring.vxcr_gen, 0, "");
	}
}

static void
vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc,
    struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child)
{
	int i;

	for (i = 0; i < sc->vmx_ntxqueues; i++)
		vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child);
	for (i = 0; i < sc->vmx_nrxqueues; i++)
		vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child);

	vmxnet3_setup_debug_sysctl(sc, ctx, child);
}

static void
vmxnet3_setup_sysctl(struct vmxnet3_softc *sc)
{
	device_t dev;
	struct vmxnet3_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vmx_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_ntxqueues", CTLFLAG_RD,
	    &sc->vmx_max_ntxqueues, 0, "Maximum number of Tx queues");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_nrxqueues", CTLFLAG_RD,
	    &sc->vmx_max_nrxqueues, 0, "Maximum number of Rx queues");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD,
	    &sc->vmx_ntxqueues, 0, "Number of Tx queues");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD,
	    &sc->vmx_nrxqueues, 0, "Number of Rx queues");

	stats = &sc->vmx_stats;
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defragged", CTLFLAG_RD,
	    &stats->vmst_defragged, 0, "Tx mbuf chains defragged");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defrag_failed", CTLFLAG_RD,
	    &stats->vmst_defrag_failed, 0,
	    "Tx mbuf dropped because defrag failed");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD,
	    &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD,
	    &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed");

	vmxnet3_setup_queue_sysctl(sc, ctx, child);
}

static void
vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{

	bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v);
}

static uint32_t
vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r)
{

	return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r));
}

static void
vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v)
{

	bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v);
}

static void
vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{

	vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd);
}

static uint32_t
vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd)
{

	vmxnet3_write_cmd(sc, cmd);
	bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD));
}

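/*
 * Commands follow a write-then-read protocol on the shared BAR1 CMD
 * register: vmxnet3_write_cmd() posts the command code, and for
 * commands that return a value, vmxnet3_read_cmd() reads the result
 * back from the same register after a full bus-space barrier.
 */
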
static void
vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq)
{

	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0);
}

static void
vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq)
{

	vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1);
}

static void
vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < sc->vmx_nintrs; i++)
		vmxnet3_enable_intr(sc, i);
}

static void
vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc)
{
	int i;

	sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
	for (i = 0; i < sc->vmx_nintrs; i++)
		vmxnet3_disable_intr(sc, i);
}

static void
vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr = arg;

	if (error == 0)
		*baddr = segs->ds_addr;
}

static int
vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align,
    struct vmxnet3_dma_alloc *dma)
{
	device_t dev;
	int error;

	dev = sc->vmx_dev;
	bzero(dma, sizeof(struct vmxnet3_dma_alloc));

	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    align, 0,		/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,		/* filter, filterarg */
	    size,		/* maxsize */
	    1,			/* nsegments */
	    size,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,	/* flags */
	    &dma->dma_tag);
	if (error) {
		device_printf(dev, "bus_dma_tag_create failed: %d\n", error);
		goto fail;
	}

	error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map);
	if (error) {
		device_printf(dev, "bus_dmamem_alloc failed: %d\n", error);
		goto fail;
	}

	error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "bus_dmamap_load failed: %d\n", error);
		goto fail;
	}

	dma->dma_size = size;

fail:
	if (error)
		vmxnet3_dma_free(sc, dma);

	return (error);
}

static void
vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma)
{

	if (dma->dma_tag != NULL) {
		if (dma->dma_paddr != 0) {
			bus_dmamap_sync(dma->dma_tag, dma->dma_map,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->dma_tag, dma->dma_map);
		}

		if (dma->dma_vaddr != NULL) {
			bus_dmamem_free(dma->dma_tag, dma->dma_vaddr,
			    dma->dma_map);
		}

		bus_dma_tag_destroy(dma->dma_tag);
	}
	bzero(dma, sizeof(struct vmxnet3_dma_alloc));
}

static int
vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def)
{
	char path[64];

	ksnprintf(path, sizeof(path),
	    "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}

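/*
 * Usage sketch (the knob name here is illustrative): a per-unit loader
 * tunable such as "hw.vmx.0.txnqueue" would be fetched for unit 0 with
 * vmxnet3_tunable_int(sc, "txnqueue", default_value); the default is
 * returned unchanged when the tunable is not set.
 */
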
#define mb()	__asm volatile("mfence" ::: "memory")
#define wmb()	__asm volatile("sfence" ::: "memory")
#define rmb()	__asm volatile("lfence" ::: "memory")

/*
 * Since this is a purely paravirtualized device, we do not have
 * to worry about DMA coherency. But at times, we must make sure
 * both the compiler and CPU do not reorder memory operations.
 */
static void
vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type)
{

	switch (type) {
	case VMXNET3_BARRIER_RD:
		rmb();
		break;
	case VMXNET3_BARRIER_WR:
		wmb();
		break;
	case VMXNET3_BARRIER_RDWR:
		mb();
		break;
	default:
		panic("%s: bad barrier type %d", __func__, type);
	}
}