/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include "opt_ifpoll.h"

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/serialize.h>
#include <sys/bus.h>

#include <machine/limits.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <dev/virtual/virtio/virtio/virtio.h>
#include <dev/virtual/virtio/virtio/virtqueue.h>
#include <dev/virtual/virtio/net/virtio_net.h>
#include <dev/virtual/virtio/net/if_vtnetvar.h>

MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);

#ifdef IFPOLL_ENABLE
static void	vtnet_npoll(struct ifnet *, struct ifpoll_info *);
static void	vtnet_npoll_status(struct ifnet *);
static void	vtnet_npoll_rx(struct ifnet *, void *, int);
static void	vtnet_npoll_tx(struct ifnet *, void *, int);
#endif

static void	vtnet_serialize(struct ifnet *, enum ifnet_serialize);
static void	vtnet_deserialize(struct ifnet *, enum ifnet_serialize);
static int	vtnet_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	vtnet_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif /* INVARIANTS */
static int	vtnet_alloc_intrs(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static int	vtnet_bind_intrs(struct vtnet_softc *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static void	vtnet_watchdog(struct ifaltq_subque *);
static int	vtnet_setup_interface(struct vtnet_softc *);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int	vtnet_init_rx_vq(struct vtnet_softc *);
static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
static void	vtnet_free_ctrl_vq(struct vtnet_softc *);

static struct mbuf *vtnet_alloc_rxbuf(struct vtnet_softc *, int,
		    struct mbuf **);
static int	vtnet_replace_rxbuf(struct vtnet_softc *,
		    struct mbuf *, int);
static int	vtnet_newbuf(struct vtnet_softc *);
static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
static void	vtnet_rx_msix_intr(void *);
static void	vtnet_rx_vq_intr(void *);

static void	vtnet_enqueue_txhdr(struct vtnet_softc *,
		    struct vtnet_tx_header *);
static void	vtnet_txeof(struct vtnet_softc *);
static struct mbuf *vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void	vtnet_start(struct ifnet *, struct ifaltq_subque *);

static void	vtnet_config_intr(void *);
static void	vtnet_tx_msix_intr(void *);
static void	vtnet_tx_vq_intr(void *);

static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_virtio_reinit(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);

static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_rx_filter(struct vtnet_softc *sc);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);

static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	vtnet_add_statistics(struct vtnet_softc *);

static int	vtnet_enable_rx_intr(struct vtnet_softc *);
static int	vtnet_enable_tx_intr(struct vtnet_softc *);
static void	vtnet_disable_rx_intr(struct vtnet_softc *);
static void	vtnet_disable_tx_intr(struct vtnet_softc *);

static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 1;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);

/*
 * Reducing the number of transmit completed interrupts can
 * improve performance. To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start path. The price to pay for this is that the
 * freeing of transmitted mbufs may be delayed.
 */
#define VTNET_TX_INTR_MODERATION
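
/*
 * With the define above in effect, vtnet_start() also reaps completed
 * transmit buffers itself (see the virtqueue_nused() check there) once
 * the Tx virtqueue is at least half full, so completions are still
 * processed without a per-packet interrupt.
 */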

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM, "TxChecksum" },
	{ VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" },
	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "DynOffload" },
	{ VIRTIO_NET_F_MAC, "MacAddress" },
	{ VIRTIO_NET_F_GSO, "TxAllGSO" },
	{ VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" },
	{ VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" },
	{ VIRTIO_NET_F_GUEST_ECN, "RxECN" },
	{ VIRTIO_NET_F_GUEST_UFO, "RxUFO" },
	{ VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" },
	{ VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" },
	{ VIRTIO_NET_F_HOST_ECN, "TxTSOECN" },
	{ VIRTIO_NET_F_HOST_UFO, "TxUFO" },
	{ VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" },
	{ VIRTIO_NET_F_STATUS, "Status" },
	{ VIRTIO_NET_F_CTRL_VQ, "ControlVq" },
	{ VIRTIO_NET_F_CTRL_RX, "RxMode" },
	{ VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" },
	{ VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" },
	{ VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" },
	{ VIRTIO_NET_F_MQ, "Multiqueue" },
	{ VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" },

	{ 0, NULL }
};

static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe, vtnet_probe),
	DEVMETHOD(device_attach, vtnet_attach),
	DEVMETHOD(device_detach, vtnet_detach),
	DEVMETHOD(device_suspend, vtnet_suspend),
	DEVMETHOD(device_resume, vtnet_resume),
	DEVMETHOD(device_shutdown, vtnet_shutdown),

	DEVMETHOD_END
};

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};

static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, NULL, NULL);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);

static int
vtnet_probe(device_t dev)
{
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int i, error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	lwkt_serialize_init(&sc->vtnet_slz);
	lwkt_serialize_init(&sc->vtnet_rx_slz);
	lwkt_serialize_init(&sc->vtnet_tx_slz);
	sc->serializes[0] = &sc->vtnet_slz;
	sc->serializes[1] = &sc->vtnet_rx_slz;
	sc->serializes[2] = &sc->vtnet_tx_slz;

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	vtnet_add_statistics(sc);
	SLIST_INIT(&sc->vtnet_txhdr_free);

	/* Register our feature descriptions. */
	virtio_set_feature_desc(dev, vtnet_feature_desc);
	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
	}

	sc->vtnet_rx_mbuf_size = MCLBYTES;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
		    virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
	}

	error = vtnet_alloc_intrs(sc);
	if (error) {
		device_printf(dev, "cannot allocate interrupts\n");
		goto fail;
	}

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_bind_intrs(sc);
	if (error) {
		device_printf(dev, "cannot bind virtqueues to interrupts\n");
		goto fail;
	}

	/* Read (or generate) the MAC address for the adapter. */
	vtnet_get_hwaddr(sc);

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	for (i = 0; i < sc->vtnet_nintr; i++) {
		error = virtio_setup_intr(dev, i, sc->vtnet_intr_slz[i]);
		if (error) {
			device_printf(dev, "cannot setup virtqueue "
			    "interrupts\n");
			ether_ifdetach(sc->vtnet_ifp);
			goto fail;
		}
	}

	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		ifnet_serialize_all(sc->vtnet_ifp);
		vtnet_set_hwaddr(sc);
		ifnet_deserialize_all(sc->vtnet_ifp);
	}

	/*
	 * Device defaults to promiscuous mode for backwards
	 * compatibility. Turn it off if possible.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		ifnet_serialize_all(sc->vtnet_ifp);
		if (vtnet_set_promisc(sc, 0) != 0) {
			sc->vtnet_ifp->if_flags |= IFF_PROMISC;
			device_printf(dev,
			    "cannot disable promiscuous mode\n");
		}
		ifnet_deserialize_all(sc->vtnet_ifp);
	} else {
		sc->vtnet_ifp->if_flags |= IFF_PROMISC;
	}

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}

static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int i;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	for (i = 0; i < sc->vtnet_nintr; i++)
		virtio_teardown_intr(dev, i);

	if (device_is_attached(dev)) {
		ifnet_serialize_all(ifp);
		vtnet_stop(sc);
		lwkt_serialize_handler_disable(&sc->vtnet_slz);
		lwkt_serialize_handler_disable(&sc->vtnet_rx_slz);
		lwkt_serialize_handler_disable(&sc->vtnet_tx_slz);
		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	if (ifp != NULL) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	if (sc->vtnet_rx_vq != NULL)
		vtnet_free_rx_mbufs(sc);
	if (sc->vtnet_tx_vq != NULL)
		vtnet_free_tx_mbufs(sc);
	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	if (sc->vtnet_txhdrarea != NULL) {
		contigfree(sc->vtnet_txhdrarea,
		    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
		    M_VTNET);
		sc->vtnet_txhdrarea = NULL;
	}
	SLIST_INIT(&sc->vtnet_txhdr_free);
	if (sc->vtnet_macfilter != NULL) {
		contigfree(sc->vtnet_macfilter,
		    sizeof(struct vtnet_mac_filter), M_DEVBUF);
		sc->vtnet_macfilter = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	return (0);
}

static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	ifnet_serialize_all(sc->vtnet_ifp);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	ifnet_deserialize_all(sc->vtnet_ifp);

	return (0);
}

static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	ifnet_serialize_all(ifp);
	if (ifp->if_flags & IFF_UP)
		vtnet_init(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	ifnet_deserialize_all(ifp);

	return (0);
}

static int
vtnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}

static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	if (vtnet_csum_disable)
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * XXX DragonFly doesn't support receive checksum offload for ipv6 yet,
	 *     hence always disable the virtio feature for now.
	 * XXX We need to support the DynOffload feature, in order to
	 *     dynamically enable/disable this feature.
	 */
	mask |= VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * TSO is only available when the tx checksum offload feature is also
	 * negotiated.
	 */
	if (vtnet_csum_disable || vtnet_tso_disable)
		mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
		    VIRTIO_NET_F_HOST_ECN;

	if (vtnet_lro_disable)
		mask |= VTNET_LRO_FEATURES;

	features = VTNET_FEATURES & ~mask;
	features |= VIRTIO_F_NOTIFY_ON_EMPTY;
	features |= VIRTIO_F_ANY_LAYOUT;
	sc->vtnet_features = virtio_negotiate_features(dev, features);

	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet
		 * header, and the VirtIO header. This requires up to 34
		 * descriptors with MCLBYTES clusters. If we do not have
		 * indirect descriptors, LRO is disabled since the virtqueue
		 * will not contain very many receive buffers.
		 */
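		/*
		 * Rough arithmetic behind the figure above: a maximum-size
		 * 65535-byte TCP packet needs 65535 / MCLBYTES (2048), i.e.
		 * 32, clusters, plus descriptors for the VirtIO and
		 * Ethernet headers, for a chain of up to 34 descriptors.
		 */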
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "LRO disabled due to both mergeable buffers and "
			    "indirect descriptors not negotiated\n");

			features &= ~VTNET_LRO_FEATURES;
			sc->vtnet_features =
			    virtio_negotiate_features(dev, features);
		} else {
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
		}
	}
}

static void
vtnet_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct vtnet_softc *sc = ifp->if_softc;

	ifnet_serialize_array_enter(sc->serializes, 3, slz);
}

static void
vtnet_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct vtnet_softc *sc = ifp->if_softc;

	ifnet_serialize_array_exit(sc->serializes, 3, slz);
}

static int
vtnet_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
	struct vtnet_softc *sc = ifp->if_softc;

	return ifnet_serialize_array_try(sc->serializes, 3, slz);
}

#ifdef INVARIANTS

static void
vtnet_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
	struct vtnet_softc *sc = ifp->if_softc;

	ifnet_serialize_array_assert(sc->serializes, 3, slz, serialized);
}

#endif /* INVARIANTS */

static int
vtnet_alloc_intrs(struct vtnet_softc *sc)
{
	int cnt, error;
	int intrcount = virtio_intr_count(sc->vtnet_dev);
	int i;
	int use_config = 0;

	if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS)) {
		use_config = 1;
		/* We can use a maximum of 3 interrupt vectors. */
		intrcount = imin(intrcount, 3);
	} else {
		/* We can use a maximum of 2 interrupt vectors. */
		intrcount = imin(intrcount, 2);
	}

	if (intrcount < 1)
		return (ENXIO);

	for (i = 0; i < intrcount; i++)
		sc->vtnet_cpus[i] = -1;

	cnt = intrcount;
	error = virtio_intr_alloc(sc->vtnet_dev, &cnt, use_config,
	    sc->vtnet_cpus);
	if (error != 0) {
		virtio_intr_release(sc->vtnet_dev);
		return (error);
	}
	sc->vtnet_nintr = cnt;

	return (0);
}

static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtnet_dev;
	nvqs = 2;

	/*
	 * Indirect descriptors are not needed for the Rx
	 * virtqueue when mergeable buffers are negotiated.
	 * The header is placed inline with the data, not
	 * in a separate descriptor, and mbuf clusters are
	 * always physically contiguous.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		sc->vtnet_rx_nsegs = (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) ?
		    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
	} else {
		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
	else
		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;

	VQ_ALLOC_INFO_INIT(&vq_info[0], sc->vtnet_rx_nsegs, &sc->vtnet_rx_vq,
	    "%s receive", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], sc->vtnet_tx_nsegs, &sc->vtnet_tx_vq,
	    "%s transmit", device_get_nameunit(dev));

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		nvqs++;

		VQ_ALLOC_INFO_INIT(&vq_info[2], 0, &sc->vtnet_ctrl_vq,
		    "%s control", device_get_nameunit(dev));
	}

	return (virtio_alloc_virtqueues(dev, nvqs, vq_info));
}

static int
vtnet_bind_intrs(struct vtnet_softc *sc)
{
	int i;
	int error;

	for (i = 0; i < 3; i++)
		sc->vtnet_intr_slz[i] = &sc->vtnet_slz;

	/* Possible "Virtqueue <-> IRQ" configurations */
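	/*
	 * Summary of the layouts handled below:
	 *   1 vector:  config changes and both virtqueues share it.
	 *   2 vectors: with VIRTIO_NET_F_STATUS, vector 0 takes config
	 *              changes and vector 1 both virtqueues; without it,
	 *              the Rx and Tx virtqueues get one MSI-X vector each.
	 *   3 vectors: vector 0 config, vector 1 Rx, vector 2 Tx.
	 */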
	switch (sc->vtnet_nintr) {
	case 1:
		sc->vtnet_irqmap[0] = (struct irqmap){0, vtnet_rx_vq_intr};
		sc->vtnet_irqmap[1] = (struct irqmap){0, vtnet_tx_vq_intr};
		break;
	case 2:
		if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS)) {
			sc->vtnet_irqmap[0] =
			    (struct irqmap){1, vtnet_rx_vq_intr};
			sc->vtnet_irqmap[1] =
			    (struct irqmap){1, vtnet_tx_vq_intr};
		} else {
			sc->vtnet_irqmap[0] =
			    (struct irqmap){0, vtnet_rx_msix_intr};
			sc->vtnet_irqmap[1] =
			    (struct irqmap){1, vtnet_tx_msix_intr};
			sc->vtnet_intr_slz[0] = &sc->vtnet_rx_slz;
			sc->vtnet_intr_slz[1] = &sc->vtnet_tx_slz;
		}
		break;
	case 3:
		sc->vtnet_irqmap[0] = (struct irqmap){1, vtnet_rx_msix_intr};
		sc->vtnet_irqmap[1] = (struct irqmap){2, vtnet_tx_msix_intr};
		sc->vtnet_intr_slz[1] = &sc->vtnet_rx_slz;
		sc->vtnet_intr_slz[2] = &sc->vtnet_tx_slz;
		break;
	default:
		device_printf(sc->vtnet_dev,
		    "Invalid interrupt vector count: %d\n", sc->vtnet_nintr);
		return (EINVAL);
	}

	for (i = 0; i < 2; i++) {
		error = virtio_bind_intr(sc->vtnet_dev,
		    sc->vtnet_irqmap[i].irq, i, sc->vtnet_irqmap[i].handler,
		    sc);
		if (error) {
			device_printf(sc->vtnet_dev,
			    "cannot bind virtqueue IRQs\n");
			return (error);
		}
	}
	if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS)) {
		error = virtio_bind_intr(sc->vtnet_dev, 0, -1,
		    vtnet_config_intr, sc);
		if (error) {
			device_printf(sc->vtnet_dev,
			    "cannot bind config_change IRQ\n");
			return (error);
		}
	}

	return (0);
}

static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int i;

	dev = sc->vtnet_dev;

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_start = vtnet_start;
#ifdef IFPOLL_ENABLE
	ifp->if_npoll = vtnet_npoll;
#endif
	ifp->if_serialize = vtnet_serialize;
	ifp->if_deserialize = vtnet_deserialize;
	ifp->if_tryserialize = vtnet_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = vtnet_serialize_assert;
#endif
	ifp->if_ioctl = vtnet_ioctl;

	sc->vtnet_rx_process_limit = virtqueue_size(sc->vtnet_rx_vq);
	sc->vtnet_tx_size = virtqueue_size(sc->vtnet_tx_vq);
	if (sc->vtnet_flags & VTNET_FLAG_INDIRECT)
		sc->vtnet_txhdrcount = sc->vtnet_tx_size;
	else
		sc->vtnet_txhdrcount = (sc->vtnet_tx_size / 2) + 1;
	sc->vtnet_txhdrarea = contigmalloc(
	    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
	    M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_txhdrarea == NULL) {
		device_printf(dev, "cannot contigmalloc the tx headers\n");
		return (ENOMEM);
	}
	for (i = 0; i < sc->vtnet_txhdrcount; i++)
		vtnet_enqueue_txhdr(sc, &sc->vtnet_txhdrarea[i]);
	sc->vtnet_macfilter = contigmalloc(
	    sizeof(struct vtnet_mac_filter),
	    M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_macfilter == NULL) {
		device_printf(dev,
		    "cannot contigmalloc the mac filter table\n");
		return (ENOMEM);
	}
	ifq_set_maxlen(&ifp->if_snd, sc->vtnet_tx_size - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);

	/* The Tx IRQ is currently always the last allocated interrupt. */
	ifq_set_cpuid(&ifp->if_snd, sc->vtnet_cpus[sc->vtnet_nintr - 1]);
	ifsq_watchdog_init(&sc->vtnet_tx_watchdog,
	    ifq_get_subq_default(&ifp->if_snd), vtnet_watchdog);
	ifq_set_hw_serialize(&ifp->if_snd, &sc->vtnet_tx_slz);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM))
		ifp->if_capabilities |= IFCAP_RXCSUM;

#if 0 /* IFCAP_LRO doesn't exist in DragonFly. */
	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
		ifp->if_capabilities |= IFCAP_LRO;
#endif

	if ((ifp->if_capabilities & IFCAP_HWCSUM) == IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	return (0);
}

static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) &&
	    (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) {
		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
			device_printf(dev, "unable to set MAC address\n");
	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
		virtio_write_device_config(dev,
		    offsetof(struct virtio_net_config, mac),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	}
}

static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		/*
		 * Generate a random locally administered unicast address.
		 *
		 * It would be nice to generate the same MAC address across
		 * reboots, but it seems all the hosts currently available
		 * support the MAC feature, so this isn't too important.
		 */
		sc->vtnet_hwaddr[0] = 0xB2;
		karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);
		vtnet_set_hwaddr(sc);
		return;
	}

	virtio_read_device_config(dev,
	    offsetof(struct virtio_net_config, mac),
	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}

static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	uint16_t status;

	dev = sc->vtnet_dev;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
		status = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, status));
	} else {
		status = VIRTIO_NET_S_LINK_UP;
	}

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;
	int link;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

	link = vtnet_is_link_up(sc);

	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
		sc->vtnet_flags |= VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is up\n");
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
		if (!ifsq_is_empty(ifsq))
			ifsq_devstart_sched(ifsq);
	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is down\n");
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
vtnet_watchdog(struct ifaltq_subque *ifsq)
{
	struct ifnet *ifp;
	struct vtnet_softc *sc;

	ifp = ifsq_get_ifp(ifsq);
	sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (virtqueue_empty(sc->vtnet_tx_vq)) {
		if_printf(ifp, "Spurious TX watchdog timeout -- ignoring\n");
		sc->vtnet_tx_watchdog.wd_timer = 0;
		return;
	}

	if_printf(ifp, "TX watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_tx_vq);
#endif

	ifp->if_flags &= ~IFF_RUNNING;
	vtnet_init(sc);
	ifsq_devstart_sched(ifsq);
}

static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	reinit = 0;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_flags & IFF_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else
					error = ENOTSUP;
			}
		} else {
			vtnet_init(sc);
		}

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
		    (ifp->if_flags & IFF_RUNNING))
			vtnet_rx_filter_mac(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
			else
				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
		}

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}

#if 0 /* IFCAP_LRO doesn't exist in DragonFly. */
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}
#endif

		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			vtnet_init(sc);
		}
		//VLAN_CAPABILITIES(ifp);

		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int new_frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		new_frame_size = sizeof(struct vtnet_rx_header) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size > MJUM9BYTES)
			return (EINVAL);

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUM9BYTES;
	} else {
		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUMPAGESIZE;
	}
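
	/*
	 * Example, assuming the default 1500 MTU with mergeable buffers:
	 * 12 bytes of virtio_net_hdr_mrg_rxbuf plus an 18-byte
	 * ether_vlan_header plus 1500 is well under MCLBYTES, so the
	 * standard 2K clusters are kept.
	 */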
	sc->vtnet_rx_mbuf_size = clsize;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

	ifp->if_mtu = new_mtu;

	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~IFF_RUNNING;
		vtnet_init(sc);
	}

	return (0);
}

static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = sc->vtnet_rx_vq;
	error = ENOSPC;
	nbufs = 0;

	while (!virtqueue_full(vq)) {
		if ((error = vtnet_newbuf(sc)) != 0)
			break;
		nbufs++;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq, NULL);

		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error. We should not get ENOSPC since we check if
		 * the virtqueue is full before attempting to add a
		 * buffer.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}

static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = sc->vtnet_rx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}

static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = sc->vtnet_tx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		m_freem(txhdr->vth_mbuf);
		vtnet_enqueue_txhdr(sc, txhdr);
	}

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
	/*
	 * The control virtqueue is only polled, therefore
	 * it should already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
	    ("Ctrl Vq not empty"));
}

static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_mbuf_size;

	/* use getcl instead of getjcl. see if_mxge.c comment line 2398 */
	//m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
	m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	if (nbufs > 1) {
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf requested without LRO_NOMRG"));

		for (i = 0; i < nbufs - 1; i++) {
			//m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
			m = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (m == NULL)
				goto fail;

			m->m_len = clsize;
			m_tail->m_next = m;
			m_tail = m;
		}
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}

static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	m = m0;
	m_prev = NULL;
	len = len0;
	m_tail = NULL;
	clsize = sc->vtnet_rx_mbuf_size;
	nreplace = 0;

	if (m->m_next != NULL)
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf without LRO_NOMRG"));

	/*
	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
	 * allocating an entire chain for each received frame. When
	 * the received frame's length is less than that of the chain,
	 * the unused mbufs are reassigned to the new chain.
	 */
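	/*
	 * For example, a 1500-byte frame arriving on an LRO_NOMRG chain of
	 * 2K clusters only uses up the first mbuf; the rest of the old
	 * chain is spliced onto the freshly allocated replacement below.
	 */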
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received
		 * a frame larger than the mbuf chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		KASSERT(m->m_len == clsize,
		    ("mbuf length not expected cluster size: %d",
		    m->m_len));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(m_prev != NULL, ("m_prev == NULL"));
	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
	    ("too many replacement mbufs: %d/%d", nreplace,
	    sc->vtnet_rx_mbuf_count));

	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move unused mbufs, if any, from the original chain
	 * onto the end of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_enqueue_rxbuf(sc, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}

static int
vtnet_newbuf(struct vtnet_softc *sc)
{
	struct mbuf *m;
	int error;

	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_enqueue_rxbuf(sc, m);
	if (error)
		m_freem(m);

	return (error);
}

static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
	struct virtqueue *vq;
	struct mbuf *m;

	vq = sc->vtnet_rx_vq;

	while (--nbufs > 0) {
		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
			break;
		vtnet_discard_rxbuf(sc, m);
	}
}

static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = vtnet_enqueue_rxbuf(sc, m);
	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}

static int
vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
	struct vtnet_rx_header *rxhdr;
	struct virtio_net_hdr *hdr;
	uint8_t *mdata;
	int offset, error;

	ASSERT_SERIALIZED(&sc->vtnet_rx_slz);
	if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
		KASSERT(m->m_next == NULL, ("chained Rx mbuf"));

	sglist_init(&sg, sc->vtnet_rx_nsegs, segs);

	mdata = mtod(m, uint8_t *);
	offset = 0;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxhdr = (struct vtnet_rx_header *) mdata;
		hdr = &rxhdr->vrh_hdr;
		offset += sizeof(struct vtnet_rx_header);

		error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
		KASSERT(error == 0, ("cannot add header to sglist"));
	}

	error = sglist_append(&sg, mdata + offset, m->m_len - offset);
	if (error)
		return (error);

	if (m->m_next != NULL) {
		error = sglist_append_mbuf(&sg, m->m_next);
		if (error)
			return (error);
	}

	return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
}

#ifdef IFPOLL_ENABLE

static void
vtnet_npoll_status(struct ifnet *ifp)
{
	struct vtnet_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	vtnet_update_link_status(sc);
}

static void
vtnet_npoll_rx(struct ifnet *ifp, void *arg __unused, int cycle)
{
	struct vtnet_softc *sc = ifp->if_softc;

	vtnet_rxeof(sc, cycle, NULL);
}

static void
vtnet_npoll_tx(struct ifnet *ifp, void *arg __unused, int cycle __unused)
{
	struct vtnet_softc *sc = ifp->if_softc;

	ASSERT_SERIALIZED(&sc->vtnet_tx_slz);

	vtnet_txeof(sc);
	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
vtnet_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct vtnet_softc *sc = ifp->if_softc;
	int i, error;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (info != NULL) {
		int cpu;

		info->ifpi_status.status_func = vtnet_npoll_status;
		info->ifpi_status.serializer = &sc->vtnet_slz;

		/* Use the same cpu for rx and tx. */
		cpu = device_get_unit(device_get_parent(sc->vtnet_dev));
		/* Shuffle a bit. */
		cpu = (cpu * 61) % netisr_ncpus;
		KKASSERT(cpu < netisr_ncpus);
		info->ifpi_tx[cpu].poll_func = vtnet_npoll_tx;
		info->ifpi_tx[cpu].arg = NULL;
		info->ifpi_tx[cpu].serializer = &sc->vtnet_tx_slz;
		ifq_set_cpuid(&ifp->if_snd, cpu);

		info->ifpi_rx[cpu].poll_func = vtnet_npoll_rx;
		info->ifpi_rx[cpu].arg = NULL;
		info->ifpi_rx[cpu].serializer = &sc->vtnet_rx_slz;

		for (i = 0; i < 3; i++)
			lwkt_serialize_handler_disable(sc->serializes[i]);
		vtnet_disable_rx_intr(sc);
		vtnet_disable_tx_intr(sc);
		for (i = 0; i < sc->vtnet_nintr; i++)
			virtio_teardown_intr(sc->vtnet_dev, i);
		if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS))
			virtio_unbind_intr(sc->vtnet_dev, -1);
		for (i = 0; i < 2; i++)
			virtio_unbind_intr(sc->vtnet_dev, i);
	} else {
		ifq_set_cpuid(&ifp->if_snd,
		    sc->vtnet_cpus[sc->vtnet_nintr - 1]);
		for (i = 0; i < 3; i++)
			lwkt_serialize_handler_enable(sc->serializes[i]);
		for (i = 0; i < 2; i++) {
			error = virtio_bind_intr(sc->vtnet_dev,
			    sc->vtnet_irqmap[i].irq, i,
			    sc->vtnet_irqmap[i].handler, sc);
			if (error) {
				device_printf(sc->vtnet_dev,
				    "cannot re-bind virtqueue IRQs\n");
			}
		}
		if (virtio_with_feature(sc->vtnet_dev, VIRTIO_NET_F_STATUS)) {
			error = virtio_bind_intr(sc->vtnet_dev, 0, -1,
			    vtnet_config_intr, sc);
			if (error) {
				device_printf(sc->vtnet_dev,
				    "cannot re-bind config_change IRQ\n");
			}
		}
		for (i = 0; i < sc->vtnet_nintr; i++) {
			error = virtio_setup_intr(sc->vtnet_dev, i,
			    sc->vtnet_intr_slz[i]);
			if (error) {
				device_printf(sc->vtnet_dev,
				    "cannot setup virtqueue interrupts\n");
			}
		}
		vtnet_enable_rx_intr(sc);
		vtnet_enable_tx_intr(sc);
	}
}

#endif /* IFPOLL_ENABLE */

static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evl;

	evl = mtod(m, struct ether_vlan_header *);

	m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

/*
 * Alternative method of doing receive checksum offloading. Rather
 * than parsing the received frame down to the IP header, use the
 * csum_offset to determine which CSUM_* flags are appropriate. We
 * can get by with doing this only because the checksum offsets are
 * unique for the things we care about.
 */
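/*
 * Concretely: offsetof(struct udphdr, uh_sum) is 6 while
 * offsetof(struct tcphdr, th_sum) is 16, so the checksum offset by
 * itself distinguishes UDP from TCP.
 */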
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct udphdr *udp;
	int csum_len;
	uint16_t eth_type;

	csum_len = hdr->csum_start + hdr->csum_offset;

	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
		return (1);
	if (m->m_len < csum_len)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Use the offset to determine the appropriate CSUM_* flags. */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
			return (1);
		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}

static int
vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
{
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			goto fail;

		if (vtnet_newbuf(sc) != 0) {
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}

static int
vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
{
	struct virtio_net_hdr lhdr;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct ether_header *eh;
	struct virtio_net_hdr *hdr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, rx_npkts;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	hdr = &lhdr;
	deq = 0;
	rx_npkts = 0;

	while (--count >= 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			vtnet_discard_rxbuf(sc, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad between the header and
			 * the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
				continue;
		}

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the VirtIO header is placed first in the
		 * mbuf's data. We no longer need num_buffers, so always use a
		 * virtio_net_hdr.
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			eh = mtod(m, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				vtnet_vlan_tag_remove(m);

				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					hdr->csum_start -=
					    ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (vtnet_rx_csum(sc, m, hdr) != 0)
				sc->vtnet_stats.rx_csum_failed++;
		}

		rx_npkts++;
		ifp->if_input(ifp, m, NULL, mycpuid);

		/*
		 * The interface may have been stopped while we were
		 * passing the packet up the network stack.
		 */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	if (deq > 0)
		virtqueue_notify(vq, NULL);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;

	return (count > 0 ? 0 : EAGAIN);
}

static void
vtnet_rx_msix_intr(void *xsc)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;

	sc = xsc;
	ifp = sc->vtnet_ifp;

	if (!virtqueue_pending(sc->vtnet_rx_vq))
		return;

	vtnet_disable_rx_intr(sc);
next:
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_rx_intr(sc);
		return;
	}

	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
	if (!more && vtnet_enable_rx_intr(sc) != 0) {
		vtnet_disable_rx_intr(sc);
		more = 1;
	}

	if (more) {
		sc->vtnet_stats.rx_task_rescheduled++;
		goto next;
	}
}

static void
vtnet_rx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc = xsc;

	lwkt_serialize_enter(&sc->vtnet_rx_slz);
	vtnet_rx_msix_intr(xsc);
	lwkt_serialize_exit(&sc->vtnet_rx_slz);
}

static void
vtnet_enqueue_txhdr(struct vtnet_softc *sc, struct vtnet_tx_header *txhdr)
{
	bzero(txhdr, sizeof(*txhdr));
	SLIST_INSERT_HEAD(&sc->vtnet_txhdr_free, txhdr, link);
}

static void
vtnet_txeof(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct ifnet *ifp;
	struct vtnet_tx_header *txhdr;
	int deq;

	vq = sc->vtnet_tx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		deq++;
		m_freem(txhdr->vth_mbuf);
		vtnet_enqueue_txhdr(sc, txhdr);
	}

	if (deq > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		if (virtqueue_empty(vq))
			sc->vtnet_tx_watchdog.wd_timer = 0;
		else
			sc->vtnet_tx_watchdog.wd_timer = VTNET_WATCHDOG_TIMEOUT;
	}
}

static struct mbuf *
vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ifnet *ifp;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	int ip_offset;
	uint16_t eth_type, csum_start;
	uint8_t ip_proto, gso_type;

	ifp = sc->vtnet_ifp;

	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset) {
		if ((m = m_pullup(m, ip_offset)) == NULL)
			return (NULL);
	}

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset) {
			if ((m = m_pullup(m, ip_offset)) == NULL)
				return (NULL);
		}
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip));
			if (m == NULL)
				return (NULL);
		}

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		ip_proto = ip->ip_p;
		csum_start = ip_offset + (ip->ip_hl << 2);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
			if (m == NULL)
				return (NULL);
		}

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/*
		 * XXX Assume no extension headers are present. Presently,
		 * this will always be true in the case of TSO, and FreeBSD
		 * does not perform checksum offloading of IPv6 yet.
		 */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		break;

	default:
		return (m);
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;

		sc->vtnet_stats.tx_csum_offloaded++;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ip_proto != IPPROTO_TCP)
			return (m);

		if (m->m_len < csum_start + sizeof(struct tcphdr)) {
			m = m_pullup(m, csum_start + sizeof(struct tcphdr));
			if (m == NULL)
				return (NULL);
		}

		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
		hdr->gso_type = gso_type;
		hdr->hdr_len = csum_start + (tcp->th_off << 2);
		hdr->gso_size = m->m_pkthdr.tso_segsz;

		if (tcp->th_flags & TH_CWR) {
			/*
			 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
			 * ECN support is only configurable globally with the
			 * net.inet.tcp.ecn.enable sysctl knob.
			 */
			if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
				if_printf(ifp, "TSO with ECN not supported "
				    "by host\n");
				m_freem(m);
				return (NULL);
			}

			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		}

		sc->vtnet_stats.tx_tso_offloaded++;
	}

	return (m);
}

static int
vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
	struct virtqueue *vq;
	struct mbuf *m;
	int error;

	vq = sc->vtnet_tx_vq;
	m = *m_head;

	sglist_init(&sg, sc->vtnet_tx_nsegs, segs);
	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg.sg_nseg == 1,
	    ("%s: error %d adding header to sglist", __func__, error));

	error = sglist_append_mbuf(&sg, m);
	if (error) {
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		sc->vtnet_stats.tx_defragged++;

		error = sglist_append_mbuf(&sg, m);
		if (error)
			goto fail;
	}

	txhdr->vth_mbuf = m;
	error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);

	return (error);

fail:
	sc->vtnet_stats.tx_defrag_failed++;
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}

static struct mbuf *
vtnet_vlan_tag_insert(struct mbuf *m)
{
	struct mbuf *n;
	struct ether_vlan_header *evl;

	if (M_WRITABLE(m) == 0) {
		n = m_dup(m, M_NOWAIT);
		m_freem(m);
		if ((m = n) == NULL)
			return (NULL);
	}

	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
	if (m == NULL)
		return (NULL);
	if (m->m_len < sizeof(struct ether_vlan_header)) {
		m = m_pullup(m, sizeof(struct ether_vlan_header));
		if (m == NULL)
			return (NULL);
	}

	/* Insert 802.1Q header into the existing Ethernet header. */
	evl = mtod(m, struct ether_vlan_header *);
	bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
	    (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
	evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
	evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
	m->m_flags &= ~M_VLANTAG;

	return (m);
}

static int
vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	txhdr = SLIST_FIRST(&sc->vtnet_txhdr_free);
	if (txhdr == NULL)
		return (ENOBUFS);
	SLIST_REMOVE_HEAD(&sc->vtnet_txhdr_free, link);

	/*
	 * Always use the non-mergeable header to simplify things. When
	 * the mergeable feature is negotiated, the num_buffers field
	 * must be set to zero. We use vtnet_hdr_size later to enqueue
	 * the correct header size to the host.
	 */
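	/*
	 * Note: the headers are bzero'd each time they are returned to the
	 * free list by vtnet_enqueue_txhdr(), so num_buffers is already
	 * zero here.
	 */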
	hdr = &txhdr->vth_uhdr.hdr;
	m = *m_head;

	error = ENOBUFS;

	if (m->m_flags & M_VLANTAG) {
		//m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		m = vtnet_vlan_tag_insert(m);
		if ((*m_head = m) == NULL)
			goto fail;
		m->m_flags &= ~M_VLANTAG;
	}

	if (m->m_pkthdr.csum_flags != 0) {
		m = vtnet_tx_offload(sc, m, hdr);
		if ((*m_head = m) == NULL)
			goto fail;
	}

	error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
fail:
	if (error)
		vtnet_enqueue_txhdr(sc, txhdr);
	return (error);
}

static void
vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int enq;

	sc = ifp->if_softc;
	vq = sc->vtnet_tx_vq;
	enq = 0;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(&sc->vtnet_tx_slz);

	if ((ifp->if_flags & (IFF_RUNNING)) !=
	    IFF_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0))
		return;

#ifdef VTNET_TX_INTR_MODERATION
	if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
		vtnet_txeof(sc);
#endif

	while (!ifsq_is_empty(ifsq)) {
		if (virtqueue_full(vq)) {
			ifsq_set_oactive(ifsq);
			break;
		}

		m0 = ifsq_dequeue(ifsq);
		if (m0 == NULL)
			break;

		if (vtnet_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			ifsq_prepend(ifsq, m0);
			ifsq_set_oactive(ifsq);
			break;
		}

		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		virtqueue_notify(vq, NULL);
		sc->vtnet_tx_watchdog.wd_timer = VTNET_WATCHDOG_TIMEOUT;
	}
}

static void
vtnet_tx_msix_intr(void *xsc)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;

	sc = xsc;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

	if (!virtqueue_pending(sc->vtnet_tx_vq))
		return;

	vtnet_disable_tx_intr(sc);
next:
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_tx_intr(sc);
		return;
	}

	vtnet_txeof(sc);

	if (!ifsq_is_empty(ifsq))
		ifsq_devstart(ifsq);

	if (vtnet_enable_tx_intr(sc) != 0) {
		vtnet_disable_tx_intr(sc);
		sc->vtnet_stats.tx_task_rescheduled++;
		goto next;
	}
}

static void
vtnet_tx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc = xsc;

	lwkt_serialize_enter(&sc->vtnet_tx_slz);
	vtnet_tx_msix_intr(xsc);
	lwkt_serialize_exit(&sc->vtnet_tx_slz);
}

static void
vtnet_config_intr(void *arg)
{
	struct vtnet_softc *sc;

	sc = arg;

	vtnet_update_link_status(sc);
}

static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	ifq_clr_oactive(&ifp->if_snd);
	ifsq_watchdog_stop(&sc->vtnet_tx_watchdog);
	ifp->if_flags &= ~(IFF_RUNNING);

	vtnet_disable_rx_intr(sc);
	vtnet_disable_tx_intr(sc);

	/*
	 * Stop the host VirtIO adapter. Note this will reset the host
	 * adapter's state back to the pre-initialized state, so in
	 * order to make the device usable again, we must drive it
	 * through virtio_reinit() and virtio_reinit_complete().
	 */
	virtio_stop(dev);

	sc->vtnet_flags &= ~VTNET_FLAG_LINK;

	vtnet_free_rx_mbufs(sc);
	vtnet_free_tx_mbufs(sc);
}

static int
vtnet_virtio_reinit(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint64_t features;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	/*
	 * Re-negotiate with the host, removing any disabled receive
	 * features. Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */

	if (ifp->if_capabilities & IFCAP_RXCSUM) {
		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

#if 0 /* IFCAP_LRO doesn't exist in DragonFly. */
	if (ifp->if_capabilities & IFCAP_LRO) {
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
			features &= ~VTNET_LRO_FEATURES;
	}
#endif

	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
	}

	error = virtio_reinit(dev, features);
	if (error)
		device_printf(dev, "virtio reinit error %d\n", error);

	return (error);
}

static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc;
	device_t dev;
	struct ifnet *ifp;
	int error;

	sc = xsc;
	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Stop host's adapter, cancel any pending I/O. */
	vtnet_stop(sc);

	/* Reinitialize the host device. */
	error = vtnet_virtio_reinit(sc);
	if (error) {
		device_printf(dev,
		    "reinitialization failed, stopping device...\n");
		vtnet_stop(sc);
		return;
	}

	/* Update host with assigned MAC address. */
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	vtnet_set_hwaddr(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	error = vtnet_init_rx_vq(sc);
	if (error) {
		device_printf(dev,
		    "cannot allocate mbufs for Rx virtqueue\n");
		vtnet_stop(sc);
		return;
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
			/* Restore promiscuous and all-multicast modes. */
			vtnet_rx_filter(sc);

			/* Restore filtered MAC addresses. */
			vtnet_rx_filter_mac(sc);
		}

		/* Restore VLAN filters. */
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			vtnet_rx_filter_vlan(sc);
	}

#ifdef IFPOLL_ENABLE
	if (!(ifp->if_flags & IFF_NPOLLING))
#endif
	{
		vtnet_enable_rx_intr(sc);
		vtnet_enable_tx_intr(sc);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifsq_watchdog_start(&sc->vtnet_tx_watchdog);

	virtio_reinit_complete(dev);

	vtnet_update_link_status(sc);
}

static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;
	void *c;

	vq = sc->vtnet_ctrl_vq;

	ASSERT_IFNET_SERIALIZED_ALL(sc->vtnet_ifp);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
	    ("no control virtqueue"));
	KASSERT(virtqueue_empty(vq),
	    ("control command already enqueued"));

	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
		return;

	/*
	 * XXX We can safely drop the serializer between here, and the end of
	 *     the function, when we can correctly sleep for this command to
	 *     be finished.
	 */
	virtqueue_notify(vq, NULL);

	/*
	 * Poll until the command is complete. Previously, we would
	 * sleep until the control virtqueue interrupt handler woke
	 * us up, but dropping the VTNET_MTX leads to serialization
	 * difficulties.
	 *
	 * Furthermore, it appears QEMU/KVM only allocates three MSIX
	 * vectors. Two of those vectors are needed for the Rx and Tx
	 * virtqueues. We do not support sharing both a Vq and config
	 * changed notification on the same MSIX vector.
	 */
	c = virtqueue_poll(vq, NULL);
	KASSERT(c == cookie, ("unexpected control command response"));
}
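
/*
 * The control command helpers below all build their sglist with the
 * readable segments first (the control header and the command payload)
 * followed by a single writable ack byte, which is why they pass
 * "sg.sg_nseg - 1, 1" to vtnet_exec_ctrl_cmd() above.
 */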

static int
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
{
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8);
		uint8_t pad2;
		uint8_t ack;
	} s;
	struct sglist_seg segs[3];
	struct sglist sg;
	int error;

	s.hdr.class = VIRTIO_NET_CTRL_MAC;
	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
	s.ack = VIRTIO_NET_ERR;

	/* Copy the mac address into physically contiguous memory */
	memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN);

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr,
	    sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding set MAC msg to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
		device_printf(dev, "cannot %s promiscuous mode\n",
		    (ifp->if_flags & IFF_PROMISC) ? "enable" : "disable");

	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
		device_printf(dev, "cannot %s all-multicast mode\n",
		    (ifp->if_flags & IFF_ALLMULTI) ? "enable" : "disable");
}

static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint8_t onoff;
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("%s: CTRL_RX feature not negotiated", __func__));

	s.hdr.class = VIRTIO_NET_CTRL_RX;
	s.hdr.cmd = cmd;
	s.onoff = !!on;
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding Rx message to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}

static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{

	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}
static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifaddr_container *ifac;
	struct ifmultiaddr *ifma;
	int ucnt, mcnt, promisc, allmulti, error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	ucnt = 0;
	mcnt = 0;
	promisc = 0;
	allmulti = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("%s: CTRL_RX feature not negotiated", __func__));

	/* Use the MAC filtering table allocated in vtnet_attach. */
	filter = sc->vtnet_macfilter;
	memset(filter, 0, sizeof(struct vtnet_mac_filter));

	/* Unicast MAC addresses: */
	//if_addr_rlock(ifp);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		ifa = ifac->ifa;
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
			continue;
		else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
			promisc = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
		ucnt++;
	}
	//if_addr_runlock(ifp);

	if (promisc != 0) {
		filter->vmf_unicast.nentries = 0;
		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_unicast.nentries = ucnt;

	/* Multicast MAC addresses: */
	//if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
			allmulti = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
		mcnt++;
	}
	//if_maddr_runlock(ifp);

	if (allmulti != 0) {
		filter->vmf_multicast.nentries = 0;
		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_multicast.nentries = mcnt;

	if (promisc != 0 && allmulti != 0)
		goto out;

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 4, segs);
	error = 0;
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 4,
	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
		if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
		if_printf(ifp, "cannot enable all-multicast mode\n");
}

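/*
 * Add or remove a single VLAN tag in the host's VLAN filter table. The
 * 16-bit tag travels as its own sglist segment between the control header
 * and the ack byte.
 */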
static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint16_t tag;
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	s.tag = tag;
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding VLAN message to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

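/*
 * Reprogram the host VLAN filter from the shadow table, for example after
 * the interface has been reinitialized. Each set bit in a shadow word
 * corresponds to one configured VLAN tag.
 */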
static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	uint32_t w;
	uint16_t tag;
	int i, bit, nvlans;

	ASSERT_IFNET_SERIALIZED_ALL(sc->vtnet_ifp);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("%s: VLAN_FILTER feature not negotiated", __func__));

	nvlans = sc->vtnet_nvlans;

	/* Enable the filter for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
		w = sc->vtnet_vlan_shadow[i];

		while ((bit = ffs(w) - 1) != -1) {
			w &= ~(1 << bit);
			tag = sizeof(w) * CHAR_BIT * i + bit;
			nvlans--;

			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
				device_printf(sc->vtnet_dev,
				    "cannot enable VLAN %d filter\n", tag);
			}
		}
	}

	KASSERT(nvlans == 0, ("VLAN count incorrect"));
}

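/*
 * Record a VLAN tag in the shadow table (4096 tags packed into 128
 * 32-bit words) and, if hardware VLAN filtering is enabled, mirror the
 * change into the host filter table.
 */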
static void
vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct ifnet *ifp;
	int idx, bit;

	ifp = sc->vtnet_ifp;
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	if (tag == 0 || tag > 4095)
		return;

	ifnet_serialize_all(ifp);

	/* Update shadow VLAN table. */
	if (add) {
		sc->vtnet_nvlans++;
		sc->vtnet_vlan_shadow[idx] |= (1 << bit);
	} else {
		sc->vtnet_nvlans--;
		sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
		device_printf(sc->vtnet_dev,
		    "cannot %s VLAN %d %s the host filter table\n",
		    add ? "add" : "remove", tag, add ? "to" : "from");
	}

	ifnet_deserialize_all(ifp);
}

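/*
 * VLAN config callbacks. These fire for VLAN changes on any interface in
 * the system, so events for foreign interfaces are ignored.
 */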
static void
vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 1, tag);
}

static void
vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{

	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 0, tag);
}

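/*
 * Media handling. VirtIO provides no PHY, so the media type is fixed;
 * only the link state changes, as reported by vtnet_is_link_up().
 */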
static int
vtnet_ifmedia_upd(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	ifm = &sc->vtnet_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

static void
vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (vtnet_is_link_up(sc) != 0) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= VTNET_MEDIATYPE;
	} else
		ifmr->ifm_active |= IFM_NONE;
}

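/*
 * Register the driver statistics under the device's sysctl tree, where
 * they appear as read-only counters (for example under dev.vtnet.0).
 */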
static void
vtnet_add_statistics(struct vtnet_softc *sc)
{
	device_t dev;
	struct vtnet_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtnet_dev;
	stats = &sc->vtnet_stats;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD, &stats->mbuf_alloc_failed, 0,
	    "Mbuf cluster allocation failures");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD, &stats->rx_frame_too_large, 0,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD, &stats->rx_mergeable_failed, 0,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD, &stats->rx_csum_bad_offset, 0,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLFLAG_RD, &stats->rx_csum_failed, 0,
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLFLAG_RD, &stats->rx_csum_offloaded, 0,
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLFLAG_RD, &stats->rx_task_rescheduled, 0,
	    "Times the receive interrupt task rescheduled itself");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0,
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
	    CTLFLAG_RD, &stats->tx_defragged, 0,
	    "Transmit mbufs defragged");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
	    CTLFLAG_RD, &stats->tx_defrag_failed, 0,
	    "Aborted transmit of buffer because defrag failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLFLAG_RD, &stats->tx_csum_offloaded, 0,
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLFLAG_RD, &stats->tx_tso_offloaded, 0,
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLFLAG_RD, &stats->tx_task_rescheduled, 0,
	    "Times the transmit interrupt task rescheduled itself");
}

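/*
 * Virtqueue interrupt control. virtqueue_enable_intr() returns nonzero
 * when buffers are already pending in the ring, in which case the caller
 * should rescan the queue instead of waiting for the next interrupt.
 * With VTNET_TX_INTR_MODERATION defined, the transmit interrupt is left
 * disabled and vtnet_enable_tx_intr() becomes a no-op.
 */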
static int
vtnet_enable_rx_intr(struct vtnet_softc *sc)
{

	return (virtqueue_enable_intr(sc->vtnet_rx_vq));
}

static void
vtnet_disable_rx_intr(struct vtnet_softc *sc)
{

	virtqueue_disable_intr(sc->vtnet_rx_vq);
}

static int
vtnet_enable_tx_intr(struct vtnet_softc *sc)
{

#ifdef VTNET_TX_INTR_MODERATION
	return (0);
#else
	return (virtqueue_enable_intr(sc->vtnet_tx_vq));
#endif
}

static void
vtnet_disable_tx_intr(struct vtnet_softc *sc)
{

	virtqueue_disable_intr(sc->vtnet_tx_vq);
}