/*
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/serialize.h>

#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/ifq_var.h>
#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>

#include <dev/virtual/virtio/virtio/virtio.h>
#include <dev/virtual/virtio/virtio/virtqueue.h>

#include "virtio_net.h"
#include "virtio_if.h"
struct vtnet_statistics {
	unsigned long	mbuf_alloc_failed;

	unsigned long	rx_frame_too_large;
	unsigned long	rx_enq_replacement_failed;
	unsigned long	rx_mergeable_failed;
	unsigned long	rx_csum_bad_ethtype;
	unsigned long	rx_csum_bad_start;
	unsigned long	rx_csum_bad_ipproto;
	unsigned long	rx_csum_bad_offset;
	unsigned long	rx_csum_failed;
	unsigned long	rx_csum_offloaded;
	unsigned long	rx_task_rescheduled;

	unsigned long	tx_csum_offloaded;
	unsigned long	tx_tso_offloaded;
	unsigned long	tx_csum_bad_ethtype;
	unsigned long	tx_tso_bad_ethtype;
	unsigned long	tx_task_rescheduled;
};
struct vtnet_softc {
	device_t		vtnet_dev;
	struct ifnet		*vtnet_ifp;
	struct lwkt_serialize	vtnet_slz;

	uint32_t		vtnet_flags;
#define VTNET_FLAG_LINK		0x0001
#define VTNET_FLAG_SUSPENDED	0x0002
#define VTNET_FLAG_CTRL_VQ	0x0004
#define VTNET_FLAG_CTRL_RX	0x0008
#define VTNET_FLAG_VLAN_FILTER	0x0010
#define VTNET_FLAG_TSO_ECN	0x0020
#define VTNET_FLAG_MRG_RXBUFS	0x0040
#define VTNET_FLAG_LRO_NOMRG	0x0080

	struct virtqueue	*vtnet_rx_vq;
	struct virtqueue	*vtnet_tx_vq;
	struct virtqueue	*vtnet_ctrl_vq;

	struct vtnet_tx_header	*vtnet_txhdrarea;
	uint32_t		vtnet_txhdridx;
	struct vtnet_mac_filter	*vtnet_macfilter;

	int			vtnet_hdr_size;
	int			vtnet_rx_size;
	int			vtnet_tx_size;
	int			vtnet_rx_process_limit;
	int			vtnet_rx_mbuf_size;
	int			vtnet_rx_mbuf_count;
	int			vtnet_if_flags;
	int			vtnet_watchdog_timer;
	uint64_t		vtnet_features;

	struct task		vtnet_cfgchg_task;

	struct vtnet_statistics	vtnet_stats;

	struct callout		vtnet_tick_ch;

	eventhandler_tag	vtnet_vlan_attach;
	eventhandler_tag	vtnet_vlan_detach;

	struct ifmedia		vtnet_media;
	/*
	 * Fake media type; the host does not provide us with
	 * any real media information.
	 */
#define VTNET_MEDIATYPE		(IFM_ETHER | IFM_1000_T | IFM_FDX)
	char			vtnet_hwaddr[ETHER_ADDR_LEN];

	/*
	 * During reset, the host's VLAN filtering table is lost. The
	 * array below is used to restore all the VLANs configured on
	 * this interface after a reset.
	 */
#define VTNET_VLAN_SHADOW_SIZE	(4096 / 32)
	int			vtnet_nvlans;
	uint32_t		vtnet_vlan_shadow[VTNET_VLAN_SHADOW_SIZE];

	char			vtnet_mtx_name[16];
};
/*
 * When mergeable buffers are not negotiated, the vtnet_rx_header structure
 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
 * both keep the VirtIO header and the data non-contiguous and to keep the
 * frame's payload 4 byte aligned.
 *
 * When mergeable buffers are negotiated, the host puts the VirtIO header in
 * the beginning of the first mbuf's data.
 */
#define VTNET_RX_HEADER_PAD	4
struct vtnet_rx_header {
	struct virtio_net_hdr	vrh_hdr;
	char			vrh_pad[VTNET_RX_HEADER_PAD];
} __packed;
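
/*
 * Layout note: assuming the standard 10 byte legacy virtio_net_hdr, the pad
 * above makes vtnet_rx_header 14 bytes -- the same size as the Ethernet
 * header that follows it -- so the IP header lands 4 byte aligned.
 */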
/*
 * For each outgoing frame, the vtnet_tx_header below is allocated from
 * the vtnet_txhdrarea.
 */
struct vtnet_tx_header {
	union {
		struct virtio_net_hdr		hdr;
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
	} vth_uhdr;

	struct mbuf	*vth_mbuf;
};

MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");
/*
 * The VirtIO specification does not place a limit on the number of MAC
 * addresses the guest driver may request to be filtered. In practice,
 * the host is constrained by available resources. To simplify this driver,
 * impose a reasonably high limit of MAC addresses we will filter before
 * falling back to promiscuous or all-multicast modes.
 */
#define VTNET_MAX_MAC_ENTRIES	128

struct vtnet_mac_table {
	uint32_t	nentries;
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
} __packed;

struct vtnet_mac_filter {
	struct vtnet_mac_table	vmf_unicast;
	uint32_t		vmf_pad; /* Make tables non-contiguous. */
	struct vtnet_mac_table	vmf_multicast;
} __packed;

#define VTNET_WATCHDOG_TIMEOUT	5
#define VTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP)	/* | CSUM_SCTP */
/* Features desired/implemented by this driver. */
#define VTNET_FEATURES			\
    (VIRTIO_NET_F_MAC		|	\
     VIRTIO_NET_F_STATUS	|	\
     VIRTIO_NET_F_CTRL_VQ	|	\
     VIRTIO_NET_F_CTRL_RX	|	\
     VIRTIO_NET_F_CTRL_VLAN	|	\
     VIRTIO_NET_F_CSUM		|	\
     VIRTIO_NET_F_HOST_TSO4	|	\
     VIRTIO_NET_F_HOST_TSO6	|	\
     VIRTIO_NET_F_HOST_ECN	|	\
     VIRTIO_NET_F_GUEST_CSUM	|	\
     VIRTIO_NET_F_GUEST_TSO4	|	\
     VIRTIO_NET_F_GUEST_TSO6	|	\
     VIRTIO_NET_F_GUEST_ECN	|	\
     VIRTIO_NET_F_MRG_RXBUF)
/*
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
 * frames larger than 1514 bytes. We do not yet support software LRO.
 */
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)

#define VTNET_MAX_MTU		65536
#define VTNET_MAX_RX_SIZE	65550
/*
 * Used to preallocate the Vq indirect descriptors. The first segment
 * is reserved for the header.
 */
#define VTNET_MIN_RX_SEGS	2
#define VTNET_MAX_RX_SEGS	34
#define VTNET_MAX_TX_SEGS	34

#define IFCAP_TSO4		0x00100	/* can do TCP Segmentation Offload */
#define IFCAP_TSO6		0x00200	/* can do TCP6 Segmentation Offload */
#define IFCAP_LRO		0x00400	/* can do Large Receive Offload */
#define IFCAP_VLAN_HWFILTER	0x10000 /* interface hw can filter vlan tag */
#define IFCAP_VLAN_HWTSO	0x40000 /* can do IFCAP_TSO on VLANs */
/*
 * Assert we can receive and transmit the maximum with regular
 * size clusters.
 */
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
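
/*
 * Sanity check of the assertions above, assuming 2K (MCLBYTES) clusters:
 * (34 - 1) * 2048 = 67584, which covers both VTNET_MAX_RX_SIZE (65550)
 * and VTNET_MAX_MTU (65536) with one segment left over for the header.
 */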
/*
 * Determine how many mbufs are in each receive buffer. For LRO without
 * mergeable descriptors, we must allocate an mbuf chain large enough to
 * hold both the vtnet_rx_header and the maximum receivable data.
 */
#define VTNET_NEEDED_RX_MBUFS(_sc)					\
	((((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0) ? 1 :	\
	    howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,	\
	    (_sc)->vtnet_rx_mbuf_size))
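
/*
 * Worked example, assuming MCLBYTES is 2048: with LRO_NOMRG, each receive
 * buffer must hold sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE
 * bytes, so howmany(14 + 65550, 2048) = 33 mbufs are chained per buffer,
 * within the VTNET_MAX_RX_SEGS limit asserted above.
 */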
static int	vtnet_modevent(module_t, int, void *);

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static void	vtnet_watchdog(struct vtnet_softc *);
static void	vtnet_config_change_task(void *, int);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int	vtnet_init_rx_vq(struct vtnet_softc *);
static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
static void	vtnet_free_ctrl_vq(struct vtnet_softc *);

static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
		    struct mbuf **);
static int	vtnet_replace_rxbuf(struct vtnet_softc *,
		    struct mbuf *, int);
static int	vtnet_newbuf(struct vtnet_softc *);
static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
static void	vtnet_rx_intr_task(void *);
static int	vtnet_rx_vq_intr(void *);

static void	vtnet_txeof(struct vtnet_softc *);
static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void	vtnet_start_locked(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_start(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_tick(void *);
static void	vtnet_tx_intr_task(void *);
static int	vtnet_tx_vq_intr(void *);

static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);
static void	vtnet_rx_filter(struct vtnet_softc *sc);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);

static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_set_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	vtnet_add_statistics(struct vtnet_softc *);

static int	vtnet_enable_rx_intr(struct vtnet_softc *);
static int	vtnet_enable_tx_intr(struct vtnet_softc *);
static void	vtnet_disable_rx_intr(struct vtnet_softc *);
static void	vtnet_disable_tx_intr(struct vtnet_softc *);
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 1;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 1;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
/*
 * Reducing the number of transmit completed interrupts can
 * improve performance. To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start and watchdog paths. The price to pay for this
 * is the m_free'ing of transmitted mbufs may be delayed until
 * the watchdog fires.
 */
#define VTNET_TX_INTR_MODERATION
static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
	{ VIRTIO_NET_F_STATUS,		"Status"	},
	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
	{ VIRTIO_NET_F_MQ,		"RFS"		},

	{ 0, NULL }
};
static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtnet_probe),
	DEVMETHOD(device_attach,	vtnet_attach),
	DEVMETHOD(device_detach,	vtnet_detach),
	DEVMETHOD(device_suspend,	vtnet_suspend),
	DEVMETHOD(device_resume,	vtnet_resume),
	DEVMETHOD(device_shutdown,	vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	DEVMETHOD_END
};

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};

static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
    vtnet_modevent, 0);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
static int
vtnet_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
static int
vtnet_probe(device_t dev)
{
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}
static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int error, tx_size;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	lwkt_serialize_init(&sc->vtnet_slz);
	callout_init(&sc->vtnet_tick_ch);

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	vtnet_add_statistics(sc);

	virtio_set_feature_desc(dev, vtnet_feature_desc);
	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
	}

	sc->vtnet_rx_mbuf_size = MCLBYTES;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
	}

	vtnet_get_hwaddr(sc);

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_start = vtnet_start;
	ifp->if_ioctl = vtnet_ioctl;

	sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq);
	sc->vtnet_rx_process_limit = sc->vtnet_rx_size;

	tx_size = virtqueue_size(sc->vtnet_tx_vq);
	sc->vtnet_tx_size = tx_size;
	sc->vtnet_txhdridx = 0;
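	/*
	 * The Tx header area below is presumably sized to (tx_size / 2) + 1
	 * entries because each frame consumes at least two descriptors
	 * (header plus data), so at most tx_size / 2 frames can be pending
	 * in the virtqueue, plus one header for the frame being staged.
	 */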
	sc->vtnet_txhdrarea = contigmalloc(
	    ((sc->vtnet_tx_size / 2) + 1) * sizeof(struct vtnet_tx_header),
	    M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_txhdrarea == NULL) {
		device_printf(dev, "cannot contigmalloc the tx headers\n");
		error = ENOMEM;
		goto fail;
	}
	sc->vtnet_macfilter = contigmalloc(
	    sizeof(struct vtnet_mac_filter),
	    M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_macfilter == NULL) {
		device_printf(dev,
		    "cannot contigmalloc the mac filter table\n");
		error = ENOMEM;
		goto fail;
	}

	ifq_set_maxlen(&ifp->if_snd, tx_size - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
		//ifp->if_capabilities |= IFCAP_LINKSTATE;
		kprintf("add dynamic link state\n");
	}

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);

	error = virtio_setup_intr(dev, &sc->vtnet_slz);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	/*
	 * Device defaults to promiscuous mode for backwards
	 * compatibility. Turn it off if possible.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		if (vtnet_set_promisc(sc, 0) != 0) {
			ifp->if_flags |= IFF_PROMISC;
			device_printf(dev,
			    "cannot disable promiscuous mode\n");
		}
		lwkt_serialize_exit(&sc->vtnet_slz);
	} else {
		ifp->if_flags |= IFF_PROMISC;
	}

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}
static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		vtnet_stop(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);

		callout_stop(&sc->vtnet_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task);

		ether_ifdetach(ifp);
	}

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	if (ifp != NULL) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	if (sc->vtnet_rx_vq != NULL)
		vtnet_free_rx_mbufs(sc);
	if (sc->vtnet_tx_vq != NULL)
		vtnet_free_tx_mbufs(sc);
	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	if (sc->vtnet_txhdrarea != NULL) {
		contigfree(sc->vtnet_txhdrarea,
		    ((sc->vtnet_tx_size / 2) + 1) *
		    sizeof(struct vtnet_tx_header), M_VTNET);
		sc->vtnet_txhdrarea = NULL;
	}
	if (sc->vtnet_macfilter != NULL) {
		contigfree(sc->vtnet_macfilter,
		    sizeof(struct vtnet_mac_filter), M_DEVBUF);
		sc->vtnet_macfilter = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	return (0);
}
static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}
static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}
static int
vtnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}
static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);

	return (1);
}
static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	if (vtnet_csum_disable)
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * TSO and LRO are only available when their corresponding
	 * checksum offload feature is also negotiated.
	 */
	if (vtnet_csum_disable || vtnet_tso_disable)
		mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
		    VIRTIO_NET_F_HOST_ECN;

	if (vtnet_csum_disable || vtnet_lro_disable)
		mask |= VTNET_LRO_FEATURES;

	features = VTNET_FEATURES & ~mask;
	features |= VIRTIO_F_NOTIFY_ON_EMPTY;
	sc->vtnet_features = virtio_negotiate_features(dev, features);
}
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs, rxsegs;

	dev = sc->vtnet_dev;
	nvqs = 2;

	/*
	 * Indirect descriptors are not needed for the Rx
	 * virtqueue when mergeable buffers are negotiated.
	 * The header is placed inline with the data, not
	 * in a separate descriptor, and mbuf clusters are
	 * always physically contiguous.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ?
		    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
	} else {
		rxsegs = 0;
	}

	VQ_ALLOC_INFO_INIT(&vq_info[0], rxsegs,
	    vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
	    "%s receive", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], VTNET_MAX_TX_SEGS,
	    vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
	    "%s transmit", device_get_nameunit(dev));

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		nvqs++;

		VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s control",
		    device_get_nameunit(dev));
	}

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}
static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		virtio_read_device_config(dev,
		    offsetof(struct virtio_net_config, mac),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	} else {
		/* Generate random locally administered unicast address. */
		sc->vtnet_hwaddr[0] = 0xB2;
		karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);

		vtnet_set_hwaddr(sc);
	}
}
static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	virtio_write_device_config(dev,
	    offsetof(struct virtio_net_config, mac),
	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}
static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	uint16_t status;

	dev = sc->vtnet_dev;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	status = virtio_read_dev_config_2(dev,
	    offsetof(struct virtio_net_config, status));

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}
static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;
	int link;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

	link = vtnet_is_link_up(sc);

	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
		sc->vtnet_flags |= VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is up\n");
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
		if (!ifsq_is_empty(ifsq))
			vtnet_start_locked(ifp, ifsq);
	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is down\n");

		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}
static void
vtnet_watchdog(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

#ifdef VTNET_TX_INTR_MODERATION
	vtnet_txeof(sc);
#endif

	if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
		return;

	if_printf(ifp, "watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_tx_vq);
#endif

	ifp->if_flags &= ~IFF_RUNNING;
	vtnet_init_locked(sc);
}
static void
vtnet_config_change_task(void *arg, int pending)
{
	struct vtnet_softc *sc;

	sc = arg;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_update_link_status(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}
static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	reinit = 0;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			lwkt_serialize_enter(&sc->vtnet_slz);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			lwkt_serialize_exit(&sc->vtnet_slz);
		}
		break;

	case SIOCSIFFLAGS:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_flags & IFF_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else
					error = ENOTSUP;
			}
		} else {
			vtnet_init_locked(sc);
		}

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
		    (ifp->if_flags & IFF_RUNNING))
			vtnet_rx_filter_mac(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		lwkt_serialize_enter(&sc->vtnet_slz);

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
			else
				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
		}

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}

		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			vtnet_init_locked(sc);
		}
		//VLAN_CAPABILITIES(ifp);

		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int new_frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		new_frame_size = sizeof(struct vtnet_rx_header) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size > MJUM9BYTES)
			return (EINVAL);

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUM9BYTES;
	} else {
		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUMPAGESIZE;
	}

	sc->vtnet_rx_mbuf_size = clsize;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

	ifp->if_mtu = new_mtu;

	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~IFF_RUNNING;
		vtnet_init_locked(sc);
	}

	return (0);
}
static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = sc->vtnet_rx_vq;
	error = ENOSPC;
	nbufs = 0;

	while (!virtqueue_full(vq)) {
		if ((error = vtnet_newbuf(sc)) != 0)
			break;
		nbufs++;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);

		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error. We should not get ENOSPC since we check if
		 * the virtqueue is full before attempting to add a
		 * buffer.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}
static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = sc->vtnet_rx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}

static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = sc->vtnet_tx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL)
		m_freem(txhdr->vth_mbuf);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
	/*
	 * The control virtqueue is only polled, therefore
	 * it should already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
	    ("Ctrl Vq not empty"));
}
static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_mbuf_size;

	/* Use m_getcl() instead of m_getjcl(); see if_mxge.c comment line 2398. */
	//m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
	m_head = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	if (nbufs > 1) {
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf requested without LRO_NOMRG"));

		for (i = 0; i < nbufs - 1; i++) {
			//m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
			m = m_getcl(MB_DONTWAIT, MT_DATA, 0);
			if (m == NULL)
				goto fail;

			m->m_len = clsize;
			m_tail->m_next = m;
			m_tail = m;
		}
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}
static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	m = m0;
	m_prev = NULL;
	len = len0;

	m_tail = NULL;
	clsize = sc->vtnet_rx_mbuf_size;
	nreplace = 0;

	if (m->m_next != NULL)
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf without LRO_NOMRG"));

	/*
	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
	 * allocating an entire chain for each received frame. When
	 * the received frame's length is less than that of the chain,
	 * the unused mbufs are reassigned to the new chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received
		 * a frame larger than the mbuf chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		KASSERT(m->m_len == clsize,
		    ("mbuf length not expected cluster size: %d",
		    m->m_len));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(m_prev != NULL, ("m_prev == NULL"));
	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
	    ("too many replacement mbufs: %d/%d", nreplace,
	    sc->vtnet_rx_mbuf_count));

	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move unused mbufs, if any, from the original chain
	 * onto the end of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_enqueue_rxbuf(sc, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * of the one just dequeued so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}
static int
vtnet_newbuf(struct vtnet_softc *sc)
{
	struct mbuf *m;
	int error;

	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_enqueue_rxbuf(sc, m);
	if (error)
		m_freem(m);

	return (error);
}
static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
	struct virtqueue *vq;
	struct mbuf *m;

	vq = sc->vtnet_rx_vq;

	while (--nbufs > 0) {
		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
			break;
		vtnet_discard_rxbuf(sc, m);
	}
}

static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = vtnet_enqueue_rxbuf(sc, m);
	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}
static int
vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
	struct vtnet_rx_header *rxhdr;
	struct virtio_net_hdr *hdr;
	uint8_t *mdata;
	int offset, error;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
		KASSERT(m->m_next == NULL, ("chained Rx mbuf"));

	sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);

	mdata = mtod(m, uint8_t *);
	offset = 0;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxhdr = (struct vtnet_rx_header *) mdata;
		hdr = &rxhdr->vrh_hdr;
		offset += sizeof(struct vtnet_rx_header);

		error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
		KASSERT(error == 0, ("cannot add header to sglist"));
	}

	error = sglist_append(&sg, mdata + offset, m->m_len - offset);
	KASSERT(error == 0, ("cannot add mbuf to sglist"));

	if (m->m_next != NULL) {
		error = sglist_append_mbuf(&sg, m->m_next);
		KASSERT(error == 0, ("cannot add mbuf chain to sglist"));
	}

	return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
}
static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evl;

	evl = mtod(m, struct ether_vlan_header *);

	m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
/*
 * Alternative method of doing receive checksum offloading. Rather
 * than parsing the received frame down to the IP header, use the
 * csum_offset to determine which CSUM_* flags are appropriate. We
 * can get by with doing this only because the checksum offsets are
 * unique for the things we care about.
 */
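/*
 * For reference: offsetof(struct udphdr, uh_sum) is 6 and
 * offsetof(struct tcphdr, th_sum) is 16, so the two cases in the
 * switch below cannot collide.
 */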
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct udphdr *udp;
	uint16_t eth_type;
	int csum_len;

	csum_len = hdr->csum_start + hdr->csum_offset;

	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
		return (1);
	if (m->m_len < csum_len)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Use the offset to determine the appropriate CSUM_* flags. */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
			return (1);
		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	case offsetof(struct sctphdr, checksum):
		//m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}
static int
vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
{
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			goto fail;

		if (vtnet_newbuf(sc) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}
static int
vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
{
	struct virtio_net_hdr lhdr;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct ether_header *eh;
	struct virtio_net_hdr *hdr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, rx_npkts;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	hdr = &lhdr;
	deq = 0;
	rx_npkts = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while (--count >= 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			ifp->if_ierrors++;
			vtnet_discard_rxbuf(sc, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad between the header and
			 * the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
				continue;
		}

		ifp->if_ipackets++;

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the VirtIO header is placed first in the
		 * mbuf's data. We no longer need num_buffers, so always use a
		 * plain virtio_net_hdr.
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			eh = mtod(m, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				vtnet_vlan_tag_remove(m);

				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					hdr->csum_start -=
					    ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (vtnet_rx_csum(sc, m, hdr) != 0)
				sc->vtnet_stats.rx_csum_failed++;
		}

		lwkt_serialize_exit(&sc->vtnet_slz);
		rx_npkts++;
		ifp->if_input(ifp, m, NULL, -1);
		lwkt_serialize_enter(&sc->vtnet_slz);

		/*
		 * The interface may have been stopped while we were
		 * passing the packet up the network stack.
		 */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	virtqueue_notify(vq, &sc->vtnet_slz);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;

	return (count > 0 ? 0 : EAGAIN);
}
static void
vtnet_rx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;

	sc = arg;
	ifp = sc->vtnet_ifp;

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_rx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
	if (!more && vtnet_enable_rx_intr(sc) != 0) {
		vtnet_disable_rx_intr(sc);
		more = 1;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);

	if (more) {
		sc->vtnet_stats.rx_task_rescheduled++;
		goto next;
	}
}
static int
vtnet_rx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_rx_intr(sc);
	vtnet_rx_intr_task(sc);

	return (1);
}
static void
vtnet_txeof(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct ifnet *ifp;
	struct vtnet_tx_header *txhdr;
	int deq;

	vq = sc->vtnet_tx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		deq++;
		ifp->if_opackets++;
		m_freem(txhdr->vth_mbuf);
	}

	if (deq > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		if (virtqueue_empty(vq))
			sc->vtnet_watchdog_timer = 0;
	}
}
static struct mbuf *
vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ifnet *ifp;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	int ip_offset;
	uint16_t eth_type, csum_start;
	uint8_t ip_proto, gso_type;

	ifp = sc->vtnet_ifp;

	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset) {
		if ((m = m_pullup(m, ip_offset)) == NULL)
			return (NULL);
	}

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset) {
			if ((m = m_pullup(m, ip_offset)) == NULL)
				return (NULL);
		}
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip));
			if (m == NULL)
				return (NULL);
		}

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		ip_proto = ip->ip_p;
		csum_start = ip_offset + (ip->ip_hl << 2);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
			if (m == NULL)
				return (NULL);
		}

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/*
		 * XXX Assume no extension headers are present. Presently,
		 * this will always be true in the case of TSO, and FreeBSD
		 * does not perform checksum offloading of IPv6 yet.
		 */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		break;

	default:
		sc->vtnet_stats.tx_csum_bad_ethtype++;
		return (m);
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;

		sc->vtnet_stats.tx_csum_offloaded++;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ip_proto != IPPROTO_TCP)
			return (m);

		if (m->m_len < csum_start + sizeof(struct tcphdr)) {
			m = m_pullup(m, csum_start + sizeof(struct tcphdr));
			if (m == NULL)
				return (NULL);
		}

		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
		hdr->gso_type = gso_type;
		hdr->hdr_len = csum_start + (tcp->th_off << 2);
		hdr->gso_size = m->m_pkthdr.tso_segsz;

		if (tcp->th_flags & TH_CWR) {
			/*
			 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
			 * ECN support is only configurable globally with the
			 * net.inet.tcp.ecn.enable sysctl knob.
			 */
			if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
				if_printf(ifp, "TSO with ECN not supported "
				    "by host\n");
				m_freem(m);
				return (NULL);
			}

			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		}

		sc->vtnet_stats.tx_tso_offloaded++;
	}

	return (m);
}
static int
vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
	struct virtqueue *vq;
	struct mbuf *m;
	int collapsed, error;

	vq = sc->vtnet_tx_vq;
	m = *m_head;
	collapsed = 0;

	sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg.sg_nseg == 1,
	    ("cannot add header to sglist"));

again:
	error = sglist_append_mbuf(&sg, m);
	if (error) {
		if (collapsed)
			goto fail;

		//m = m_collapse(m, MB_DONTWAIT, VTNET_MAX_TX_SEGS - 1);
		m = m_defrag(m, MB_DONTWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		collapsed = 1;
		goto again;
	}

	txhdr->vth_mbuf = m;

	return (virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0));

fail:
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}
static struct mbuf *
vtnet_vlan_tag_insert(struct mbuf *m)
{
	struct mbuf *n;
	struct ether_vlan_header *evl;

	if (M_WRITABLE(m) == 0) {
		n = m_dup(m, MB_DONTWAIT);
		m_freem(m);
		if ((m = n) == NULL)
			return (NULL);
	}

	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, MB_DONTWAIT);
	if (m == NULL)
		return (NULL);
	if (m->m_len < sizeof(struct ether_vlan_header)) {
		m = m_pullup(m, sizeof(struct ether_vlan_header));
		if (m == NULL)
			return (NULL);
	}

	/* Insert 802.1Q header into the existing Ethernet header. */
	evl = mtod(m, struct ether_vlan_header *);
	bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
	    (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
	evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
	evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
	m->m_flags &= ~M_VLANTAG;

	return (m);
}
static int
vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	txhdr = &sc->vtnet_txhdrarea[sc->vtnet_txhdridx];
	memset(txhdr, 0, sizeof(struct vtnet_tx_header));

	/*
	 * Always use the non-mergeable header to simplify things. When
	 * the mergeable feature is negotiated, the num_buffers field
	 * must be set to zero. We use vtnet_hdr_size later to enqueue
	 * the correct header size to the host.
	 */
	hdr = &txhdr->vth_uhdr.hdr;
	m = *m_head;

	if (m->m_flags & M_VLANTAG) {
		//m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		m = vtnet_vlan_tag_insert(m);
		if ((*m_head = m) == NULL)
			return (ENOBUFS);
		m->m_flags &= ~M_VLANTAG;
	}

	if (m->m_pkthdr.csum_flags != 0) {
		m = vtnet_tx_offload(sc, m, hdr);
		if ((*m_head = m) == NULL)
			return (ENOBUFS);
	}

	error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
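	/*
	 * Advance to the next Tx header slot; the modulo below matches the
	 * (vtnet_tx_size / 2) + 1 entries contigmalloc'ed in vtnet_attach().
	 */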
	if (error == 0)
		sc->vtnet_txhdridx =
		    (sc->vtnet_txhdridx + 1) % ((sc->vtnet_tx_size / 2) + 1);

	return (error);
}
static void
vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_start_locked(ifp, ifsq);
	lwkt_serialize_exit(&sc->vtnet_slz);
}
static void
vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int enq;

	sc = ifp->if_softc;
	vq = sc->vtnet_tx_vq;
	enq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    (sc->vtnet_flags & VTNET_FLAG_LINK) == 0)
		return;

#ifdef VTNET_TX_INTR_MODERATION
	if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
		vtnet_txeof(sc);
#endif

	while (!ifsq_is_empty(ifsq)) {
		if (virtqueue_full(vq)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;

		if (vtnet_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			ifq_prepend(&ifp->if_snd, m0);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);
		sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT;
	}
}
static void
vtnet_tick(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_rx_vq);
	virtqueue_dump(sc->vtnet_tx_vq);
#endif

	vtnet_watchdog(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}
static void
vtnet_tx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;

	sc = arg;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_tx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	vtnet_txeof(sc);

	if (!ifsq_is_empty(ifsq))
		vtnet_start_locked(ifp, ifsq);

	if (vtnet_enable_tx_intr(sc) != 0) {
		vtnet_disable_tx_intr(sc);
		sc->vtnet_stats.tx_task_rescheduled++;
//		lwkt_serialize_exit(&sc->vtnet_slz);
		goto next;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);
}
static int
vtnet_tx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_tx_intr(sc);
	vtnet_tx_intr_task(sc);

	return (1);
}
static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	sc->vtnet_watchdog_timer = 0;
	callout_stop(&sc->vtnet_tick_ch);
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	vtnet_disable_rx_intr(sc);
	vtnet_disable_tx_intr(sc);

	/*
	 * Stop the host VirtIO adapter. Note this will reset the host
	 * adapter's state back to the pre-initialized state, so in
	 * order to make the device usable again, we must drive it
	 * through virtio_reinit() and virtio_reinit_complete().
	 */
	virtio_stop(dev);
	sc->vtnet_flags &= ~VTNET_FLAG_LINK;

	vtnet_free_rx_mbufs(sc);
	vtnet_free_tx_mbufs(sc);
}
static int
vtnet_reinit(struct vtnet_softc *sc)
{
	struct ifnet *ifp;
	uint64_t features;

	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	/*
	 * Re-negotiate with the host, removing any disabled receive
	 * features. Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */
	if (ifp->if_capabilities & IFCAP_RXCSUM) {
		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

	if (ifp->if_capabilities & IFCAP_LRO) {
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
			features &= ~VTNET_LRO_FEATURES;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
	}

	return (virtio_reinit(sc->vtnet_dev, features));
}
static void
vtnet_init_locked(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Stop host's adapter, cancel any pending I/O. */
	vtnet_stop(sc);

	/* Reinitialize the host device. */
	error = vtnet_reinit(sc);
	if (error) {
		device_printf(dev,
		    "reinitialization failed, stopping device...\n");
		vtnet_stop(sc);
		return;
	}

	/* Update host with assigned MAC address. */
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	vtnet_set_hwaddr(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	error = vtnet_init_rx_vq(sc);
	if (error) {
		device_printf(dev,
		    "cannot allocate mbufs for Rx virtqueue\n");
		vtnet_stop(sc);
		return;
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
			/* Restore promiscuous and all-multicast modes. */
			vtnet_rx_filter(sc);

			/* Restore filtered MAC addresses. */
			vtnet_rx_filter_mac(sc);
		}

		/* Restore VLAN filters. */
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			vtnet_rx_filter_vlan(sc);
	}

	vtnet_enable_rx_intr(sc);
	vtnet_enable_tx_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	virtio_reinit_complete(dev);

	vtnet_update_link_status(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}
static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_init_locked(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}
static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;
	void *c;

	vq = sc->vtnet_ctrl_vq;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
	    ("no control virtqueue"));
	KASSERT(virtqueue_empty(vq),
	    ("control command already enqueued"));

	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
		return;

	virtqueue_notify(vq, &sc->vtnet_slz);

	/*
	 * Poll until the command is complete. Previously, we would
	 * sleep until the control virtqueue interrupt handler woke
	 * us up, but dropping the serializer leads to serialization
	 * difficulties.
	 *
	 * Furthermore, it appears QEMU/KVM only allocates three MSIX
	 * vectors. Two of those vectors are needed for the Rx and Tx
	 * virtqueues. We do not support sharing both a Vq and config
	 * changed notification on the same MSIX vector.
	 */
	c = virtqueue_poll(vq, NULL);
	KASSERT(c == cookie, ("unexpected control command response"));
}
static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
		device_printf(dev, "cannot %s promiscuous mode\n",
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");

	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
		device_printf(dev, "cannot %s all-multicast mode\n",
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
}
static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t onoff, ack;
	int error;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0)
		return (ENOTSUP);

	error = 0;

	hdr.class = VIRTIO_NET_CTRL_RX;
	hdr.cmd = cmd;
	onoff = !!on;
	ack = VIRTIO_NET_ERR;
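
	/*
	 * Control messages are laid out as readable segments (the header
	 * and the command payload) followed by a single writable ack byte
	 * for the host, hence the sg.sg_nseg - 1 readable / 1 writable
	 * split passed to vtnet_exec_ctrl_cmd() below.
	 */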
	sglist_init(&sg, 3, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &onoff, sizeof(uint8_t));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("error adding Rx filter message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}
static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}

static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}
static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifaddr_container *ifac;
	struct ifmultiaddr *ifma;
	int ucnt, mcnt, promisc, allmulti, error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	ucnt = 0;
	mcnt = 0;
	promisc = 0;
	allmulti = 0;
	error = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	/* Use the MAC filtering table allocated in vtnet_attach. */
	filter = sc->vtnet_macfilter;
	memset(filter, 0, sizeof(struct vtnet_mac_filter));

	/* Unicast MAC addresses: */
	//if_addr_rlock(ifp);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		ifa = ifac->ifa;
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		else if (ucnt == VTNET_MAX_MAC_ENTRIES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
		ucnt++;
	}
	//if_addr_runlock(ifp);

	if (ucnt >= VTNET_MAX_MAC_ENTRIES) {
		promisc = 1;
		filter->vmf_unicast.nentries = 0;

		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_unicast.nentries = ucnt;

	/* Multicast MAC addresses: */
	//if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		else if (mcnt == VTNET_MAX_MAC_ENTRIES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
		mcnt++;
	}
	//if_maddr_runlock(ifp);

	if (mcnt >= VTNET_MAX_MAC_ENTRIES) {
		allmulti = 1;
		filter->vmf_multicast.nentries = 0;

		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_multicast.nentries = mcnt;

	if (promisc && allmulti)
		goto out;

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 4, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(struct vtnet_mac_table));
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(struct vtnet_mac_table));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 4,
	    ("error adding MAC filtering message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc)
		if (vtnet_set_promisc(sc, 1) != 0)
			if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti)
		if (vtnet_set_allmulti(sc, 1) != 0)
			if_printf(ifp, "cannot enable all-multicast mode\n");
}
static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct sglist_seg segs[3];
	struct sglist sg;
	uint8_t ack;
	int error;

	hdr.class = VIRTIO_NET_CTRL_VLAN;
	hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	ack = VIRTIO_NET_ERR;
	error = 0;

	sglist_init(&sg, 3, segs);
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &tag, sizeof(uint16_t));
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("error adding VLAN control message to sglist"));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	return (ack == VIRTIO_NET_OK ? 0 : EIO);
}
static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	device_t dev;
	uint32_t w, mask;
	uint16_t tag;
	int i, nvlans, error;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("VLAN_FILTER feature not negotiated"));

	dev = sc->vtnet_dev;
	nvlans = sc->vtnet_nvlans;
	error = 0;

	/* Enable filtering for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
		w = sc->vtnet_vlan_shadow[i];
		for (mask = 1, tag = i * 32; w != 0; mask <<= 1, tag++) {
			if ((w & mask) != 0) {
				w &= ~mask;
				nvlans--;

				if (vtnet_exec_vlan_filter(sc, 1, tag) != 0)
					error++;
			}
		}
	}

	KASSERT(nvlans == 0, ("VLAN count incorrect"));
	if (error)
		device_printf(dev, "cannot restore VLAN filter table\n");
}
static void
vtnet_set_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct ifnet *ifp;
	int idx, bit;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("VLAN_FILTER feature not negotiated"));

	if ((tag == 0) || (tag > 4095))
		return;

	ifp = sc->vtnet_ifp;
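
	/*
	 * Each word of the shadow table tracks 32 VLAN IDs. For example,
	 * VLAN 100 maps to word 100 / 32 = 3, bit 100 % 32 = 4.
	 */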
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	lwkt_serialize_enter(&sc->vtnet_slz);

	/* Update shadow VLAN table. */
	if (add) {
		sc->vtnet_nvlans++;
		sc->vtnet_vlan_shadow[idx] |= (1 << bit);
	} else {
		sc->vtnet_nvlans--;
		sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
		if (vtnet_exec_vlan_filter(sc, add, tag) != 0) {
			device_printf(sc->vtnet_dev,
			    "cannot %s VLAN %d %s the host filter table\n",
			    add ? "add" : "remove", tag,
			    add ? "to" : "from");
		}
	}

	lwkt_serialize_exit(&sc->vtnet_slz);
}
static void
vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
	if (ifp->if_softc != arg)
		return;

	vtnet_set_vlan_filter(arg, 1, tag);
}

static void
vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
	if (ifp->if_softc != arg)
		return;

	vtnet_set_vlan_filter(arg, 0, tag);
}
static int
vtnet_ifmedia_upd(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	ifm = &sc->vtnet_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

static void
vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (vtnet_is_link_up(sc) != 0) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= VTNET_MEDIATYPE;
	} else
		ifmr->ifm_active |= IFM_NONE;
	lwkt_serialize_exit(&sc->vtnet_slz);
}
static void
vtnet_add_statistics(struct vtnet_softc *sc)
{
	device_t dev;
	struct vtnet_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtnet_dev;
	stats = &sc->vtnet_stats;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD, &stats->mbuf_alloc_failed,
	    "Mbuf cluster allocation failures");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD, &stats->rx_frame_too_large,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD, &stats->rx_enq_replacement_failed,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD, &stats->rx_mergeable_failed,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_start",
	    CTLFLAG_RD, &stats->rx_csum_bad_start,
	    "Received checksum offloaded buffer with incorrect start offset");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD, &stats->rx_csum_bad_offset,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLFLAG_RD, &stats->rx_csum_failed,
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLFLAG_RD, &stats->rx_csum_offloaded,
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLFLAG_RD, &stats->rx_task_rescheduled,
	    "Times the receive interrupt task rescheduled itself");

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLFLAG_RD, &stats->tx_csum_offloaded,
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLFLAG_RD, &stats->tx_tso_offloaded,
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype,
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLFLAG_RD, &stats->tx_task_rescheduled,
	    "Times the transmit interrupt task rescheduled itself");
}
static int
vtnet_enable_rx_intr(struct vtnet_softc *sc)
{
	return (virtqueue_enable_intr(sc->vtnet_rx_vq));
}

static void
vtnet_disable_rx_intr(struct vtnet_softc *sc)
{
	virtqueue_disable_intr(sc->vtnet_rx_vq);
}

static int
vtnet_enable_tx_intr(struct vtnet_softc *sc)
{
#ifdef VTNET_TX_INTR_MODERATION
	return (0);
#else
	return (virtqueue_enable_intr(sc->vtnet_tx_vq));
#endif
}

static void
vtnet_disable_tx_intr(struct vtnet_softc *sc)
{
	virtqueue_disable_intr(sc->vtnet_tx_vq);
}