/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/serialize.h>
#include <sys/bus.h>

#include <machine/limits.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/ifq_var.h>
#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <dev/virtual/virtio/virtio/virtio.h>
#include <dev/virtual/virtio/virtio/virtqueue.h>

#include "virtio_net.h"
#include "virtio_if.h"

struct vtnet_statistics {
	uint64_t mbuf_alloc_failed;

	uint64_t rx_frame_too_large;
	uint64_t rx_enq_replacement_failed;
	uint64_t rx_mergeable_failed;
	uint64_t rx_csum_bad_ethtype;
	uint64_t rx_csum_bad_ipproto;
	uint64_t rx_csum_bad_offset;
	uint64_t rx_csum_failed;
	uint64_t rx_csum_offloaded;
	uint64_t rx_task_rescheduled;

	uint64_t tx_csum_offloaded;
	uint64_t tx_tso_offloaded;
	uint64_t tx_csum_bad_ethtype;
	uint64_t tx_tso_bad_ethtype;
	uint64_t tx_task_rescheduled;
};

struct vtnet_softc {
	device_t vtnet_dev;
	struct ifnet *vtnet_ifp;
	struct lwkt_serialize vtnet_slz;

	uint32_t vtnet_flags;
#define VTNET_FLAG_LINK		0x0001
#define VTNET_FLAG_SUSPENDED	0x0002
#define VTNET_FLAG_MAC		0x0004
#define VTNET_FLAG_CTRL_VQ	0x0008
#define VTNET_FLAG_CTRL_RX	0x0010
#define VTNET_FLAG_CTRL_MAC	0x0020
#define VTNET_FLAG_VLAN_FILTER	0x0040
#define VTNET_FLAG_TSO_ECN	0x0080
#define VTNET_FLAG_MRG_RXBUFS	0x0100
#define VTNET_FLAG_LRO_NOMRG	0x0200

	struct virtqueue *vtnet_rx_vq;
	struct virtqueue *vtnet_tx_vq;
	struct virtqueue *vtnet_ctrl_vq;

	struct vtnet_tx_header *vtnet_txhdrarea;
	uint32_t vtnet_txhdridx;
	struct vtnet_mac_filter *vtnet_macfilter;

	int vtnet_hdr_size;
	int vtnet_tx_size;
	int vtnet_rx_size;
	int vtnet_rx_process_limit;
	int vtnet_rx_mbuf_size;
	int vtnet_rx_mbuf_count;
	int vtnet_if_flags;
	int vtnet_watchdog_timer;
	uint64_t vtnet_features;

	struct task vtnet_cfgchg_task;

	struct vtnet_statistics vtnet_stats;

	struct callout vtnet_tick_ch;

	eventhandler_tag vtnet_vlan_attach;
	eventhandler_tag vtnet_vlan_detach;

	struct ifmedia vtnet_media;
	/*
	 * Fake media type; the host does not provide us with
	 * any real media information.
	 */
#define VTNET_MEDIATYPE		(IFM_ETHER | IFM_1000_T | IFM_FDX)
	char vtnet_hwaddr[ETHER_ADDR_LEN];

	/*
	 * During reset, the host's VLAN filtering table is lost. The
	 * array below is used to restore all the VLANs configured on
	 * this interface after a reset.
	 */
#define VTNET_VLAN_SHADOW_SIZE	(4096 / 32)
	int vtnet_nvlans;
	uint32_t vtnet_vlan_shadow[VTNET_VLAN_SHADOW_SIZE];
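
	/*
	 * Sizing note: the shadow table above holds one bit per valid
	 * VLAN ID, so 4096 IDs / 32 bits per uint32_t = 128 words.
	 */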

	char vtnet_mtx_name[16];
};

/*
 * When mergeable buffers are not negotiated, the vtnet_rx_header structure
 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
 * both keep the VirtIO header and the data non-contiguous and to keep the
 * frame's payload 4 byte aligned.
 *
 * When mergeable buffers are negotiated, the host puts the VirtIO header in
 * the beginning of the first mbuf's data.
 */
#define VTNET_RX_HEADER_PAD	4
struct vtnet_rx_header {
	struct virtio_net_hdr vrh_hdr;
	char vrh_pad[VTNET_RX_HEADER_PAD];
} __packed;
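
/*
 * Layout note: sizeof(struct virtio_net_hdr) is 10 bytes, so with the
 * 4 byte pad the Ethernet frame begins 14 bytes into the buffer and the
 * IP header lands at offset 14 + ETHER_HDR_LEN = 28, preserving the
 * 4 byte payload alignment described above.
 */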

/*
 * For each outgoing frame, the vtnet_tx_header below is allocated from
 * the vtnet_txhdrarea.
 */
struct vtnet_tx_header {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	} vth_uhdr;

	struct mbuf *vth_mbuf;
};

MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");

/*
 * The VirtIO specification does not place a limit on the number of MAC
 * addresses the guest driver may request to be filtered. In practice,
 * the host is constrained by available resources. To simplify this driver,
 * impose a reasonably high limit of MAC addresses we will filter before
 * falling back to promiscuous or all-multicast modes.
 */
#define VTNET_MAX_MAC_ENTRIES	128

struct vtnet_mac_table {
	uint32_t nentries;
	uint8_t macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
} __packed;

struct vtnet_mac_filter {
	struct vtnet_mac_table vmf_unicast;
	uint32_t vmf_pad; /* Make tables non-contiguous. */
	struct vtnet_mac_table vmf_multicast;
};
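
/*
 * The two tables above are sent to the host in a single
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command, each as a 32-bit entry count
 * followed by the packed MAC addresses; see the sglist built in
 * vtnet_rx_filter_mac(). The vmf_pad word keeps the tables
 * non-contiguous so each can be posted as its own sglist segment.
 */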

#define VTNET_WATCHDOG_TIMEOUT	5
#define VTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP)

/* Features desired/implemented by this driver. */
#define VTNET_FEATURES \
    (VIRTIO_NET_F_MAC		| \
    VIRTIO_NET_F_STATUS		| \
    VIRTIO_NET_F_CTRL_VQ	| \
    VIRTIO_NET_F_CTRL_RX	| \
    VIRTIO_NET_F_CTRL_MAC_ADDR	| \
    VIRTIO_NET_F_CTRL_VLAN	| \
    VIRTIO_NET_F_CSUM		| \
    VIRTIO_NET_F_HOST_TSO4	| \
    VIRTIO_NET_F_HOST_TSO6	| \
    VIRTIO_NET_F_HOST_ECN	| \
    VIRTIO_NET_F_GUEST_CSUM	| \
    VIRTIO_NET_F_GUEST_TSO4	| \
    VIRTIO_NET_F_GUEST_TSO6	| \
    VIRTIO_NET_F_GUEST_ECN	| \
    VIRTIO_NET_F_MRG_RXBUF)

/*
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
 * frames larger than 1514 bytes. We do not yet support software LRO
 * via tcp_lro_rx().
 */
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)

#define VTNET_MAX_MTU		65536
#define VTNET_MAX_RX_SIZE	65550
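
/*
 * Note VTNET_MAX_RX_SIZE is VTNET_MAX_MTU plus a 14-byte Ethernet
 * header: 65536 + ETHER_HDR_LEN = 65550.
 */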

/*
 * Used to preallocate the Vq indirect descriptors. The first segment
 * is reserved for the header.
 */
#define VTNET_MIN_RX_SEGS	2
#define VTNET_MAX_RX_SEGS	34
#define VTNET_MAX_TX_SEGS	34

#define IFCAP_LRO		0x00400	/* can do Large Receive Offload */
#define IFCAP_VLAN_HWFILTER	0x10000	/* interface hw can filter vlan tag */
#define IFCAP_VLAN_HWTSO	0x40000	/* can do IFCAP_TSO on VLANs */

/*
 * Assert we can receive and transmit the maximum with regular
 * size clusters.
 */
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);
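
/*
 * For the usual MCLBYTES of 2048 the asserts above work out to
 * (34 - 1) * 2048 = 67584 bytes, covering both VTNET_MAX_RX_SIZE (65550)
 * and VTNET_MAX_MTU (65536); the remaining segment is the VirtIO header.
 */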

/*
 * Determine how many mbufs are in each receive buffer. For LRO without
 * mergeable descriptors, we must allocate an mbuf chain large enough to
 * hold both the vtnet_rx_header and the maximum receivable data.
 */
#define VTNET_NEEDED_RX_MBUFS(_sc)					\
	((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :		\
	    howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,	\
	        (_sc)->vtnet_rx_mbuf_size)
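
/*
 * Worked example for the LRO_NOMRG case, assuming 2KB clusters and a
 * 14-byte vtnet_rx_header: howmany(14 + 65550, 2048) = 33 mbufs per
 * receive buffer, which plus the header segment fits within
 * VTNET_MAX_RX_SEGS (34).
 */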

static int	vtnet_modevent(module_t, int, void *);

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static void	vtnet_watchdog(struct vtnet_softc *);
static void	vtnet_config_change_task(void *, int);
static int	vtnet_setup_interface(struct vtnet_softc *);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int	vtnet_init_rx_vq(struct vtnet_softc *);
static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
static void	vtnet_free_ctrl_vq(struct vtnet_softc *);

static struct mbuf *vtnet_alloc_rxbuf(struct vtnet_softc *, int,
		    struct mbuf **);
static int	vtnet_replace_rxbuf(struct vtnet_softc *,
		    struct mbuf *, int);
static int	vtnet_newbuf(struct vtnet_softc *);
static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
static void	vtnet_rx_intr_task(void *);
static int	vtnet_rx_vq_intr(void *);

static void	vtnet_txeof(struct vtnet_softc *);
static struct mbuf *vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void	vtnet_start_locked(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_start(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_tick(void *);
static void	vtnet_tx_intr_task(void *);
static int	vtnet_tx_vq_intr(void *);

static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_virtio_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);
static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_rx_filter(struct vtnet_softc *sc);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);

static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	vtnet_add_statistics(struct vtnet_softc *);

static int	vtnet_enable_rx_intr(struct vtnet_softc *);
static int	vtnet_enable_tx_intr(struct vtnet_softc *);
static void	vtnet_disable_rx_intr(struct vtnet_softc *);
static void	vtnet_disable_tx_intr(struct vtnet_softc *);

/* Tunables. */
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 1;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 1;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);

/*
 * Reducing the number of transmit completed interrupts can
 * improve performance. To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start and watchdog paths. The price to pay for this
 * is the m_free'ing of transmitted mbufs may be delayed until
 * the watchdog fires.
 */
#define VTNET_TX_INTR_MODERATION

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,			"TxChecksum"	},
	{ VIRTIO_NET_F_GUEST_CSUM,		"RxChecksum"	},
	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,	"DynOffload"	},
	{ VIRTIO_NET_F_MAC,			"MacAddress"	},
	{ VIRTIO_NET_F_GSO,			"TxAllGSO"	},
	{ VIRTIO_NET_F_GUEST_TSO4,		"RxTSOv4"	},
	{ VIRTIO_NET_F_GUEST_TSO6,		"RxTSOv6"	},
	{ VIRTIO_NET_F_GUEST_ECN,		"RxECN"		},
	{ VIRTIO_NET_F_GUEST_UFO,		"RxUFO"		},
	{ VIRTIO_NET_F_HOST_TSO4,		"TxTSOv4"	},
	{ VIRTIO_NET_F_HOST_TSO6,		"TxTSOv6"	},
	{ VIRTIO_NET_F_HOST_ECN,		"TxTSOECN"	},
	{ VIRTIO_NET_F_HOST_UFO,		"TxUFO"		},
	{ VIRTIO_NET_F_MRG_RXBUF,		"MrgRxBuf"	},
	{ VIRTIO_NET_F_STATUS,			"Status"	},
	{ VIRTIO_NET_F_CTRL_VQ,			"ControlVq"	},
	{ VIRTIO_NET_F_CTRL_RX,			"RxMode"	},
	{ VIRTIO_NET_F_CTRL_VLAN,		"VLanFilter"	},
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,		"RxModeExtra"	},
	{ VIRTIO_NET_F_GUEST_ANNOUNCE,		"GuestAnnounce"	},
	{ VIRTIO_NET_F_MQ,			"RFS"		},
	{ VIRTIO_NET_F_CTRL_MAC_ADDR,		"SetMacAddress"	},

	{ 0, NULL }
};

static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtnet_probe),
	DEVMETHOD(device_attach,	vtnet_attach),
	DEVMETHOD(device_detach,	vtnet_detach),
	DEVMETHOD(device_suspend,	vtnet_suspend),
	DEVMETHOD(device_resume,	vtnet_resume),
	DEVMETHOD(device_shutdown,	vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	DEVMETHOD_END
};

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};

static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass,
    vtnet_modevent, NULL);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);

static int
vtnet_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtnet_probe(device_t dev)
{
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	lwkt_serialize_init(&sc->vtnet_slz);
	callout_init(&sc->vtnet_tick_ch);

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	vtnet_add_statistics(sc);

	/* Register our feature descriptions. */
	virtio_set_feature_desc(dev, vtnet_feature_desc);
	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
	}

	sc->vtnet_rx_mbuf_size = MCLBYTES;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
		    virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
	}

	/* Read (or generate) the MAC address for the adapter. */
	vtnet_get_hwaddr(sc);

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);

	error = virtio_setup_intr(dev, &sc->vtnet_slz);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		ether_ifdetach(sc->vtnet_ifp);
		goto fail;
	}

	/*
	 * Device defaults to promiscuous mode for backwards
	 * compatibility. Turn it off if possible.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		if (vtnet_set_promisc(sc, 0) != 0) {
			sc->vtnet_ifp->if_flags |= IFF_PROMISC;
			device_printf(dev,
			    "cannot disable promiscuous mode\n");
		}
		lwkt_serialize_exit(&sc->vtnet_slz);
	} else {
		sc->vtnet_ifp->if_flags |= IFF_PROMISC;
	}

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}

static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		vtnet_stop(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);

		callout_stop(&sc->vtnet_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task);

		ether_ifdetach(ifp);
	}

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	if (ifp != NULL) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	if (sc->vtnet_rx_vq != NULL)
		vtnet_free_rx_mbufs(sc);
	if (sc->vtnet_tx_vq != NULL)
		vtnet_free_tx_mbufs(sc);
	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	if (sc->vtnet_txhdrarea != NULL) {
		contigfree(sc->vtnet_txhdrarea,
		    ((sc->vtnet_tx_size / 2) + 1) *
		    sizeof(struct vtnet_tx_header), M_VTNET);
		sc->vtnet_txhdrarea = NULL;
	}
	if (sc->vtnet_macfilter != NULL) {
		contigfree(sc->vtnet_macfilter,
		    sizeof(struct vtnet_mac_filter), M_DEVBUF);
		sc->vtnet_macfilter = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	return (0);
}

static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}

static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);

	return (1);
}

static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	if (vtnet_csum_disable)
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * TSO and LRO are only available when their corresponding checksum
	 * offload feature is also negotiated.
	 */
	if (vtnet_csum_disable || vtnet_tso_disable)
		mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
		    VIRTIO_NET_F_HOST_ECN;
	if (vtnet_csum_disable || vtnet_lro_disable)
		mask |= VTNET_LRO_FEATURES;

	features = VTNET_FEATURES & ~mask;
	features |= VIRTIO_F_NOTIFY_ON_EMPTY;
	features |= VIRTIO_F_ANY_LAYOUT;
	sc->vtnet_features = virtio_negotiate_features(dev, features);

	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the VirtIO header. This requires up to 34 descriptors
		 * with MCLBYTES clusters. If we do not have indirect
		 * descriptors, LRO is disabled since the virtqueue will not
		 * contain very many receive buffers.
		 */
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "LRO disabled due to both mergeable buffers and "
			    "indirect descriptors not negotiated\n");

			features &= ~VTNET_LRO_FEATURES;
			sc->vtnet_features =
			    virtio_negotiate_features(dev, features);
		} else
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
	}
}

static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs, rxsegs;

	dev = sc->vtnet_dev;
	nvqs = 2;

	/*
	 * Indirect descriptors are not needed for the Rx
	 * virtqueue when mergeable buffers are negotiated.
	 * The header is placed inline with the data, not
	 * in a separate descriptor, and mbuf clusters are
	 * always physically contiguous.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxsegs = sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG ?
		    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
	} else
		rxsegs = 0;

	VQ_ALLOC_INFO_INIT(&vq_info[0], rxsegs,
	    vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
	    "%s receive", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], VTNET_MAX_TX_SEGS,
	    vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
	    "%s transmit", device_get_nameunit(dev));

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		nvqs = 3;

		VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s control",
		    device_get_nameunit(dev));
	}

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}

static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int tx_size;

	dev = sc->vtnet_dev;

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_start = vtnet_start;
	ifp->if_ioctl = vtnet_ioctl;

	sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq);
	sc->vtnet_rx_process_limit = sc->vtnet_rx_size;

	tx_size = virtqueue_size(sc->vtnet_tx_vq);
	sc->vtnet_tx_size = tx_size;
	sc->vtnet_txhdridx = 0;
	sc->vtnet_txhdrarea = contigmalloc(
	    ((sc->vtnet_tx_size / 2) + 1) * sizeof(struct vtnet_tx_header),
	    M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_txhdrarea == NULL) {
		device_printf(dev, "cannot contigmalloc the tx headers\n");
		return (ENOMEM);
	}
	sc->vtnet_macfilter = contigmalloc(
	    sizeof(struct vtnet_mac_filter),
	    M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_macfilter == NULL) {
		device_printf(dev,
		    "cannot contigmalloc the mac filter table\n");
		return (ENOMEM);
	}
	ifq_set_maxlen(&ifp->if_snd, tx_size - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
		//ifp->if_capabilities |= IFCAP_LINKSTATE;
		kprintf("add dynamic link state\n");
	}

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	return (0);
}

static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) &&
	    (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) {
		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
			device_printf(dev, "unable to set MAC address\n");
	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
		virtio_write_device_config(dev,
		    offsetof(struct virtio_net_config, mac),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	}
}

static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		/*
		 * Generate a random locally administered unicast address.
		 *
		 * It would be nice to generate the same MAC address across
		 * reboots, but it seems all the hosts currently available
		 * support the MAC feature, so this isn't too important.
		 */
		sc->vtnet_hwaddr[0] = 0xB2;
		karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);
		vtnet_set_hwaddr(sc);
		return;
	}

	virtio_read_device_config(dev,
	    offsetof(struct virtio_net_config, mac),
	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}

static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	uint16_t status;

	dev = sc->vtnet_dev;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	status = virtio_read_dev_config_2(dev,
	    offsetof(struct virtio_net_config, status));

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;
	int link;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

	link = vtnet_is_link_up(sc);

	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
		sc->vtnet_flags |= VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is up\n");
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
		if (!ifsq_is_empty(ifsq))
			vtnet_start_locked(ifp, ifsq);
	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is down\n");
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
vtnet_watchdog(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

#ifdef VTNET_TX_INTR_MODERATION
	vtnet_txeof(sc);
#endif

	if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
		return;

	if_printf(ifp, "watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_tx_vq);
#endif

	ifp->if_flags &= ~IFF_RUNNING;
	vtnet_init_locked(sc);
}

static void
vtnet_config_change_task(void *arg, int pending)
{
	struct vtnet_softc *sc;

	sc = arg;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_update_link_status(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	reinit = 0;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			lwkt_serialize_enter(&sc->vtnet_slz);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			lwkt_serialize_exit(&sc->vtnet_slz);
		}
		break;

	case SIOCSIFFLAGS:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_flags & IFF_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else
					error = ENOTSUP;
			}
		} else
			vtnet_init_locked(sc);

		if (error == 0)
			sc->vtnet_if_flags = ifp->if_flags;
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
		    (ifp->if_flags & IFF_RUNNING))
			vtnet_rx_filter_mac(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		lwkt_serialize_enter(&sc->vtnet_slz);

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
			else
				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
		}

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}

		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			vtnet_init_locked(sc);
		}
		//VLAN_CAPABILITIES(ifp);

		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int new_frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		new_frame_size = sizeof(struct vtnet_rx_header) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size > MJUM9BYTES)
			return (EINVAL);

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUM9BYTES;
	} else {
		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUMPAGESIZE;
	}

	sc->vtnet_rx_mbuf_size = clsize;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

	ifp->if_mtu = new_mtu;

	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~IFF_RUNNING;
		vtnet_init_locked(sc);
	}

	return (0);
}

static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = sc->vtnet_rx_vq;
	nbufs = 0;
	error = ENOSPC;

	while (!virtqueue_full(vq)) {
		if ((error = vtnet_newbuf(sc)) != 0)
			break;
		nbufs++;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);

		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error. We should not get ENOSPC since we check if
		 * the virtqueue is full before attempting to add a
		 * buffer.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}

static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = sc->vtnet_rx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}

static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = sc->vtnet_tx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		m_freem(txhdr->vth_mbuf);
	}

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
	/*
	 * The control virtqueue is only polled, therefore
	 * it should already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
	    ("Ctrl Vq not empty"));
}

static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_mbuf_size;

	/*use getcl instead of getjcl. see if_mxge.c comment line 2398*/
	//m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
	m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	if (nbufs > 1) {
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf requested without LRO_NOMRG"));

		for (i = 0; i < nbufs - 1; i++) {
			//m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
			m = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (m == NULL)
				goto fail;

			m->m_len = clsize;
			m_tail->m_next = m;
			m_tail = m;
		}
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}

static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	m = m0;
	m_prev = NULL;
	len = len0;
	m_tail = NULL;
	clsize = sc->vtnet_rx_mbuf_size;
	nreplace = 0;

	if (m->m_next != NULL)
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf without LRO_NOMRG"));

	/*
	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
	 * allocating an entire chain for each received frame. When
	 * the received frame's length is less than that of the chain,
	 * the unused mbufs are reassigned to the new chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received
		 * a frame larger than the mbuf chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		KASSERT(m->m_len == clsize,
		    ("mbuf length not expected cluster size: %d",
		    m->m_len));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(m_prev != NULL, ("m_prev == NULL"));
	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
	    ("too many replacement mbufs: %d/%d", nreplace,
	    sc->vtnet_rx_mbuf_count));

	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move unused mbufs, if any, from the original chain
	 * onto the end of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_enqueue_rxbuf(sc, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is supposed to be an identical copy
		 * to the one just dequeued so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}

static int
vtnet_newbuf(struct vtnet_softc *sc)
{
	struct mbuf *m;
	int error;

	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_enqueue_rxbuf(sc, m);
	if (error)
		m_freem(m);

	return (error);
}

static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
	struct virtqueue *vq;
	struct mbuf *m;

	vq = sc->vtnet_rx_vq;

	while (--nbufs > 0) {
		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
			break;
		vtnet_discard_rxbuf(sc, m);
	}
}

static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = vtnet_enqueue_rxbuf(sc, m);
	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}

static int
vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
	struct vtnet_rx_header *rxhdr;
	struct virtio_net_hdr *hdr;
	uint8_t *mdata;
	int offset, error;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
		KASSERT(m->m_next == NULL, ("chained Rx mbuf"));

	sglist_init(&sg, VTNET_MAX_RX_SEGS, segs);

	mdata = mtod(m, uint8_t *);
	offset = 0;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxhdr = (struct vtnet_rx_header *) mdata;
		hdr = &rxhdr->vrh_hdr;
		offset += sizeof(struct vtnet_rx_header);

		error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
		KASSERT(error == 0, ("cannot add header to sglist"));
	}

	error = sglist_append(&sg, mdata + offset, m->m_len - offset);
	if (error)
		return (error);

	if (m->m_next != NULL) {
		error = sglist_append_mbuf(&sg, m->m_next);
		if (error)
			return (error);
	}

	return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
}

static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evl;

	evl = mtod(m, struct ether_vlan_header *);

	m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

/*
 * Alternative method of doing receive checksum offloading. Rather
 * than parsing the received frame down to the IP header, use the
 * csum_offset to determine which CSUM_* flags are appropriate. We
 * can get by with doing this only because the checksum offsets are
 * unique for the things we care about.
 */
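/*
 * The offsets do differ for the protocols we handle: uh_sum sits at
 * offset 6 within the UDP header while th_sum sits at offset 16 within
 * the TCP header, so the switch on csum_offset below cannot confuse
 * the two.
 */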
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct udphdr *udp;
	int csum_len;
	uint16_t eth_type;

	csum_len = hdr->csum_start + hdr->csum_offset;

	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
		return (1);
	if (m->m_len < csum_len)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Use the offset to determine the appropriate CSUM_* flags. */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
			return (1);
		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}

static int
vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
{
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			goto fail;

		if (vtnet_newbuf(sc) != 0) {
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			goto fail;
		}

		if (m->m_len < len)
			len = m->m_len;

		m->m_len = len;
		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}

static int
vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
{
	struct virtio_net_hdr lhdr;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct ether_header *eh;
	struct virtio_net_hdr *hdr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, rx_npkts;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	hdr = &lhdr;
	deq = 0;
	rx_npkts = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while (--count >= 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			vtnet_discard_rxbuf(sc, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad between the header and
			 * the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
				continue;
		}

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the VirtIO header is placed first in the
		 * mbuf's data. We no longer need num_buffers, so always use a
		 * virtio_net_hdr.
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			eh = mtod(m, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				vtnet_vlan_tag_remove(m);

				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					hdr->csum_start -=
					    ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (vtnet_rx_csum(sc, m, hdr) != 0)
				sc->vtnet_stats.rx_csum_failed++;
		}

		lwkt_serialize_exit(&sc->vtnet_slz);
		rx_npkts++;
		ifp->if_input(ifp, m, NULL, -1);
		lwkt_serialize_enter(&sc->vtnet_slz);

		/*
		 * The interface may have been stopped while we were
		 * passing the packet up the network stack.
		 */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	if (deq > 0)
		virtqueue_notify(vq, &sc->vtnet_slz);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;

	return (count > 0 ? 0 : EAGAIN);
}

static void
vtnet_rx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;

	sc = arg;
	ifp = sc->vtnet_ifp;

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_rx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
	if (!more && vtnet_enable_rx_intr(sc) != 0) {
		vtnet_disable_rx_intr(sc);
		more = 1;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);

	if (more) {
		sc->vtnet_stats.rx_task_rescheduled++;
		goto next;
	}
}

static int
vtnet_rx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_rx_intr(sc);
	vtnet_rx_intr_task(sc);

	return (1);
}

static void
vtnet_txeof(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct ifnet *ifp;
	struct vtnet_tx_header *txhdr;
	int deq;

	vq = sc->vtnet_tx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		deq++;
		m_freem(txhdr->vth_mbuf);
	}

	if (deq > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		if (virtqueue_empty(vq))
			sc->vtnet_watchdog_timer = 0;
	}
}

static struct mbuf *
vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ifnet *ifp;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	int ip_offset;
	uint16_t eth_type, csum_start;
	uint8_t ip_proto, gso_type;

	ifp = sc->vtnet_ifp;

	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset) {
		if ((m = m_pullup(m, ip_offset)) == NULL)
			return (NULL);
	}

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset) {
			if ((m = m_pullup(m, ip_offset)) == NULL)
				return (NULL);
		}
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip));
			if (m == NULL)
				return (NULL);
		}

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		ip_proto = ip->ip_p;
		csum_start = ip_offset + (ip->ip_hl << 2);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
			if (m == NULL)
				return (NULL);
		}

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/*
		 * XXX Assume no extension headers are present. Presently,
		 * this will always be true in the case of TSO, and FreeBSD
		 * does not perform checksum offloading of IPv6 yet.
		 */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		break;

	default:
		sc->vtnet_stats.tx_csum_bad_ethtype++;
		m_freem(m);
		return (NULL);
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;

		sc->vtnet_stats.tx_csum_offloaded++;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ip_proto != IPPROTO_TCP)
			return (m);

		if (m->m_len < csum_start + sizeof(struct tcphdr)) {
			m = m_pullup(m, csum_start + sizeof(struct tcphdr));
			if (m == NULL)
				return (NULL);
		}

		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
		hdr->gso_type = gso_type;
		hdr->hdr_len = csum_start + (tcp->th_off << 2);
		hdr->gso_size = m->m_pkthdr.tso_segsz;

		if (tcp->th_flags & TH_CWR) {
			/*
			 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
			 * ECN support is only configurable globally with the
			 * net.inet.tcp.ecn.enable sysctl knob.
			 */
			if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
				if_printf(ifp, "TSO with ECN not supported "
				    "by host\n");
				m_freem(m);
				return (NULL);
			}

			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		}

		sc->vtnet_stats.tx_tso_offloaded++;
	}

	return (m);
}

static int
vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
	struct virtqueue *vq;
	struct mbuf *m;
	int collapsed, error;

	vq = sc->vtnet_tx_vq;
	m = *m_head;
	collapsed = 0;

	sglist_init(&sg, VTNET_MAX_TX_SEGS, segs);
	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg.sg_nseg == 1,
	    ("cannot add header to sglist"));

again:
	error = sglist_append_mbuf(&sg, m);
	if (error) {
		if (collapsed)
			goto fail;

		//m = m_collapse(m, M_NOWAIT, VTNET_MAX_TX_SEGS - 1);
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		collapsed = 1;
		goto again;
	}

	txhdr->vth_mbuf = m;

	return (virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0));

fail:
	*m_head = NULL;

	return (ENOBUFS);
}

static struct mbuf *
vtnet_vlan_tag_insert(struct mbuf *m)
{
	struct mbuf *n;
	struct ether_vlan_header *evl;

	if (M_WRITABLE(m) == 0) {
		n = m_dup(m, M_NOWAIT);
		m_freem(m);
		if ((m = n) == NULL)
			return (NULL);
	}

	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
	if (m == NULL)
		return (NULL);
	if (m->m_len < sizeof(struct ether_vlan_header)) {
		m = m_pullup(m, sizeof(struct ether_vlan_header));
		if (m == NULL)
			return (NULL);
	}

	/* Insert 802.1Q header into the existing Ethernet header. */
	evl = mtod(m, struct ether_vlan_header *);
	bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
	    (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
	evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
	evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
	m->m_flags &= ~M_VLANTAG;

	return (m);
}

static int
vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	txhdr = &sc->vtnet_txhdrarea[sc->vtnet_txhdridx];
	memset(txhdr, 0, sizeof(struct vtnet_tx_header));

	/*
	 * Always use the non-mergeable header to simplify things. When
	 * the mergeable feature is negotiated, the num_buffers field
	 * must be set to zero. We use vtnet_hdr_size later to enqueue
	 * the correct header size to the host.
	 */
	hdr = &txhdr->vth_uhdr.hdr;
	m = *m_head;

	error = ENOBUFS;

	if (m->m_flags & M_VLANTAG) {
		//m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		m = vtnet_vlan_tag_insert(m);
		if ((*m_head = m) == NULL)
			goto fail;
		m->m_flags &= ~M_VLANTAG;
	}

	if (m->m_pkthdr.csum_flags != 0) {
		m = vtnet_tx_offload(sc, m, hdr);
		if ((*m_head = m) == NULL)
			goto fail;
	}

	error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
	if (error == 0)
		sc->vtnet_txhdridx =
		    (sc->vtnet_txhdridx + 1) % ((sc->vtnet_tx_size / 2) + 1);
fail:
	return (error);
}
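
/*
 * Note on the modulo above: each packet posted to the transmit
 * virtqueue consumes at least two descriptors (one for the header, one
 * or more for the data), so presumably no more than vtnet_tx_size / 2
 * headers can ever be outstanding and the (vtnet_tx_size / 2) + 1
 * entry vtnet_txhdrarea can never recycle a header still in flight.
 */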

static void
vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_start_locked(ifp, ifsq);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static void
vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int enq;

	sc = ifp->if_softc;
	vq = sc->vtnet_tx_vq;
	enq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if ((ifp->if_flags & (IFF_RUNNING)) !=
	    IFF_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0))
		return;

#ifdef VTNET_TX_INTR_MODERATION
	if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
		vtnet_txeof(sc);
#endif

	while (!ifsq_is_empty(ifsq)) {
		if (virtqueue_full(vq)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;

		if (vtnet_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			ifq_prepend(&ifp->if_snd, m0);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);
		sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT;
	}
}

static void
vtnet_tick(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_rx_vq);
	virtqueue_dump(sc->vtnet_tx_vq);
#endif

	vtnet_watchdog(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}

static void
vtnet_tx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;

	sc = arg;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_tx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	vtnet_txeof(sc);

	if (!ifsq_is_empty(ifsq))
		vtnet_start_locked(ifp, ifsq);

	if (vtnet_enable_tx_intr(sc) != 0) {
		vtnet_disable_tx_intr(sc);
		sc->vtnet_stats.tx_task_rescheduled++;
//		lwkt_serialize_exit(&sc->vtnet_slz);
		goto next;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);
}

static int
vtnet_tx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_tx_intr(sc);
	vtnet_tx_intr_task(sc);

	return (1);
}

static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	sc->vtnet_watchdog_timer = 0;
	callout_stop(&sc->vtnet_tick_ch);
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags &= ~(IFF_RUNNING);

	vtnet_disable_rx_intr(sc);
	vtnet_disable_tx_intr(sc);

	/*
	 * Stop the host VirtIO adapter. Note this will reset the host
	 * adapter's state back to the pre-initialized state, so in
	 * order to make the device usable again, we must drive it
	 * through virtio_reinit() and virtio_reinit_complete().
	 */
	virtio_stop(dev);

	sc->vtnet_flags &= ~VTNET_FLAG_LINK;

	vtnet_free_rx_mbufs(sc);
	vtnet_free_tx_mbufs(sc);
}

static int
vtnet_virtio_reinit(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint64_t features;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	/*
	 * Re-negotiate with the host, removing any disabled receive
	 * features. Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */
	if (ifp->if_capabilities & IFCAP_RXCSUM) {
		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

	if (ifp->if_capabilities & IFCAP_LRO) {
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
			features &= ~VTNET_LRO_FEATURES;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
	}

	error = virtio_reinit(dev, features);
	if (error)
		device_printf(dev, "virtio reinit error %d\n", error);

	return (error);
}

static void
vtnet_init_locked(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Stop host's adapter, cancel any pending I/O. */
	vtnet_stop(sc);

	/* Reinitialize the host device. */
	error = vtnet_virtio_reinit(sc);
	if (error) {
		device_printf(dev,
		    "reinitialization failed, stopping device...\n");
		vtnet_stop(sc);
		return;
	}

	/* Update host with assigned MAC address. */
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	vtnet_set_hwaddr(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	error = vtnet_init_rx_vq(sc);
	if (error) {
		device_printf(dev,
		    "cannot allocate mbufs for Rx virtqueue\n");
		vtnet_stop(sc);
		return;
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
			/* Restore promiscuous and all-multicast modes. */
			vtnet_rx_filter(sc);

			/* Restore filtered MAC addresses. */
			vtnet_rx_filter_mac(sc);
		}

		/* Restore VLAN filters. */
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			vtnet_rx_filter_vlan(sc);
	}

	vtnet_enable_rx_intr(sc);
	vtnet_enable_tx_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	virtio_reinit_complete(dev);

	vtnet_update_link_status(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}

static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_init_locked(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;
	void *c;

	vq = sc->vtnet_ctrl_vq;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
	    ("no control virtqueue"));
	KASSERT(virtqueue_empty(vq),
	    ("control command already enqueued"));

	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
		return;

	virtqueue_notify(vq, &sc->vtnet_slz);

	/*
	 * Poll until the command is complete. Previously, we would
	 * sleep until the control virtqueue interrupt handler woke
	 * us up, but dropping the VTNET_MTX leads to serialization
	 * difficulties.
	 *
	 * Furthermore, it appears QEMU/KVM only allocates three MSIX
	 * vectors. Two of those vectors are needed for the Rx and Tx
	 * virtqueues. We do not support sharing both a Vq and config
	 * changed notification on the same MSIX vector.
	 */
	c = virtqueue_poll(vq, NULL);
	KASSERT(c == cookie, ("unexpected control command response"));
}

static int
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
{
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8);
		uint8_t pad2;
		uint8_t ack;
	} s;
	struct sglist_seg segs[3];
	struct sglist sg;
	int error;

	s.hdr.class = VIRTIO_NET_CTRL_MAC;
	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
	s.ack = VIRTIO_NET_ERR;

	/* Copy the mac address into physically contiguous memory */
	memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN);

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr,
	    sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding set MAC msg to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
		device_printf(dev, "cannot %s promiscuous mode\n",
		    ifp->if_flags & IFF_PROMISC ? "enable" : "disable");

	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
		device_printf(dev, "cannot %s all-multicast mode\n",
		    ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable");
}

static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint8_t onoff;
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("%s: CTRL_RX feature not negotiated", __func__));

	s.hdr.class = VIRTIO_NET_CTRL_RX;
	s.hdr.cmd = cmd;
	s.onoff = !!on;
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding Rx message to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}

static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}

static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifaddr_container *ifac;
	struct ifmultiaddr *ifma;
	int ucnt, mcnt, promisc, allmulti, error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	ucnt = 0;
	mcnt = 0;
	promisc = 0;
	allmulti = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("%s: CTRL_RX feature not negotiated", __func__));

	/* Use the MAC filtering table allocated in vtnet_attach. */
	filter = sc->vtnet_macfilter;
	memset(filter, 0, sizeof(struct vtnet_mac_filter));

	/* Unicast MAC addresses: */
	//if_addr_rlock(ifp);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		ifa = ifac->ifa;
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
			continue;
		else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
			promisc = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
		ucnt++;
	}
	//if_addr_runlock(ifp);

	if (promisc != 0) {
		filter->vmf_unicast.nentries = 0;
		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_unicast.nentries = ucnt;

	/* Multicast MAC addresses: */
	//if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
			allmulti = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
		mcnt++;
	}
	//if_maddr_runlock(ifp);

	if (allmulti != 0) {
		filter->vmf_multicast.nentries = 0;
		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else
		filter->vmf_multicast.nentries = mcnt;

	if (promisc != 0 && allmulti != 0)
		goto out;

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 4, segs);
	error = 0;
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 4,
	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
		if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
		if_printf(ifp, "cannot enable all-multicast mode\n");
}

static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint16_t tag;
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	s.tag = tag;
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding VLAN message to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	uint32_t w, tag;
	int i, bit, nvlans;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("%s: VLAN_FILTER feature not negotiated", __func__));

	nvlans = sc->vtnet_nvlans;

	/* Enable the filter for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
		w = sc->vtnet_vlan_shadow[i];
		while ((bit = ffs(w) - 1) != -1) {
			w &= ~(1 << bit);
			tag = sizeof(w) * CHAR_BIT * i + bit;
			nvlans--;

			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
				device_printf(sc->vtnet_dev,
				    "cannot enable VLAN %d filter\n", tag);
			}
		}
	}

	KASSERT(nvlans == 0, ("VLAN count incorrect"));
}
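
/*
 * Shadow table arithmetic used below: VLAN tag 100, for example, maps
 * to word index (100 >> 5) & 0x7F = 3 and bit 100 & 0x1F = 4.
 */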
static void
vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct ifnet *ifp;
	int idx, bit;

	ifp = sc->vtnet_ifp;
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	if (tag == 0 || tag > 4095)
		return;

	lwkt_serialize_enter(&sc->vtnet_slz);

	/* Update shadow VLAN table. */
	if (add) {
		sc->vtnet_nvlans++;
		sc->vtnet_vlan_shadow[idx] |= (1 << bit);
	} else {
		sc->vtnet_nvlans--;
		sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
		device_printf(sc->vtnet_dev,
		    "cannot %s VLAN %d %s the host filter table\n",
		    add ? "add" : "remove", tag, add ? "to" : "from");
	}

	lwkt_serialize_exit(&sc->vtnet_slz);
}

static void
vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 1, tag);
}

static void
vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 0, tag);
}

static int
vtnet_ifmedia_upd(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	ifm = &sc->vtnet_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

static void
vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (vtnet_is_link_up(sc) != 0) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= VTNET_MEDIATYPE;
	} else
		ifmr->ifm_active |= IFM_NONE;
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static void
vtnet_add_statistics(struct vtnet_softc *sc)
{
	device_t dev;
	struct vtnet_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtnet_dev;
	stats = &sc->vtnet_stats;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD, &stats->mbuf_alloc_failed, 0,
	    "Mbuf cluster allocation failures");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD, &stats->rx_frame_too_large, 0,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD, &stats->rx_mergeable_failed, 0,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD, &stats->rx_csum_bad_offset, 0,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLFLAG_RD, &stats->rx_csum_failed, 0,
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLFLAG_RD, &stats->rx_csum_offloaded, 0,
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLFLAG_RD, &stats->rx_task_rescheduled, 0,
	    "Times the receive interrupt task rescheduled itself");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0,
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLFLAG_RD, &stats->tx_csum_offloaded, 0,
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLFLAG_RD, &stats->tx_tso_offloaded, 0,
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLFLAG_RD, &stats->tx_task_rescheduled, 0,
	    "Times the transmit interrupt task rescheduled itself");
}

static int
vtnet_enable_rx_intr(struct vtnet_softc *sc)
{
	return (virtqueue_enable_intr(sc->vtnet_rx_vq));
}

static void
vtnet_disable_rx_intr(struct vtnet_softc *sc)
{
	virtqueue_disable_intr(sc->vtnet_rx_vq);
}

static int
vtnet_enable_tx_intr(struct vtnet_softc *sc)
{
#ifdef VTNET_TX_INTR_MODERATION
	return (0);
#else
	return (virtqueue_enable_intr(sc->vtnet_tx_vq));
#endif
}

static void
vtnet_disable_tx_intr(struct vtnet_softc *sc)
{
	virtqueue_disable_intr(sc->vtnet_tx_vq);
}