if_vtnet - Disable rx csum offload due to unsupported ipv6 rx csum offload.
[dragonfly.git] sys/dev/virtual/virtio/net/if_vtnet.c
/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/serialize.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <machine/limits.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/ifq_var.h>

#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <dev/virtual/virtio/virtio/virtio.h>
#include <dev/virtual/virtio/virtio/virtqueue.h>
#include <dev/virtual/virtio/net/virtio_net.h>
#include <dev/virtual/virtio/net/if_vtnetvar.h>

#include "virtio_if.h"

MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");

static int      vtnet_probe(device_t);
static int      vtnet_attach(device_t);
static int      vtnet_detach(device_t);
static int      vtnet_suspend(device_t);
static int      vtnet_resume(device_t);
static int      vtnet_shutdown(device_t);
static int      vtnet_config_change(device_t);

static void     vtnet_negotiate_features(struct vtnet_softc *);
static int      vtnet_alloc_virtqueues(struct vtnet_softc *);
static void     vtnet_get_hwaddr(struct vtnet_softc *);
static void     vtnet_set_hwaddr(struct vtnet_softc *);
static int      vtnet_is_link_up(struct vtnet_softc *);
static void     vtnet_update_link_status(struct vtnet_softc *);
#if 0
static void     vtnet_watchdog(struct vtnet_softc *);
#endif
static void     vtnet_config_change_task(void *, int);
static int      vtnet_setup_interface(struct vtnet_softc *);
static int      vtnet_change_mtu(struct vtnet_softc *, int);
static int      vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int      vtnet_init_rx_vq(struct vtnet_softc *);
static void     vtnet_free_rx_mbufs(struct vtnet_softc *);
static void     vtnet_free_tx_mbufs(struct vtnet_softc *);
static void     vtnet_free_ctrl_vq(struct vtnet_softc *);

static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
                    struct mbuf **);
static int      vtnet_replace_rxbuf(struct vtnet_softc *,
                    struct mbuf *, int);
static int      vtnet_newbuf(struct vtnet_softc *);
static void     vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void     vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int      vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void     vtnet_vlan_tag_remove(struct mbuf *);
static int      vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
                    struct virtio_net_hdr *);
static int      vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int      vtnet_rxeof(struct vtnet_softc *, int, int *);
static void     vtnet_rx_intr_task(void *);
static int      vtnet_rx_vq_intr(void *);

static void     vtnet_enqueue_txhdr(struct vtnet_softc *,
                    struct vtnet_tx_header *);
static void     vtnet_txeof(struct vtnet_softc *);
static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
                    struct virtio_net_hdr *);
static int      vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
                    struct vtnet_tx_header *);
static int      vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void     vtnet_start_locked(struct ifnet *, struct ifaltq_subque *);
static void     vtnet_start(struct ifnet *, struct ifaltq_subque *);
static void     vtnet_tick(void *);
static void     vtnet_tx_intr_task(void *);
static int      vtnet_tx_vq_intr(void *);

static void     vtnet_stop(struct vtnet_softc *);
static int      vtnet_virtio_reinit(struct vtnet_softc *);
static void     vtnet_init_locked(struct vtnet_softc *);
static void     vtnet_init(void *);

static void     vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
                    struct sglist *, int, int);

static int      vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int      vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int      vtnet_set_promisc(struct vtnet_softc *, int);
static int      vtnet_set_allmulti(struct vtnet_softc *, int);
static void     vtnet_rx_filter(struct vtnet_softc *sc);
static void     vtnet_rx_filter_mac(struct vtnet_softc *);

static int      vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void     vtnet_rx_filter_vlan(struct vtnet_softc *);
static void     vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void     vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void     vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int      vtnet_ifmedia_upd(struct ifnet *);
static void     vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void     vtnet_add_statistics(struct vtnet_softc *);

static int      vtnet_enable_rx_intr(struct vtnet_softc *);
static int      vtnet_enable_tx_intr(struct vtnet_softc *);
static void     vtnet_disable_rx_intr(struct vtnet_softc *);
static void     vtnet_disable_tx_intr(struct vtnet_softc *);

/* Tunables. */
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 1;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
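
/*
 * These tunables are read from the kernel environment at boot. A minimal
 * example for /boot/loader.conf (illustrative values; the names match the
 * TUNABLE_INT strings above):
 *
 *   hw.vtnet.csum_disable="0"
 *   hw.vtnet.tso_disable="1"
 *   hw.vtnet.lro_disable="0"
 */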

/*
 * Reducing the number of transmit completed interrupts can
 * improve performance. To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start and watchdog paths. The price to pay for this
 * is that the m_free'ing of transmitted mbufs may be delayed
 * until the watchdog fires.
 */
#define VTNET_TX_INTR_MODERATION

static struct virtio_feature_desc vtnet_feature_desc[] = {
        { VIRTIO_NET_F_CSUM,            "TxChecksum"    },
        { VIRTIO_NET_F_GUEST_CSUM,      "RxChecksum"    },
        { VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "DynOffload"        },
        { VIRTIO_NET_F_MAC,             "MacAddress"    },
        { VIRTIO_NET_F_GSO,             "TxAllGSO"      },
        { VIRTIO_NET_F_GUEST_TSO4,      "RxTSOv4"       },
        { VIRTIO_NET_F_GUEST_TSO6,      "RxTSOv6"       },
        { VIRTIO_NET_F_GUEST_ECN,       "RxECN"         },
        { VIRTIO_NET_F_GUEST_UFO,       "RxUFO"         },
        { VIRTIO_NET_F_HOST_TSO4,       "TxTSOv4"       },
        { VIRTIO_NET_F_HOST_TSO6,       "TxTSOv6"       },
        { VIRTIO_NET_F_HOST_ECN,        "TxTSOECN"      },
        { VIRTIO_NET_F_HOST_UFO,        "TxUFO"         },
        { VIRTIO_NET_F_MRG_RXBUF,       "MrgRxBuf"      },
        { VIRTIO_NET_F_STATUS,          "Status"        },
        { VIRTIO_NET_F_CTRL_VQ,         "ControlVq"     },
        { VIRTIO_NET_F_CTRL_RX,         "RxMode"        },
        { VIRTIO_NET_F_CTRL_VLAN,       "VLanFilter"    },
        { VIRTIO_NET_F_CTRL_RX_EXTRA,   "RxModeExtra"   },
        { VIRTIO_NET_F_GUEST_ANNOUNCE,  "GuestAnnounce" },
        { VIRTIO_NET_F_MQ,              "RFS"           },
        { VIRTIO_NET_F_CTRL_MAC_ADDR,   "SetMacAddress" },
        { 0, NULL }
};

static device_method_t vtnet_methods[] = {
        /* Device methods. */
        DEVMETHOD(device_probe,         vtnet_probe),
        DEVMETHOD(device_attach,        vtnet_attach),
        DEVMETHOD(device_detach,        vtnet_detach),
        DEVMETHOD(device_suspend,       vtnet_suspend),
        DEVMETHOD(device_resume,        vtnet_resume),
        DEVMETHOD(device_shutdown,      vtnet_shutdown),

        /* VirtIO methods. */
        DEVMETHOD(virtio_config_change, vtnet_config_change),

        DEVMETHOD_END
};

static driver_t vtnet_driver = {
        "vtnet",
        vtnet_methods,
        sizeof(struct vtnet_softc)
};

static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, NULL, NULL);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);

static int
vtnet_probe(device_t dev)
{
        if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
                return (ENXIO);

        device_set_desc(dev, "VirtIO Networking Adapter");

        return (BUS_PROBE_DEFAULT);
}

static int
vtnet_attach(device_t dev)
{
        struct vtnet_softc *sc;
        int error;

        sc = device_get_softc(dev);
        sc->vtnet_dev = dev;

        lwkt_serialize_init(&sc->vtnet_slz);
        callout_init(&sc->vtnet_tick_ch);

        ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
                     vtnet_ifmedia_sts);
        ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
        ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

        vtnet_add_statistics(sc);
        SLIST_INIT(&sc->vtnet_txhdr_free);

        /* Register our feature descriptions. */
        virtio_set_feature_desc(dev, vtnet_feature_desc);
        vtnet_negotiate_features(sc);

        if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
                sc->vtnet_flags |= VTNET_FLAG_INDIRECT;

        if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
                /* This feature should always be negotiated. */
                sc->vtnet_flags |= VTNET_FLAG_MAC;
        }

        if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
                sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
                sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
        }

        sc->vtnet_rx_mbuf_size = MCLBYTES;
        sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

        if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
                sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

                if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
                        sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
                if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
                        sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
                if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
                    virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
                        sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
        }

        /* Read (or generate) the MAC address for the adapter. */
        vtnet_get_hwaddr(sc);

        error = vtnet_alloc_virtqueues(sc);
        if (error) {
                device_printf(dev, "cannot allocate virtqueues\n");
                goto fail;
        }

        error = vtnet_setup_interface(sc);
        if (error) {
                device_printf(dev, "cannot setup interface\n");
                goto fail;
        }

        TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);

        error = virtio_setup_intr(dev, &sc->vtnet_slz);
        if (error) {
                device_printf(dev, "cannot setup virtqueue interrupts\n");
                ether_ifdetach(sc->vtnet_ifp);
                goto fail;
        }

        if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
                lwkt_serialize_enter(&sc->vtnet_slz);
                vtnet_set_hwaddr(sc);
                lwkt_serialize_exit(&sc->vtnet_slz);
        }

        /*
         * Device defaults to promiscuous mode for backwards
         * compatibility. Turn it off if possible.
         */
        if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
                lwkt_serialize_enter(&sc->vtnet_slz);
                if (vtnet_set_promisc(sc, 0) != 0) {
                        sc->vtnet_ifp->if_flags |= IFF_PROMISC;
                        device_printf(dev,
                            "cannot disable promiscuous mode\n");
                }
                lwkt_serialize_exit(&sc->vtnet_slz);
        } else
                sc->vtnet_ifp->if_flags |= IFF_PROMISC;

fail:
        if (error)
                vtnet_detach(dev);

        return (error);
}

static int
vtnet_detach(device_t dev)
{
        struct vtnet_softc *sc;
        struct ifnet *ifp;

        sc = device_get_softc(dev);
        ifp = sc->vtnet_ifp;

        if (device_is_attached(dev)) {
                lwkt_serialize_enter(&sc->vtnet_slz);
                vtnet_stop(sc);
                lwkt_serialize_exit(&sc->vtnet_slz);

                callout_stop(&sc->vtnet_tick_ch);
                taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task);

                ether_ifdetach(ifp);
        }

        if (sc->vtnet_vlan_attach != NULL) {
                EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
                sc->vtnet_vlan_attach = NULL;
        }
        if (sc->vtnet_vlan_detach != NULL) {
                EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
                sc->vtnet_vlan_detach = NULL;
        }

        if (ifp) {
                if_free(ifp);
                sc->vtnet_ifp = NULL;
        }

        if (sc->vtnet_rx_vq != NULL)
                vtnet_free_rx_mbufs(sc);
        if (sc->vtnet_tx_vq != NULL)
                vtnet_free_tx_mbufs(sc);
        if (sc->vtnet_ctrl_vq != NULL)
                vtnet_free_ctrl_vq(sc);

        if (sc->vtnet_txhdrarea != NULL) {
                contigfree(sc->vtnet_txhdrarea,
                    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
                    M_VTNET);
                sc->vtnet_txhdrarea = NULL;
        }
        SLIST_INIT(&sc->vtnet_txhdr_free);
        if (sc->vtnet_macfilter != NULL) {
                contigfree(sc->vtnet_macfilter,
                    sizeof(struct vtnet_mac_filter), M_DEVBUF);
                sc->vtnet_macfilter = NULL;
        }

        ifmedia_removeall(&sc->vtnet_media);

        return (0);
}

static int
vtnet_suspend(device_t dev)
{
        struct vtnet_softc *sc;

        sc = device_get_softc(dev);

        lwkt_serialize_enter(&sc->vtnet_slz);
        vtnet_stop(sc);
        sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
        lwkt_serialize_exit(&sc->vtnet_slz);

        return (0);
}

static int
vtnet_resume(device_t dev)
{
        struct vtnet_softc *sc;
        struct ifnet *ifp;

        sc = device_get_softc(dev);
        ifp = sc->vtnet_ifp;

        lwkt_serialize_enter(&sc->vtnet_slz);
        if (ifp->if_flags & IFF_UP)
                vtnet_init_locked(sc);
        sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
        lwkt_serialize_exit(&sc->vtnet_slz);

        return (0);
}

static int
vtnet_shutdown(device_t dev)
{

        /*
         * Suspend already does all of what we need to
         * do here; we just never expect to be resumed.
         */
        return (vtnet_suspend(dev));
}

static int
vtnet_config_change(device_t dev)
{
        struct vtnet_softc *sc;

        sc = device_get_softc(dev);

        taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);

        return (1);
}

static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
        device_t dev;
        uint64_t mask, features;

        dev = sc->vtnet_dev;
        mask = 0;

        if (vtnet_csum_disable)
                mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

        /*
         * XXX DragonFly doesn't support receive checksum offload for ipv6 yet,
         *     hence always disable the virtio feature for now.
         * XXX We need to support the DynOffload feature, in order to
         *     dynamically enable/disable this feature.
         */
        mask |= VIRTIO_NET_F_GUEST_CSUM;

        /*
         * TSO and LRO are only available when their corresponding checksum
         * offload feature is also negotiated.
         */

        if (vtnet_csum_disable || vtnet_tso_disable)
                mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
                    VIRTIO_NET_F_HOST_ECN;

        if (vtnet_csum_disable || vtnet_lro_disable)
                mask |= VTNET_LRO_FEATURES;

        features = VTNET_FEATURES & ~mask;
        features |= VIRTIO_F_NOTIFY_ON_EMPTY;
        features |= VIRTIO_F_ANY_LAYOUT;
        sc->vtnet_features = virtio_negotiate_features(dev, features);

        if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
            virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
                /*
                 * LRO without mergeable buffers requires special care. This
                 * is not ideal because every receive buffer must be large
                 * enough to hold the maximum TCP packet, the Ethernet header,
                 * and the virtio net header: a 65535-byte TCP/IP packet plus
                 * those headers spans up to 34 descriptors when MCLBYTES
                 * clusters are used. If we do not have indirect descriptors,
                 * LRO is disabled since the virtqueue will not contain very
                 * many receive buffers.
                 */
                if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
                        device_printf(dev,
                            "LRO disabled due to both mergeable buffers and "
                            "indirect descriptors not negotiated\n");

                        features &= ~VTNET_LRO_FEATURES;
                        sc->vtnet_features =
                            virtio_negotiate_features(dev, features);
                } else
                        sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
        }
}
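
/*
 * A minimal sketch of how the DynOffload feature mentioned in the XXX
 * comment above could toggle RxChecksum (VIRTIO_NET_F_GUEST_CSUM) at
 * runtime. The command layout follows the virtio specification; the
 * class/command values and this helper are hypothetical for this tree
 * and assume VIRTIO_NET_F_CTRL_GUEST_OFFLOADS was negotiated.
 */
#if 0
static int
vtnet_ctrl_guest_offloads(struct vtnet_softc *sc, uint64_t offloads)
{
        struct {
                struct virtio_net_ctrl_hdr hdr;
                uint64_t offloads;
                uint8_t ack;
        } s;
        struct sglist_seg segs[3];
        struct sglist sg;

        s.hdr.class = 5;        /* VIRTIO_NET_CTRL_GUEST_OFFLOADS (spec) */
        s.hdr.cmd = 0;          /* VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET (spec) */
        s.offloads = offloads;  /* bitmap of guest offload feature bits */
        s.ack = VIRTIO_NET_ERR;

        /* Readable segments: header + bitmap; writable segment: ack byte. */
        sglist_init(&sg, 3, segs);
        sglist_append(&sg, &s.hdr, sizeof(s.hdr));
        sglist_append(&sg, &s.offloads, sizeof(s.offloads));
        sglist_append(&sg, &s.ack, sizeof(s.ack));

        vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

        return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}
#endif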

static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
        device_t dev;
        struct vq_alloc_info vq_info[3];
        int nvqs;

        dev = sc->vtnet_dev;
        nvqs = 2;

        /*
         * Indirect descriptors are not needed for the Rx
         * virtqueue when mergeable buffers are negotiated.
         * The header is placed inline with the data, not
         * in a separate descriptor, and mbuf clusters are
         * always physically contiguous.
         */
        if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
                sc->vtnet_rx_nsegs = (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) ?
                    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
        } else
                sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;

        if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
            virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
                sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
        else
                sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;

        VQ_ALLOC_INFO_INIT(&vq_info[0], sc->vtnet_rx_nsegs,
            vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
            "%s receive", device_get_nameunit(dev));

        VQ_ALLOC_INFO_INIT(&vq_info[1], sc->vtnet_tx_nsegs,
            vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
            "%s transmit", device_get_nameunit(dev));

        if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
                nvqs++;

                VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
                    &sc->vtnet_ctrl_vq, "%s control",
                    device_get_nameunit(dev));
        }

        return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}

static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
        device_t dev;
        struct ifnet *ifp;
        int i;

        dev = sc->vtnet_dev;

        ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "cannot allocate ifnet structure\n");
                return (ENOSPC);
        }

        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_init = vtnet_init;
        ifp->if_start = vtnet_start;
        ifp->if_ioctl = vtnet_ioctl;

        sc->vtnet_rx_process_limit = virtqueue_size(sc->vtnet_rx_vq);
        sc->vtnet_tx_size = virtqueue_size(sc->vtnet_tx_vq);
        if (sc->vtnet_flags & VTNET_FLAG_INDIRECT)
                sc->vtnet_txhdrcount = sc->vtnet_tx_size;
        else
                sc->vtnet_txhdrcount = (sc->vtnet_tx_size / 2) + 1;
        sc->vtnet_txhdrarea = contigmalloc(
            sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
            M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
        if (sc->vtnet_txhdrarea == NULL) {
                device_printf(dev, "cannot contigmalloc the tx headers\n");
                return (ENOMEM);
        }
        for (i = 0; i < sc->vtnet_txhdrcount; i++)
                vtnet_enqueue_txhdr(sc, &sc->vtnet_txhdrarea[i]);
        sc->vtnet_macfilter = contigmalloc(
            sizeof(struct vtnet_mac_filter),
            M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
        if (sc->vtnet_macfilter == NULL) {
                device_printf(dev,
                    "cannot contigmalloc the mac filter table\n");
                return (ENOMEM);
        }
        ifq_set_maxlen(&ifp->if_snd, sc->vtnet_tx_size - 1);
        ifq_set_ready(&ifp->if_snd);

        ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);

        /* Tell the upper layer(s) we support long frames. */
        ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
        ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

        if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
                ifp->if_capabilities |= IFCAP_TXCSUM;

                if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
                        ifp->if_capabilities |= IFCAP_TSO4;
                if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
                        ifp->if_capabilities |= IFCAP_TSO6;
                if (ifp->if_capabilities & IFCAP_TSO)
                        ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

                if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
                        sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
        }

        if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
                ifp->if_capabilities |= IFCAP_RXCSUM;

                if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
                    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
                        ifp->if_capabilities |= IFCAP_LRO;
        }

        if (ifp->if_capabilities & IFCAP_HWCSUM) {
                /*
                 * VirtIO does not support VLAN tagging, but we can fake
                 * it by inserting and removing the 802.1Q header during
                 * transmit and receive. We are then able to do checksum
                 * offloading of VLAN frames.
                 */
                ifp->if_capabilities |=
                        IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
        }

        ifp->if_capenable = ifp->if_capabilities;

        /*
         * Capabilities after here are not enabled by default.
         */

        if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
                ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

                sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
                    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
                sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
                    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
        }

        return (0);
}

static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
        device_t dev;

        dev = sc->vtnet_dev;

        if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) &&
            (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) {
                if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
                        device_printf(dev, "unable to set MAC address\n");
        } else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
                virtio_write_device_config(dev,
                    offsetof(struct virtio_net_config, mac),
                    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
        }
}

static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
        device_t dev;

        dev = sc->vtnet_dev;

        if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
                /*
                 * Generate a random locally administered unicast address.
                 *
                 * It would be nice to generate the same MAC address across
                 * reboots, but it seems all the hosts currently available
                 * support the MAC feature, so this isn't too important.
                 */
                sc->vtnet_hwaddr[0] = 0xB2;
                karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);
                return;
        }

        virtio_read_device_config(dev,
            offsetof(struct virtio_net_config, mac),
            sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}

static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
        device_t dev;
        struct ifnet *ifp;
        uint16_t status;

        dev = sc->vtnet_dev;
        ifp = sc->vtnet_ifp;

        ASSERT_SERIALIZED(&sc->vtnet_slz);

        if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
                status = virtio_read_dev_config_2(dev,
                                offsetof(struct virtio_net_config, status));
        } else {
                status = VIRTIO_NET_S_LINK_UP;
        }

        return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
        device_t dev;
        struct ifnet *ifp;
        struct ifaltq_subque *ifsq;
        int link;

        dev = sc->vtnet_dev;
        ifp = sc->vtnet_ifp;
        ifsq = ifq_get_subq_default(&ifp->if_snd);

        link = vtnet_is_link_up(sc);

        if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
                sc->vtnet_flags |= VTNET_FLAG_LINK;
                if (bootverbose)
                        device_printf(dev, "Link is up\n");
                ifp->if_link_state = LINK_STATE_UP;
                if_link_state_change(ifp);
                if (!ifsq_is_empty(ifsq))
                        vtnet_start_locked(ifp, ifsq);
        } else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
                sc->vtnet_flags &= ~VTNET_FLAG_LINK;
                if (bootverbose)
                        device_printf(dev, "Link is down\n");

                ifp->if_link_state = LINK_STATE_DOWN;
                if_link_state_change(ifp);
        }
}

#if 0
static void
vtnet_watchdog(struct vtnet_softc *sc)
{
        struct ifnet *ifp;

        ifp = sc->vtnet_ifp;

#ifdef VTNET_TX_INTR_MODERATION
        vtnet_txeof(sc);
#endif

        if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
                return;

        if_printf(ifp, "watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
        virtqueue_dump(sc->vtnet_tx_vq);
#endif
        ifp->if_oerrors++;
        ifp->if_flags &= ~IFF_RUNNING;
        vtnet_init_locked(sc);
}
#endif

static void
vtnet_config_change_task(void *arg, int pending)
{
        struct vtnet_softc *sc;

        sc = arg;

        lwkt_serialize_enter(&sc->vtnet_slz);
        vtnet_update_link_status(sc);
        lwkt_serialize_exit(&sc->vtnet_slz);
}

static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
        struct vtnet_softc *sc;
        struct ifreq *ifr;
        int reinit, mask, error;

        sc = ifp->if_softc;
        ifr = (struct ifreq *) data;
        reinit = 0;
        error = 0;

        switch (cmd) {
        case SIOCSIFMTU:
                if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
                        error = EINVAL;
                else if (ifp->if_mtu != ifr->ifr_mtu) {
                        lwkt_serialize_enter(&sc->vtnet_slz);
                        error = vtnet_change_mtu(sc, ifr->ifr_mtu);
                        lwkt_serialize_exit(&sc->vtnet_slz);
                }
                break;

        case SIOCSIFFLAGS:
                lwkt_serialize_enter(&sc->vtnet_slz);
                if ((ifp->if_flags & IFF_UP) == 0) {
                        if (ifp->if_flags & IFF_RUNNING)
                                vtnet_stop(sc);
                } else if (ifp->if_flags & IFF_RUNNING) {
                        if ((ifp->if_flags ^ sc->vtnet_if_flags) &
                            (IFF_PROMISC | IFF_ALLMULTI)) {
                                if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
                                        vtnet_rx_filter(sc);
                                else
                                        error = ENOTSUP;
                        }
                } else
                        vtnet_init_locked(sc);

                if (error == 0)
                        sc->vtnet_if_flags = ifp->if_flags;
                lwkt_serialize_exit(&sc->vtnet_slz);
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                lwkt_serialize_enter(&sc->vtnet_slz);
                if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
                    (ifp->if_flags & IFF_RUNNING))
                        vtnet_rx_filter_mac(sc);
                lwkt_serialize_exit(&sc->vtnet_slz);
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
                break;

        case SIOCSIFCAP:
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;

                lwkt_serialize_enter(&sc->vtnet_slz);

                if (mask & IFCAP_TXCSUM) {
                        ifp->if_capenable ^= IFCAP_TXCSUM;
                        if (ifp->if_capenable & IFCAP_TXCSUM)
                                ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
                        else
                                ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
                }

                if (mask & IFCAP_TSO4) {
                        ifp->if_capenable ^= IFCAP_TSO4;
                        if (ifp->if_capenable & IFCAP_TSO4)
                                ifp->if_hwassist |= CSUM_TSO;
                        else
                                ifp->if_hwassist &= ~CSUM_TSO;
                }

                if (mask & IFCAP_RXCSUM) {
                        ifp->if_capenable ^= IFCAP_RXCSUM;
                        reinit = 1;
                }

                if (mask & IFCAP_LRO) {
                        ifp->if_capenable ^= IFCAP_LRO;
                        reinit = 1;
                }

                if (mask & IFCAP_VLAN_HWFILTER) {
                        ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
                        reinit = 1;
                }

                if (mask & IFCAP_VLAN_HWTSO)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

                if (mask & IFCAP_VLAN_HWTAGGING)
                        ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

                if (reinit && (ifp->if_flags & IFF_RUNNING)) {
                        ifp->if_flags &= ~IFF_RUNNING;
                        vtnet_init_locked(sc);
                }
                //VLAN_CAPABILITIES(ifp);

                lwkt_serialize_exit(&sc->vtnet_slz);
                break;

        default:
                error = ether_ioctl(ifp, cmd, data);
                break;
        }

        return (error);
}

static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
        struct ifnet *ifp;
        int new_frame_size, clsize;

        ifp = sc->vtnet_ifp;

        if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
                new_frame_size = sizeof(struct vtnet_rx_header) +
                    sizeof(struct ether_vlan_header) + new_mtu;

                if (new_frame_size > MJUM9BYTES)
                        return (EINVAL);

                if (new_frame_size <= MCLBYTES)
                        clsize = MCLBYTES;
                else
                        clsize = MJUM9BYTES;
        } else {
                new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
                    sizeof(struct ether_vlan_header) + new_mtu;

                if (new_frame_size <= MCLBYTES)
                        clsize = MCLBYTES;
                else
                        clsize = MJUMPAGESIZE;
        }

        sc->vtnet_rx_mbuf_size = clsize;
        sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
        KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
            ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

        ifp->if_mtu = new_mtu;

        if (ifp->if_flags & IFF_RUNNING) {
                ifp->if_flags &= ~IFF_RUNNING;
                vtnet_init_locked(sc);
        }

        return (0);
}
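
/*
 * Worked example for the sizing above (illustrative numbers): with
 * mergeable buffers and an MTU of 1500, new_frame_size is 12
 * (virtio_net_hdr_mrg_rxbuf) + 18 (ether_vlan_header) + 1500 = 1530,
 * so MCLBYTES (2048) clusters suffice; an MTU of 9000 gives 9030 and
 * selects MJUMPAGESIZE clusters instead, with large frames spanning
 * multiple receive buffers.
 */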

static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
        struct virtqueue *vq;
        int nbufs, error;

        vq = sc->vtnet_rx_vq;
        nbufs = 0;
        error = ENOSPC;

        while (!virtqueue_full(vq)) {
                if ((error = vtnet_newbuf(sc)) != 0)
                        break;
                nbufs++;
        }

        if (nbufs > 0) {
                virtqueue_notify(vq, &sc->vtnet_slz);

                /*
                 * EMSGSIZE signifies the virtqueue did not have enough
                 * entries available to hold the last mbuf. This is not
                 * an error. We should not get ENOSPC since we check if
                 * the virtqueue is full before attempting to add a
                 * buffer.
                 */
                if (error == EMSGSIZE)
                        error = 0;
        }

        return (error);
}

static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
        struct virtqueue *vq;
        struct mbuf *m;
        int last;

        vq = sc->vtnet_rx_vq;
        last = 0;

        while ((m = virtqueue_drain(vq, &last)) != NULL)
                m_freem(m);

        KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}

static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
        struct virtqueue *vq;
        struct vtnet_tx_header *txhdr;
        int last;

        vq = sc->vtnet_tx_vq;
        last = 0;

        while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
                m_freem(txhdr->vth_mbuf);
                vtnet_enqueue_txhdr(sc, txhdr);
        }

        KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
        /*
         * The control virtqueue is only polled, therefore
         * it should already be empty.
         */
        KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
                ("Ctrl Vq not empty"));
}

static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
        struct mbuf *m_head, *m_tail, *m;
        int i, clsize;

        clsize = sc->vtnet_rx_mbuf_size;

        /*
         * Use m_getcl() instead of m_getjcl(); see the comment around
         * line 2398 of if_mxge.c.
         */
        //m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
        m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m_head == NULL)
                goto fail;

        m_head->m_len = clsize;
        m_tail = m_head;

        if (nbufs > 1) {
                KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
                        ("chained Rx mbuf requested without LRO_NOMRG"));

                for (i = 0; i < nbufs - 1; i++) {
                        //m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
                        m = m_getcl(M_NOWAIT, MT_DATA, 0);
                        if (m == NULL)
                                goto fail;

                        m->m_len = clsize;
                        m_tail->m_next = m;
                        m_tail = m;
                }
        }

        if (m_tailp != NULL)
                *m_tailp = m_tail;

        return (m_head);

fail:
        sc->vtnet_stats.mbuf_alloc_failed++;
        m_freem(m_head);

        return (NULL);
}

static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
        struct mbuf *m, *m_prev;
        struct mbuf *m_new, *m_tail;
        int len, clsize, nreplace, error;

        m = m0;
        m_prev = NULL;
        len = len0;

        m_tail = NULL;
        clsize = sc->vtnet_rx_mbuf_size;
        nreplace = 0;

        if (m->m_next != NULL)
                KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
                    ("chained Rx mbuf without LRO_NOMRG"));

        /*
         * Since LRO_NOMRG mbuf chains are so large, we want to avoid
         * allocating an entire chain for each received frame. When
         * the received frame's length is less than that of the chain,
         * the unused mbufs are reassigned to the new chain.
         */
        while (len > 0) {
                /*
                 * Something is seriously wrong if we received
                 * a frame larger than the mbuf chain. Drop it.
                 */
                if (m == NULL) {
                        sc->vtnet_stats.rx_frame_too_large++;
                        return (EMSGSIZE);
                }

                KASSERT(m->m_len == clsize,
                    ("mbuf length not expected cluster size: %d",
                    m->m_len));

                m->m_len = MIN(m->m_len, len);
                len -= m->m_len;

                m_prev = m;
                m = m->m_next;
                nreplace++;
        }

        KASSERT(m_prev != NULL, ("m_prev == NULL"));
        KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
                ("too many replacement mbufs: %d/%d", nreplace,
                sc->vtnet_rx_mbuf_count));

        m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
        if (m_new == NULL) {
                m_prev->m_len = clsize;
                return (ENOBUFS);
        }

        /*
         * Move unused mbufs, if any, from the original chain
         * onto the end of the new chain.
         */
        if (m_prev->m_next != NULL) {
                m_tail->m_next = m_prev->m_next;
                m_prev->m_next = NULL;
        }

        error = vtnet_enqueue_rxbuf(sc, m_new);
        if (error) {
                /*
                 * BAD! We could not enqueue the replacement mbuf chain. We
                 * must restore the m0 chain to the original state if it was
                 * modified so we can subsequently discard it.
                 *
                 * NOTE: The replacement is supposed to be an identical copy
                 * of the one just dequeued, so this is an unexpected error.
                 */
                sc->vtnet_stats.rx_enq_replacement_failed++;

                if (m_tail->m_next != NULL) {
                        m_prev->m_next = m_tail->m_next;
                        m_tail->m_next = NULL;
                }

                m_prev->m_len = clsize;
                m_freem(m_new);
        }

        return (error);
}

static int
vtnet_newbuf(struct vtnet_softc *sc)
{
        struct mbuf *m;
        int error;

        m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
        if (m == NULL)
                return (ENOBUFS);

        error = vtnet_enqueue_rxbuf(sc, m);
        if (error)
                m_freem(m);

        return (error);
}

static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
        struct virtqueue *vq;
        struct mbuf *m;

        vq = sc->vtnet_rx_vq;

        while (--nbufs > 0) {
                if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
                        break;
                vtnet_discard_rxbuf(sc, m);
        }
}

static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
        int error;

        /*
         * Requeue the discarded mbuf. This should always be
         * successful since it was just dequeued.
         */
        error = vtnet_enqueue_rxbuf(sc, m);
        KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}

static int
vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
        struct sglist sg;
        struct sglist_seg segs[VTNET_MAX_RX_SEGS];
        struct vtnet_rx_header *rxhdr;
        struct virtio_net_hdr *hdr;
        uint8_t *mdata;
        int offset, error;

        ASSERT_SERIALIZED(&sc->vtnet_slz);
        if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
                KASSERT(m->m_next == NULL, ("chained Rx mbuf"));

        sglist_init(&sg, sc->vtnet_rx_nsegs, segs);

        mdata = mtod(m, uint8_t *);
        offset = 0;

        if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
                rxhdr = (struct vtnet_rx_header *) mdata;
                hdr = &rxhdr->vrh_hdr;
                offset += sizeof(struct vtnet_rx_header);

                error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
                KASSERT(error == 0, ("cannot add header to sglist"));
        }

        error = sglist_append(&sg, mdata + offset, m->m_len - offset);
        if (error)
                return (error);

        if (m->m_next != NULL) {
                error = sglist_append_mbuf(&sg, m->m_next);
                if (error)
                        return (error);
        }

        return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
}

static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
        struct ether_vlan_header *evl;

        evl = mtod(m, struct ether_vlan_header *);

        m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
        m->m_flags |= M_VLANTAG;

        /* Strip the 802.1Q header. */
        bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
            ETHER_HDR_LEN - ETHER_TYPE_LEN);
        m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
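
/*
 * Layout behind the strip above: the 4-byte 802.1Q tag sits between the
 * source address and the type field,
 *
 *   [dst 0-5][src 6-11][TPID 12-13][TCI 14-15][type 16-17][payload ...]
 *
 * so copying the 12 address bytes (ETHER_HDR_LEN - ETHER_TYPE_LEN)
 * forward by ETHER_VLAN_ENCAP_LEN and trimming 4 bytes from the front
 * leaves a plain Ethernet header.
 */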

/*
 * Alternative method of doing receive checksum offloading. Rather
 * than parsing the received frame down to the IP header, use the
 * csum_offset to determine which CSUM_* flags are appropriate. We
 * can get by with doing this only because the checksum offsets are
 * unique for the things we care about.
 */
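/*
 * For example (illustrative values): for a TCP segment in an untagged
 * IPv4 frame, the host sets csum_start to 34 (14-byte Ethernet header
 * plus 20-byte IP header) and csum_offset to 16, matching
 * offsetof(struct tcphdr, th_sum); a UDP datagram instead has
 * csum_offset 6, matching offsetof(struct udphdr, uh_sum).
 */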
1300 static int
1301 vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
1302     struct virtio_net_hdr *hdr)
1303 {
1304         struct ether_header *eh;
1305         struct ether_vlan_header *evh;
1306         struct udphdr *udp;
1307         int csum_len;
1308         uint16_t eth_type;
1309
1310         csum_len = hdr->csum_start + hdr->csum_offset;
1311
1312         if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
1313                 return (1);
1314         if (m->m_len < csum_len)
1315                 return (1);
1316
1317         eh = mtod(m, struct ether_header *);
1318         eth_type = ntohs(eh->ether_type);
1319         if (eth_type == ETHERTYPE_VLAN) {
1320                 evh = mtod(m, struct ether_vlan_header *);
1321                 eth_type = ntohs(evh->evl_proto);
1322         }
1323
1324         if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
1325                 sc->vtnet_stats.rx_csum_bad_ethtype++;
1326                 return (1);
1327         }
1328
1329         /* Use the offset to determine the appropriate CSUM_* flags. */
1330         switch (hdr->csum_offset) {
1331         case offsetof(struct udphdr, uh_sum):
1332                 if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
1333                         return (1);
1334                 udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
1335                 if (udp->uh_sum == 0)
1336                         return (0);
1337
1338                 /* FALLTHROUGH */
1339
1340         case offsetof(struct tcphdr, th_sum):
1341                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1342                 m->m_pkthdr.csum_data = 0xFFFF;
1343                 break;
1344
1345         default:
1346                 sc->vtnet_stats.rx_csum_bad_offset++;
1347                 return (1);
1348         }
1349
1350         sc->vtnet_stats.rx_csum_offloaded++;
1351
1352         return (0);
1353 }
1354
1355 static int
1356 vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
1357 {
1358         struct ifnet *ifp;
1359         struct virtqueue *vq;
1360         struct mbuf *m, *m_tail;
1361         int len;
1362
1363         ifp = sc->vtnet_ifp;
1364         vq = sc->vtnet_rx_vq;
1365         m_tail = m_head;
1366
1367         while (--nbufs > 0) {
1368                 m = virtqueue_dequeue(vq, &len);
1369                 if (m == NULL) {
1370                         ifp->if_ierrors++;
1371                         goto fail;
1372                 }
1373
1374                 if (vtnet_newbuf(sc) != 0) {
1375                         ifp->if_iqdrops++;
1376                         vtnet_discard_rxbuf(sc, m);
1377                         if (nbufs > 1)
1378                                 vtnet_discard_merged_rxbuf(sc, nbufs);
1379                         goto fail;
1380                 }
1381
1382                 if (m->m_len < len)
1383                         len = m->m_len;
1384
1385                 m->m_len = len;
1386                 m->m_flags &= ~M_PKTHDR;
1387
1388                 m_head->m_pkthdr.len += len;
1389                 m_tail->m_next = m;
1390                 m_tail = m;
1391         }
1392
1393         return (0);
1394
1395 fail:
1396         sc->vtnet_stats.rx_mergeable_failed++;
1397         m_freem(m_head);
1398
1399         return (1);
1400 }
1401
1402 static int
1403 vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
1404 {
1405         struct virtio_net_hdr lhdr;
1406         struct ifnet *ifp;
1407         struct virtqueue *vq;
1408         struct mbuf *m;
1409         struct ether_header *eh;
1410         struct virtio_net_hdr *hdr;
1411         struct virtio_net_hdr_mrg_rxbuf *mhdr;
1412         int len, deq, nbufs, adjsz, rx_npkts;
1413
1414         ifp = sc->vtnet_ifp;
1415         vq = sc->vtnet_rx_vq;
1416         hdr = &lhdr;
1417         deq = 0;
1418         rx_npkts = 0;
1419
1420         ASSERT_SERIALIZED(&sc->vtnet_slz);
1421
1422         while (--count >= 0) {
1423                 m = virtqueue_dequeue(vq, &len);
1424                 if (m == NULL)
1425                         break;
1426                 deq++;
1427
1428                 if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
1429                         ifp->if_ierrors++;
1430                         vtnet_discard_rxbuf(sc, m);
1431                         continue;
1432                 }
1433
1434                 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1435                         nbufs = 1;
1436                         adjsz = sizeof(struct vtnet_rx_header);
1437                         /*
1438                          * Account for our pad between the header and
1439                          * the actual start of the frame.
1440                          */
1441                         len += VTNET_RX_HEADER_PAD;
1442                 } else {
1443                         mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
1444                         nbufs = mhdr->num_buffers;
1445                         adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1446                 }
1447
1448                 if (vtnet_replace_rxbuf(sc, m, len) != 0) {
1449                         ifp->if_iqdrops++;
1450                         vtnet_discard_rxbuf(sc, m);
1451                         if (nbufs > 1)
1452                                 vtnet_discard_merged_rxbuf(sc, nbufs);
1453                         continue;
1454                 }
1455
1456                 m->m_pkthdr.len = len;
1457                 m->m_pkthdr.rcvif = ifp;
1458                 m->m_pkthdr.csum_flags = 0;
1459
1460                 if (nbufs > 1) {
1461                         if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
1462                                 continue;
1463                 }
1464
1465                 ifp->if_ipackets++;
1466
1467                 /*
1468                  * Save copy of header before we strip it. For both mergeable
1469                  * and non-mergeable, the VirtIO header is placed first in the
1470                  * mbuf's data. We no longer need num_buffers, so always use a
1471                  * virtio_net_hdr.
1472                  */
1473                 memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
1474                 m_adj(m, adjsz);
1475
1476                 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1477                         eh = mtod(m, struct ether_header *);
1478                         if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1479                                 vtnet_vlan_tag_remove(m);
1480
1481                                 /*
1482                                  * With the 802.1Q header removed, update the
1483                                  * checksum starting location accordingly.
1484                                  */
1485                                 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1486                                         hdr->csum_start -=
1487                                             ETHER_VLAN_ENCAP_LEN;
1488                         }
1489                 }
1490
1491                 if (ifp->if_capenable & IFCAP_RXCSUM &&
1492                     hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1493                         if (vtnet_rx_csum(sc, m, hdr) != 0)
1494                                 sc->vtnet_stats.rx_csum_failed++;
1495                 }
1496
1497                 lwkt_serialize_exit(&sc->vtnet_slz);
1498                 rx_npkts++;
1499                 ifp->if_input(ifp, m, NULL, -1);
1500                 lwkt_serialize_enter(&sc->vtnet_slz);
1501
1502                 /*
1503                  * The interface may have been stopped while we were
1504                  * passing the packet up the network stack.
1505                  */
1506                 if ((ifp->if_flags & IFF_RUNNING) == 0)
1507                         break;
1508         }
1509
1510         virtqueue_notify(vq, &sc->vtnet_slz);
1511
1512         if (rx_npktsp != NULL)
1513                 *rx_npktsp = rx_npkts;
1514
1515         return (count > 0 ? 0 : EAGAIN);
1516 }
1517
1518 static void
1519 vtnet_rx_intr_task(void *arg)
1520 {
1521         struct vtnet_softc *sc;
1522         struct ifnet *ifp;
1523         int more;
1524
1525         sc = arg;
1526         ifp = sc->vtnet_ifp;
1527
1528 next:
1529 //      lwkt_serialize_enter(&sc->vtnet_slz);
1530
1531         if ((ifp->if_flags & IFF_RUNNING) == 0) {
1532                 vtnet_enable_rx_intr(sc);
1534                 return;
1535         }
1536
1537         more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
1538         if (!more && vtnet_enable_rx_intr(sc) != 0) {
1539                 vtnet_disable_rx_intr(sc);
1540                 more = 1;
1541         }
1542
1545         if (more) {
1546                 sc->vtnet_stats.rx_task_rescheduled++;
1547                 goto next;
1548         }
1549 }
1550
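/*
 * Rx virtqueue interrupt handler: mask further Rx interrupts and run the
 * receive task directly from interrupt context.
 */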
1551 static int
1552 vtnet_rx_vq_intr(void *xsc)
1553 {
1554         struct vtnet_softc *sc;
1555
1556         sc = xsc;
1557
1558         vtnet_disable_rx_intr(sc);
1559         vtnet_rx_intr_task(sc);
1560
1561         return (1);
1562 }
1563
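/* Return a transmit header, cleared for reuse, to the softc's free list. */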
1564 static void
1565 vtnet_enqueue_txhdr(struct vtnet_softc *sc, struct vtnet_tx_header *txhdr)
1566 {
1567         bzero(txhdr, sizeof(*txhdr));
1568         SLIST_INSERT_HEAD(&sc->vtnet_txhdr_free, txhdr, link);
1569 }
1570
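/*
 * Reclaim completed transmissions: free each sent mbuf chain, recycle its
 * header, and clear OACTIVE so the stack may queue more packets. The
 * watchdog is disarmed once the Tx ring is fully drained.
 */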
1571 static void
1572 vtnet_txeof(struct vtnet_softc *sc)
1573 {
1574         struct virtqueue *vq;
1575         struct ifnet *ifp;
1576         struct vtnet_tx_header *txhdr;
1577         int deq;
1578
1579         vq = sc->vtnet_tx_vq;
1580         ifp = sc->vtnet_ifp;
1581         deq = 0;
1582
1583         ASSERT_SERIALIZED(&sc->vtnet_slz);
1584
1585         while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
1586                 deq++;
1587                 ifp->if_opackets++;
1588                 m_freem(txhdr->vth_mbuf);
1589                 vtnet_enqueue_txhdr(sc, txhdr);
1590         }
1591
1592         if (deq > 0) {
1593                 ifq_clr_oactive(&ifp->if_snd);
1594                 if (virtqueue_empty(vq))
1595                         sc->vtnet_watchdog_timer = 0;
1596         }
1597 }
1598
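/*
 * Fill in the virtio_net_hdr for checksum and TSO offload. The host needs
 * csum_start (offset from the start of the frame to the L4 header) and
 * csum_offset (offset of the checksum field within that header), plus the
 * GSO type/size for TSO. As a worked example, for a plain TCP/IPv4 frame
 * with a 20-byte IP header and no VLAN tag (assuming the usual layouts,
 * where csum_data carries the checksum field offset):
 *
 *	hdr->csum_start  = ETHER_HDR_LEN + 20 = 34
 *	hdr->csum_offset = 16 = offsetof(struct tcphdr, th_sum)
 *
 * so the host checksums the TCP segment and stores the result 16 bytes
 * into the TCP header.
 */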
1599 static struct mbuf *
1600 vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
1601     struct virtio_net_hdr *hdr)
1602 {
1603         struct ifnet *ifp;
1604         struct ether_header *eh;
1605         struct ether_vlan_header *evh;
1606         struct ip *ip;
1607         struct ip6_hdr *ip6;
1608         struct tcphdr *tcp;
1609         int ip_offset;
1610         uint16_t eth_type, csum_start;
1611         uint8_t ip_proto, gso_type;
1612
1613         ifp = sc->vtnet_ifp;
1614         M_ASSERTPKTHDR(m);
1615
1616         ip_offset = sizeof(struct ether_header);
1617         if (m->m_len < ip_offset) {
1618                 if ((m = m_pullup(m, ip_offset)) == NULL)
1619                         return (NULL);
1620         }
1621
1622         eh = mtod(m, struct ether_header *);
1623         eth_type = ntohs(eh->ether_type);
1624         if (eth_type == ETHERTYPE_VLAN) {
1625                 ip_offset = sizeof(struct ether_vlan_header);
1626                 if (m->m_len < ip_offset) {
1627                         if ((m = m_pullup(m, ip_offset)) == NULL)
1628                                 return (NULL);
1629                 }
1630                 evh = mtod(m, struct ether_vlan_header *);
1631                 eth_type = ntohs(evh->evl_proto);
1632         }
1633
1634         switch (eth_type) {
1635         case ETHERTYPE_IP:
1636                 if (m->m_len < ip_offset + sizeof(struct ip)) {
1637                         m = m_pullup(m, ip_offset + sizeof(struct ip));
1638                         if (m == NULL)
1639                                 return (NULL);
1640                 }
1641
1642                 ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
1643                 ip_proto = ip->ip_p;
1644                 csum_start = ip_offset + (ip->ip_hl << 2);
1645                 gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1646                 break;
1647
1648         case ETHERTYPE_IPV6:
1649                 if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
1650                         m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
1651                         if (m == NULL)
1652                                 return (NULL);
1653                 }
1654
1655                 ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
1656                 /*
1657                  * XXX Assume no extension headers are present. Presently,
1658                  * this will always be true in the case of TSO, and FreeBSD
1659                  * does not perform checksum offloading of IPv6 yet.
1660                  */
1661                 ip_proto = ip6->ip6_nxt;
1662                 csum_start = ip_offset + sizeof(struct ip6_hdr);
1663                 gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1664                 break;
1665
1666         default:
1667                 return (m);
1668         }
1669
1670         if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
1671                 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
1672                 hdr->csum_start = csum_start;
1673                 hdr->csum_offset = m->m_pkthdr.csum_data;
1674
1675                 sc->vtnet_stats.tx_csum_offloaded++;
1676         }
1677
1678         if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1679                 if (ip_proto != IPPROTO_TCP)
1680                         return (m);
1681
1682                 if (m->m_len < csum_start + sizeof(struct tcphdr)) {
1683                         m = m_pullup(m, csum_start + sizeof(struct tcphdr));
1684                         if (m == NULL)
1685                                 return (NULL);
1686                 }
1687
1688                 tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
1689                 hdr->gso_type = gso_type;
1690                 hdr->hdr_len = csum_start + (tcp->th_off << 2);
1691                 hdr->gso_size = m->m_pkthdr.tso_segsz;
1692
1693                 if (tcp->th_flags & TH_CWR) {
1694                         /*
1695                          * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
1696                          * ECN support is only configurable globally with the
1697                          * net.inet.tcp.ecn.enable sysctl knob.
1698                          */
1699                         if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
1700                                 if_printf(ifp, "TSO with ECN not supported "
1701                                     "by host\n");
1702                                 m_freem(m);
1703                                 return (NULL);
1704                         }
1705
1706                         hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1707                 }
1708
1709                 sc->vtnet_stats.tx_tso_offloaded++;
1710         }
1711
1712         return (m);
1713 }
1714
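/*
 * Enqueue a frame on the Tx virtqueue. The scatter/gather list always
 * carries the virtio header as its first, device-readable segment,
 * followed by the mbuf chain. An overlong chain is defragmented once;
 * a second failure drops the packet.
 */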
1715 static int
1716 vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
1717     struct vtnet_tx_header *txhdr)
1718 {
1719         struct sglist sg;
1720         struct sglist_seg segs[VTNET_MAX_TX_SEGS];
1721         struct virtqueue *vq;
1722         struct mbuf *m;
1723         int error;
1724
1725         vq = sc->vtnet_tx_vq;
1726         m = *m_head;
1727
1728         sglist_init(&sg, sc->vtnet_tx_nsegs, segs);
1729         error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
1730         KASSERT(error == 0 && sg.sg_nseg == 1,
1731             ("%s: error %d adding header to sglist", __func__, error));
1732
1733         error = sglist_append_mbuf(&sg, m);
1734         if (error) {
1735                 m = m_defrag(m, M_NOWAIT);
1736                 if (m == NULL)
1737                         goto fail;
1738
1739                 *m_head = m;
1740                 sc->vtnet_stats.tx_defragged++;
1741
1742                 error = sglist_append_mbuf(&sg, m);
1743                 if (error)
1744                         goto fail;
1745         }
1746
1747         txhdr->vth_mbuf = m;
1748         error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);
1749
1750         return (error);
1751
1752 fail:
1753         sc->vtnet_stats.tx_defrag_failed++;
1754         m_freem(*m_head);
1755         *m_head = NULL;
1756
1757         return (ENOBUFS);
1758 }
1759
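/*
 * Software VLAN encapsulation, needed because the device offers no
 * hardware tag insertion: prepend ETHER_VLAN_ENCAP_LEN bytes, slide the
 * destination and source MAC addresses down, and splice the 4-byte
 * 802.1Q shim (TPID 0x8100 plus the tag) in ahead of the original
 * Ethertype.
 */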
1760 static struct mbuf *
1761 vtnet_vlan_tag_insert(struct mbuf *m)
1762 {
1763         struct mbuf *n;
1764         struct ether_vlan_header *evl;
1765
1766         if (M_WRITABLE(m) == 0) {
1767                 n = m_dup(m, M_NOWAIT);
1768                 m_freem(m);
1769                 if ((m = n) == NULL)
1770                         return (NULL);
1771         }
1772
1773         M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1774         if (m == NULL)
1775                 return (NULL);
1776         if (m->m_len < sizeof(struct ether_vlan_header)) {
1777                 m = m_pullup(m, sizeof(struct ether_vlan_header));
1778                 if (m == NULL)
1779                         return (NULL);
1780         }
1781
1782         /* Insert 802.1Q header into the existing Ethernet header. */
1783         evl = mtod(m, struct ether_vlan_header *);
1784         bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
1785               (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1786         evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1787         evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
1788         m->m_flags &= ~M_VLANTAG;
1789
1790         return (m);
1791 }
1792
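/*
 * Prepare one outbound mbuf: take a free Tx header, apply software VLAN
 * encapsulation and checksum/TSO offload as required, then hand the
 * result to vtnet_enqueue_txbuf(). On failure the header is returned to
 * the free list and the caller decides whether to requeue the mbuf.
 */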
1793 static int
1794 vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
1795 {
1796         struct vtnet_tx_header *txhdr;
1797         struct virtio_net_hdr *hdr;
1798         struct mbuf *m;
1799         int error;
1800
1801         txhdr = SLIST_FIRST(&sc->vtnet_txhdr_free);
1802         if (txhdr == NULL)
1803                 return (ENOBUFS);
1804         SLIST_REMOVE_HEAD(&sc->vtnet_txhdr_free, link);
1805
1806         /*
1807          * Always use the non-mergeable header to simplify things. When
1808          * the mergeable feature is negotiated, the num_buffers field
1809          * must be set to zero. We use vtnet_hdr_size later to enqueue
1810          * the correct header size to the host.
1811          */
1812         hdr = &txhdr->vth_uhdr.hdr;
1813         m = *m_head;
1814
1815         error = ENOBUFS;
1816
1817         if (m->m_flags & M_VLANTAG) {
1819                 m = vtnet_vlan_tag_insert(m);
1820                 if ((*m_head = m) == NULL)
1821                         goto fail;
1823         }
1824
1825         if (m->m_pkthdr.csum_flags != 0) {
1826                 m = vtnet_tx_offload(sc, m, hdr);
1827                 if ((*m_head = m) == NULL)
1828                         goto fail;
1829         }
1830
1831         error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
1832 fail:
1833         if (error != 0)
1834                 vtnet_enqueue_txhdr(sc, txhdr);
1835         return (error);
1836 }
1837
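/* ifnet if_start method: transmit under the per-device serializer. */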
1838 static void
1839 vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1840 {
1841         struct vtnet_softc *sc;
1842
1843         sc = ifp->if_softc;
1844
1845         ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1846         lwkt_serialize_enter(&sc->vtnet_slz);
1847         vtnet_start_locked(ifp, ifsq);
1848         lwkt_serialize_exit(&sc->vtnet_slz);
1849 }
1850
1851 static void
1852 vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1853 {
1854         struct vtnet_softc *sc;
1855         struct virtqueue *vq;
1856         struct mbuf *m0;
1857         int enq;
1858
1859         sc = ifp->if_softc;
1860         vq = sc->vtnet_tx_vq;
1861         enq = 0;
1862
1863         ASSERT_SERIALIZED(&sc->vtnet_slz);
1864
1865         if ((ifp->if_flags & IFF_RUNNING) == 0 ||
1866             (sc->vtnet_flags & VTNET_FLAG_LINK) == 0)
1867                 return;
1868
1869 #ifdef VTNET_TX_INTR_MODERATION
1870         if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
1871                 vtnet_txeof(sc);
1872 #endif
1873
1874         while (!ifsq_is_empty(ifsq)) {
1875                 if (virtqueue_full(vq)) {
1876                         ifq_set_oactive(&ifp->if_snd);
1877                         break;
1878                 }
1879
1880                 m0 = ifq_dequeue(&ifp->if_snd);
1881                 if (m0 == NULL)
1882                         break;
1883
1884                 if (vtnet_encap(sc, &m0) != 0) {
1885                         if (m0 == NULL)
1886                                 break;
1887                         ifq_prepend(&ifp->if_snd, m0);
1888                         ifq_set_oactive(&ifp->if_snd);
1889                         break;
1890                 }
1891
1892                 enq++;
1893                 ETHER_BPF_MTAP(ifp, m0);
1894         }
1895
1896         if (enq > 0) {
1897                 virtqueue_notify(vq, &sc->vtnet_slz);
1898                 sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT;
1899         }
1900 }
1901
1902 static void
1903 vtnet_tick(void *xsc)
1904 {
1905         struct vtnet_softc *sc;
1906
1907         sc = xsc;
1908
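        /*
         * XXX The watchdog/tick processing below is compiled out, although
         * vtnet_init_locked() still arms the callout once.
         */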
1909 #if 0
1910         ASSERT_SERIALIZED(&sc->vtnet_slz);
1911 #ifdef VTNET_DEBUG
1912         virtqueue_dump(sc->vtnet_rx_vq);
1913         virtqueue_dump(sc->vtnet_tx_vq);
1914 #endif
1915
1916         vtnet_watchdog(sc);
1917         callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
1918 #endif
1919 }
1920
1921 static void
1922 vtnet_tx_intr_task(void *arg)
1923 {
1924         struct vtnet_softc *sc;
1925         struct ifnet *ifp;
1926         struct ifaltq_subque *ifsq;
1927
1928         sc = arg;
1929         ifp = sc->vtnet_ifp;
1930         ifsq = ifq_get_subq_default(&ifp->if_snd);
1931
1932 next:
1934
1935         if ((ifp->if_flags & IFF_RUNNING) == 0) {
1936                 vtnet_enable_tx_intr(sc);
1938                 return;
1939         }
1940
1941         vtnet_txeof(sc);
1942
1943         if (!ifsq_is_empty(ifsq))
1944                 vtnet_start_locked(ifp, ifsq);
1945
1946         if (vtnet_enable_tx_intr(sc) != 0) {
1947                 vtnet_disable_tx_intr(sc);
1948                 sc->vtnet_stats.tx_task_rescheduled++;
1950                 goto next;
1951         }
1952
1954 }
1955
1956 static int
1957 vtnet_tx_vq_intr(void *xsc)
1958 {
1959         struct vtnet_softc *sc;
1960
1961         sc = xsc;
1962
1963         vtnet_disable_tx_intr(sc);
1964         vtnet_tx_intr_task(sc);
1965
1966         return (1);
1967 }
1968
1969 static void
1970 vtnet_stop(struct vtnet_softc *sc)
1971 {
1972         device_t dev;
1973         struct ifnet *ifp;
1974
1975         dev = sc->vtnet_dev;
1976         ifp = sc->vtnet_ifp;
1977
1978         ASSERT_SERIALIZED(&sc->vtnet_slz);
1979
1980         sc->vtnet_watchdog_timer = 0;
1981         callout_stop(&sc->vtnet_tick_ch);
1982         ifq_clr_oactive(&ifp->if_snd);
1983         ifp->if_flags &= ~IFF_RUNNING;
1984
1985         vtnet_disable_rx_intr(sc);
1986         vtnet_disable_tx_intr(sc);
1987
1988         /*
1989          * Stop the host VirtIO adapter. Note this will reset the host
1990          * adapter's state back to the pre-initialized state, so in
1991          * order to make the device usable again, we must drive it
1992          * through virtio_reinit() and virtio_reinit_complete().
1993          */
1994         virtio_stop(dev);
1995
1996         sc->vtnet_flags &= ~VTNET_FLAG_LINK;
1997
1998         vtnet_free_rx_mbufs(sc);
1999         vtnet_free_tx_mbufs(sc);
2000 }
2001
2002 static int
2003 vtnet_virtio_reinit(struct vtnet_softc *sc)
2004 {
2005         device_t dev;
2006         struct ifnet *ifp;
2007         uint64_t features;
2008         int error;
2009
2010         dev = sc->vtnet_dev;
2011         ifp = sc->vtnet_ifp;
2012         features = sc->vtnet_features;
2013
2014         /*
2015          * Re-negotiate with the host, removing any disabled receive
2016          * features. Transmit features are disabled only on our side
2017          * via if_capenable and if_hwassist.
2018          */
2019
2020         if (ifp->if_capabilities & IFCAP_RXCSUM) {
2021                 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2022                         features &= ~VIRTIO_NET_F_GUEST_CSUM;
2023         }
2024
2025         if (ifp->if_capabilities & IFCAP_LRO) {
2026                 if ((ifp->if_capenable & IFCAP_LRO) == 0)
2027                         features &= ~VTNET_LRO_FEATURES;
2028         }
2029
2030         if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
2031                 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2032                         features &= ~VIRTIO_NET_F_CTRL_VLAN;
2033         }
2034
2035         error = virtio_reinit(dev, features);
2036         if (error)
2037                 device_printf(dev, "virtio reinit error %d\n", error);
2038
2039         return (error);
2040 }
2041
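/*
 * Bring the interface up: stop any previous instance, renegotiate
 * features with the host, reprogram the MAC address and offload flags,
 * refill the Rx virtqueue, restore Rx/VLAN filters, enable interrupts,
 * and finally mark the interface running.
 */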
2042 static void
2043 vtnet_init_locked(struct vtnet_softc *sc)
2044 {
2045         device_t dev;
2046         struct ifnet *ifp;
2047         int error;
2048
2049         dev = sc->vtnet_dev;
2050         ifp = sc->vtnet_ifp;
2051
2052         ASSERT_SERIALIZED(&sc->vtnet_slz);
2053
2054         if (ifp->if_flags & IFF_RUNNING)
2055                 return;
2056
2057         /* Stop host's adapter, cancel any pending I/O. */
2058         vtnet_stop(sc);
2059
2060         /* Reinitialize the host device. */
2061         error = vtnet_virtio_reinit(sc);
2062         if (error) {
2063                 device_printf(dev,
2064                     "reinitialization failed, stopping device...\n");
2065                 vtnet_stop(sc);
2066                 return;
2067         }
2068
2069         /* Update host with assigned MAC address. */
2070         bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
2071         vtnet_set_hwaddr(sc);
2072
2073         ifp->if_hwassist = 0;
2074         if (ifp->if_capenable & IFCAP_TXCSUM)
2075                 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
2076         if (ifp->if_capenable & IFCAP_TSO4)
2077                 ifp->if_hwassist |= CSUM_TSO;
2078
2079         error = vtnet_init_rx_vq(sc);
2080         if (error) {
2081                 device_printf(dev,
2082                     "cannot allocate mbufs for Rx virtqueue\n");
2083                 vtnet_stop(sc);
2084                 return;
2085         }
2086
2087         if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
2088                 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2089                         /* Restore promiscuous and all-multicast modes. */
2090                         vtnet_rx_filter(sc);
2091
2092                         /* Restore filtered MAC addresses. */
2093                         vtnet_rx_filter_mac(sc);
2094                 }
2095
2096                 /* Restore VLAN filters. */
2097                 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2098                         vtnet_rx_filter_vlan(sc);
2099         }
2100
2101         vtnet_enable_rx_intr(sc);
2102         vtnet_enable_tx_intr(sc);
2105
2106         ifp->if_flags |= IFF_RUNNING;
2107         ifq_clr_oactive(&ifp->if_snd);
2108
2109         virtio_reinit_complete(dev);
2110
2111         vtnet_update_link_status(sc);
2112         callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
2113 }
2114
2115 static void
2116 vtnet_init(void *xsc)
2117 {
2118         struct vtnet_softc *sc;
2119
2120         sc = xsc;
2121
2122         lwkt_serialize_enter(&sc->vtnet_slz);
2123         vtnet_init_locked(sc);
2124         lwkt_serialize_exit(&sc->vtnet_slz);
2125 }
2126
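/*
 * Execute a command on the control virtqueue and busy-poll for the
 * host's reply. By convention each command consists of device-readable
 * segments (a virtio_net_ctrl_hdr plus any payload) followed by one
 * device-writable ack byte, which is why callers pass sg_nseg - 1
 * readable segments and a single writable segment.
 */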
2127 static void
2128 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
2129     struct sglist *sg, int readable, int writable)
2130 {
2131         struct virtqueue *vq;
2132         void *c;
2133
2134         vq = sc->vtnet_ctrl_vq;
2135
2136         ASSERT_SERIALIZED(&sc->vtnet_slz);
2137         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
2138             ("no control virtqueue"));
2139         KASSERT(virtqueue_empty(vq),
2140             ("control command already enqueued"));
2141
2142         if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
2143                 return;
2144
2145         virtqueue_notify(vq, &sc->vtnet_slz);
2146
2147         /*
2148          * Poll until the command is complete. Previously, we would
2149          * sleep until the control virtqueue interrupt handler woke
2150          * us up, but dropping the serializer (the VTNET_MTX of the
2151          * FreeBSD driver) leads to serialization difficulties.
2152          *
2153          * Furthermore, it appears QEMU/KVM only allocates three MSIX
2154          * vectors. Two of those vectors are needed for the Rx and Tx
2155          * virtqueues, and we do not support sharing a virtqueue and a
2156          * config change notification on the same MSIX vector.
2157          */
2158         c = virtqueue_poll(vq, NULL);
2159         KASSERT(c == cookie, ("unexpected control command response"));
2160 }
2161
2162 static int
2163 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
2164 {
2165         struct {
2166                 struct virtio_net_ctrl_hdr hdr __aligned(2);
2167                 uint8_t pad1;
2168                 char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8);
2169                 uint8_t pad2;
2170                 uint8_t ack;
2171         } s;
2172         struct sglist_seg segs[3];
2173         struct sglist sg;
2174         int error;
2175
2176         s.hdr.class = VIRTIO_NET_CTRL_MAC;
2177         s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
2178         s.ack = VIRTIO_NET_ERR;
2179
2180         /* Copy the MAC address into physically contiguous memory. */
2181         memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN);
2182
2183         sglist_init(&sg, 3, segs);
2184         error = 0;
2185         error |= sglist_append(&sg, &s.hdr,
2186             sizeof(struct virtio_net_ctrl_hdr));
2187         error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN);
2188         error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2189         KASSERT(error == 0 && sg.sg_nseg == 3,
2190             ("%s: error %d adding set MAC msg to sglist", __func__, error));
2191
2192         vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2193
2194         return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2195 }
2196
2197 static void
2198 vtnet_rx_filter(struct vtnet_softc *sc)
2199 {
2200         device_t dev;
2201         struct ifnet *ifp;
2202
2203         dev = sc->vtnet_dev;
2204         ifp = sc->vtnet_ifp;
2205
2206         ASSERT_SERIALIZED(&sc->vtnet_slz);
2207         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2208             ("CTRL_RX feature not negotiated"));
2209
2210         if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
2211                 device_printf(dev, "cannot %s promiscuous mode\n",
2212                     (ifp->if_flags & IFF_PROMISC) ? "enable" : "disable");
2213
2214         if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
2215                 device_printf(dev, "cannot %s all-multicast mode\n",
2216                     (ifp->if_flags & IFF_ALLMULTI) ? "enable" : "disable");
2217 }
2218
2219 static int
2220 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
2221 {
2222         struct sglist_seg segs[3];
2223         struct sglist sg;
2224         struct {
2225                 struct virtio_net_ctrl_hdr hdr __aligned(2);
2226                 uint8_t pad1;
2227                 uint8_t onoff;
2228                 uint8_t pad2;
2229                 uint8_t ack;
2230         } s;
2231         int error;
2232
2233         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2234             ("%s: CTRL_RX feature not negotiated", __func__));
2235
2236         s.hdr.class = VIRTIO_NET_CTRL_RX;
2237         s.hdr.cmd = cmd;
2238         s.onoff = !!on;
2239         s.ack = VIRTIO_NET_ERR;
2240
2241         sglist_init(&sg, 3, segs);
2242         error = 0;
2243         error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2244         error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
2245         error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2246         KASSERT(error == 0 && sg.sg_nseg == 3,
2247             ("%s: error %d adding Rx message to sglist", __func__, error));
2248
2249         vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2250
2251         return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2252 }
2253
2254 static int
2255 vtnet_set_promisc(struct vtnet_softc *sc, int on)
2256 {
2257
2258         return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
2259 }
2260
2261 static int
2262 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
2263 {
2264
2265         return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
2266 }
2267
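/*
 * Program the host's unicast and multicast MAC filter tables from the
 * interface's current address lists. If either list overflows
 * VTNET_MAX_MAC_ENTRIES, that table is left empty and the device falls
 * back to promiscuous or all-multicast mode instead.
 */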
2268 static void
2269 vtnet_rx_filter_mac(struct vtnet_softc *sc)
2270 {
2271         struct virtio_net_ctrl_hdr hdr __aligned(2);
2272         struct vtnet_mac_filter *filter;
2273         struct sglist_seg segs[4];
2274         struct sglist sg;
2275         struct ifnet *ifp;
2276         struct ifaddr *ifa;
2277         struct ifaddr_container *ifac;
2278         struct ifmultiaddr *ifma;
2279         int ucnt, mcnt, promisc, allmulti, error;
2280         uint8_t ack;
2281
2282         ifp = sc->vtnet_ifp;
2283         ucnt = 0;
2284         mcnt = 0;
2285         promisc = 0;
2286         allmulti = 0;
2287
2288         ASSERT_SERIALIZED(&sc->vtnet_slz);
2289         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2290             ("%s: CTRL_RX feature not negotiated", __func__));
2291
2292         /* Use the MAC filtering table allocated in vtnet_attach. */
2293         filter = sc->vtnet_macfilter;
2294         memset(filter, 0, sizeof(struct vtnet_mac_filter));
2295
2296         /* Unicast MAC addresses: */
2298         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2299                 ifa = ifac->ifa;
2300                 if (ifa->ifa_addr->sa_family != AF_LINK)
2301                         continue;
2302                 else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2303                     sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
2304                         continue;
2305                 else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
2306                         promisc = 1;
2307                         break;
2308                 }
2309
2310                 bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2311                     &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
2312                 ucnt++;
2313         }
2315
2316         if (promisc != 0) {
2317                 filter->vmf_unicast.nentries = 0;
2318                 if_printf(ifp, "more than %d MAC addresses assigned, "
2319                     "falling back to promiscuous mode\n",
2320                     VTNET_MAX_MAC_ENTRIES);
2321         } else
2322                 filter->vmf_unicast.nentries = ucnt;
2323
2324         /* Multicast MAC addresses: */
2326         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2327                 if (ifma->ifma_addr->sa_family != AF_LINK)
2328                         continue;
2329                 else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
2330                         allmulti = 1;
2331                         break;
2332                 }
2333
2334                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2335                     &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
2336                 mcnt++;
2337         }
2339
2340         if (allmulti != 0) {
2341                 filter->vmf_multicast.nentries = 0;
2342                 if_printf(ifp, "more than %d multicast MAC addresses "
2343                     "assigned, falling back to all-multicast mode\n",
2344                     VTNET_MAX_MAC_ENTRIES);
2345         } else
2346                 filter->vmf_multicast.nentries = mcnt;
2347
2348         if (promisc != 0 && allmulti != 0)
2349                 goto out;
2350
2351         hdr.class = VIRTIO_NET_CTRL_MAC;
2352         hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
2353         ack = VIRTIO_NET_ERR;
2354
2355         sglist_init(&sg, 4, segs);
2356         error = 0;
2357         error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
2358         error |= sglist_append(&sg, &filter->vmf_unicast,
2359             sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
2360         error |= sglist_append(&sg, &filter->vmf_multicast,
2361             sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
2362         error |= sglist_append(&sg, &ack, sizeof(uint8_t));
2363         KASSERT(error == 0 && sg.sg_nseg == 4,
2364             ("%s: error %d adding MAC filter msg to sglist", __func__, error));
2365
2366         vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
2367
2368         if (ack != VIRTIO_NET_OK)
2369                 if_printf(ifp, "error setting host MAC filter table\n");
2370
2371 out:
2372         if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
2373                 if_printf(ifp, "cannot enable promiscuous mode\n");
2374         if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
2375                 if_printf(ifp, "cannot enable all-multicast mode\n");
2376 }
2377
2378 static int
2379 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2380 {
2381         struct sglist_seg segs[3];
2382         struct sglist sg;
2383         struct {
2384                 struct virtio_net_ctrl_hdr hdr __aligned(2);
2385                 uint8_t pad1;
2386                 uint16_t tag;
2387                 uint8_t pad2;
2388                 uint8_t ack;
2389         } s;
2390         int error;
2391
2392         s.hdr.class = VIRTIO_NET_CTRL_VLAN;
2393         s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
2394         s.tag = tag;
2395         s.ack = VIRTIO_NET_ERR;
2396
2397         sglist_init(&sg, 3, segs);
2398         error = 0;
2399         error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2400         error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
2401         error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2402         KASSERT(error == 0 && sg.sg_nseg == 3,
2403             ("%s: error %d adding VLAN message to sglist", __func__, error));
2404
2405         vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2406
2407         return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2408 }
2409
2410 static void
2411 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
2412 {
2413         uint32_t w;
2414         uint16_t tag;
2415         int i, bit, nvlans;
2416
2417         ASSERT_SERIALIZED(&sc->vtnet_slz);
2418         KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
2419             ("%s: VLAN_FILTER feature not negotiated", __func__));
2420
2421         nvlans = sc->vtnet_nvlans;
2422
2423         /* Enable the filter for each configured VLAN. */
2424         for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
2425                 w = sc->vtnet_vlan_shadow[i];
2426                 while ((bit = ffs(w) - 1) != -1) {
2427                         w &= ~(1 << bit);
2428                         tag = sizeof(w) * CHAR_BIT * i + bit;
2429                         nvlans--;
2430
2431                         if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
2432                                 device_printf(sc->vtnet_dev,
2433                                     "cannot enable VLAN %d filter\n", tag);
2434                         }
2435                 }
2436         }
2437
2438         KASSERT(nvlans == 0, ("VLAN count incorrect"));
2439 }
2440
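/*
 * Track a VLAN tag in the 4096-bit shadow bitmap and, when hardware
 * filtering is enabled, mirror the change to the host. The bitmap is
 * indexed in 32-bit words: tag 100, for example, lives in word
 * 100 >> 5 = 3 at bit 100 & 0x1f = 4.
 */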
2441 static void
2442 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2443 {
2444         struct ifnet *ifp;
2445         int idx, bit;
2446
2447         ifp = sc->vtnet_ifp;
2448         idx = (tag >> 5) & 0x7F;
2449         bit = tag & 0x1F;
2450
2451         if (tag == 0 || tag > 4095)
2452                 return;
2453
2454         lwkt_serialize_enter(&sc->vtnet_slz);
2455
2456         /* Update shadow VLAN table. */
2457         if (add) {
2458                 sc->vtnet_nvlans++;
2459                 sc->vtnet_vlan_shadow[idx] |= (1 << bit);
2460         } else {
2461                 sc->vtnet_nvlans--;
2462                 sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
2463         }
2464
2465         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
2466             vtnet_exec_vlan_filter(sc, add, tag) != 0) {
2467                 device_printf(sc->vtnet_dev,
2468                     "cannot %s VLAN %d %s the host filter table\n",
2469                     add ? "add" : "remove", tag, add ? "to" : "from");
2470         }
2471
2472         lwkt_serialize_exit(&sc->vtnet_slz);
2473 }
2474
2475 static void
2476 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2477 {
2478
2479         if (ifp->if_softc != arg)
2480                 return;
2481
2482         vtnet_update_vlan_filter(arg, 1, tag);
2483 }
2484
2485 static void
2486 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2487 {
2488
2489         if (ifp->if_softc != arg)
2490                 return;
2491
2492         vtnet_update_vlan_filter(arg, 0, tag);
2493 }
2494
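/*
 * The emulated device has no selectable media or PHY: media changes are
 * rejected unless the type is Ethernet, and status reporting simply
 * reflects the VirtIO link state.
 */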
2495 static int
2496 vtnet_ifmedia_upd(struct ifnet *ifp)
2497 {
2498         struct vtnet_softc *sc;
2499         struct ifmedia *ifm;
2500
2501         sc = ifp->if_softc;
2502         ifm = &sc->vtnet_media;
2503
2504         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2505                 return (EINVAL);
2506
2507         return (0);
2508 }
2509
2510 static void
2511 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2512 {
2513         struct vtnet_softc *sc;
2514
2515         sc = ifp->if_softc;
2516
2517         ifmr->ifm_status = IFM_AVALID;
2518         ifmr->ifm_active = IFM_ETHER;
2519
2520         lwkt_serialize_enter(&sc->vtnet_slz);
2521         if (vtnet_is_link_up(sc) != 0) {
2522                 ifmr->ifm_status |= IFM_ACTIVE;
2523                 ifmr->ifm_active |= VTNET_MEDIATYPE;
2524         } else
2525                 ifmr->ifm_active |= IFM_NONE;
2526         lwkt_serialize_exit(&sc->vtnet_slz);
2527 }
2528
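/*
 * Register the per-device statistics as read-only sysctl nodes beneath
 * the device's tree; assuming the standard device sysctl layout, unit 0
 * would expose them as, e.g., "sysctl dev.vtnet.0.tx_defragged".
 */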
2529 static void
2530 vtnet_add_statistics(struct vtnet_softc *sc)
2531 {
2532         device_t dev;
2533         struct vtnet_statistics *stats;
2534         struct sysctl_ctx_list *ctx;
2535         struct sysctl_oid *tree;
2536         struct sysctl_oid_list *child;
2537
2538         dev = sc->vtnet_dev;
2539         stats = &sc->vtnet_stats;
2540         ctx = device_get_sysctl_ctx(dev);
2541         tree = device_get_sysctl_tree(dev);
2542         child = SYSCTL_CHILDREN(tree);
2543
2544         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
2545             CTLFLAG_RD, &stats->mbuf_alloc_failed, 0,
2546             "Mbuf cluster allocation failures");
2547
2548         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
2549             CTLFLAG_RD, &stats->rx_frame_too_large, 0,
2550             "Received frame larger than the mbuf chain");
2551         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
2552             CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0,
2553             "Enqueuing the replacement receive mbuf failed");
2554         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
2555             CTLFLAG_RD, &stats->rx_mergeable_failed, 0,
2556             "Mergeable buffers receive failures");
2557         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
2558             CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0,
2559             "Received checksum offloaded buffer with unsupported "
2560             "Ethernet type");
2561         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
2562             CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0,
2563             "Received checksum offloaded buffer with incorrect IP protocol");
2564         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
2565             CTLFLAG_RD, &stats->rx_csum_bad_offset, 0,
2566             "Received checksum offloaded buffer with incorrect offset");
2567         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
2568             CTLFLAG_RD, &stats->rx_csum_failed, 0,
2569             "Received buffer checksum offload failed");
2570         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
2571             CTLFLAG_RD, &stats->rx_csum_offloaded, 0,
2572             "Received buffer checksum offload succeeded");
2573         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
2574             CTLFLAG_RD, &stats->rx_task_rescheduled, 0,
2575             "Times the receive interrupt task rescheduled itself");
2576
2577         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
2578             CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0,
2579             "Aborted transmit of checksum offloaded buffer with unknown "
2580             "Ethernet type");
2581         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
2582             CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0,
2583             "Aborted transmit of TSO buffer with unknown Ethernet type");
2584         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
2585             CTLFLAG_RD, &stats->tx_defragged, 0,
2586             "Transmit mbufs defragged");
2587         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
2588             CTLFLAG_RD, &stats->tx_defrag_failed, 0,
2589             "Aborted transmit of buffer because defrag failed");
2590         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
2591             CTLFLAG_RD, &stats->tx_csum_offloaded, 0,
2592             "Offloaded checksum of transmitted buffer");
2593         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
2594             CTLFLAG_RD, &stats->tx_tso_offloaded, 0,
2595             "Segmentation offload of transmitted buffer");
2596         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
2597             CTLFLAG_RD, &stats->tx_task_rescheduled, 0,
2598             "Times the transmit interrupt task rescheduled itself");
2599 }
2600
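/*
 * Interrupt enable/disable wrappers. virtqueue_enable_intr() returns
 * nonzero if descriptors were posted while interrupts were off, in which
 * case the caller must process the ring again instead of waiting for an
 * interrupt that was already missed.
 */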
2601 static int
2602 vtnet_enable_rx_intr(struct vtnet_softc *sc)
2603 {
2604
2605         return (virtqueue_enable_intr(sc->vtnet_rx_vq));
2606 }
2607
2608 static void
2609 vtnet_disable_rx_intr(struct vtnet_softc *sc)
2610 {
2611
2612         virtqueue_disable_intr(sc->vtnet_rx_vq);
2613 }
2614
2615 static int
2616 vtnet_enable_tx_intr(struct vtnet_softc *sc)
2617 {
2618
2619 #ifdef VTNET_TX_INTR_MODERATION
2620         return (0);
2621 #else
2622         return (virtqueue_enable_intr(sc->vtnet_tx_vq));
2623 #endif
2624 }
2625
2626 static void
2627 vtnet_disable_tx_intr(struct vtnet_softc *sc)
2628 {
2629
2630         virtqueue_disable_intr(sc->vtnet_tx_vq);
2631 }