virtio - Get rid of nop {vtblk/vtnet}_modevent methods.
[dragonfly.git] / sys / dev / virtual / virtio / net / if_vtnet.c
1 /*-
2  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26
27 /* Driver for VirtIO network devices. */
28
29 #include <sys/cdefs.h>
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/sockio.h>
35 #include <sys/mbuf.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/socket.h>
39 #include <sys/sysctl.h>
40 #include <sys/taskqueue.h>
41 #include <sys/random.h>
42 #include <sys/sglist.h>
43 #include <sys/serialize.h>
44 #include <sys/bus.h>
45 #include <sys/rman.h>
46
47 #include <machine/limits.h>
48
49 #include <net/ethernet.h>
50 #include <net/if.h>
51 #include <net/if_arp.h>
52 #include <net/if_dl.h>
53 #include <net/if_types.h>
54 #include <net/if_media.h>
55 #include <net/vlan/if_vlan_var.h>
56 #include <net/vlan/if_vlan_ether.h>
57 #include <net/ifq_var.h>
58
59 #include <net/bpf.h>
60
61 #include <netinet/in_systm.h>
62 #include <netinet/in.h>
63 #include <netinet/ip.h>
64 #include <netinet/ip6.h>
65 #include <netinet/udp.h>
66 #include <netinet/tcp.h>
67
68 #include <dev/virtual/virtio/virtio/virtio.h>
69 #include <dev/virtual/virtio/virtio/virtqueue.h>
70 #include <dev/virtual/virtio/net/virtio_net.h>
71 #include <dev/virtual/virtio/net/if_vtnetvar.h>
72
73 #include "virtio_if.h"
74
75 MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");
76
77 static int      vtnet_probe(device_t);
78 static int      vtnet_attach(device_t);
79 static int      vtnet_detach(device_t);
80 static int      vtnet_suspend(device_t);
81 static int      vtnet_resume(device_t);
82 static int      vtnet_shutdown(device_t);
83 static int      vtnet_config_change(device_t);
84
85 static void     vtnet_negotiate_features(struct vtnet_softc *);
86 static int      vtnet_alloc_virtqueues(struct vtnet_softc *);
87 static void     vtnet_get_hwaddr(struct vtnet_softc *);
88 static void     vtnet_set_hwaddr(struct vtnet_softc *);
89 static int      vtnet_is_link_up(struct vtnet_softc *);
90 static void     vtnet_update_link_status(struct vtnet_softc *);
91 #if 0
92 static void     vtnet_watchdog(struct vtnet_softc *);
93 #endif
94 static void     vtnet_config_change_task(void *, int);
95 static int      vtnet_setup_interface(struct vtnet_softc *);
96 static int      vtnet_change_mtu(struct vtnet_softc *, int);
97 static int      vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
98
99 static int      vtnet_init_rx_vq(struct vtnet_softc *);
100 static void     vtnet_free_rx_mbufs(struct vtnet_softc *);
101 static void     vtnet_free_tx_mbufs(struct vtnet_softc *);
102 static void     vtnet_free_ctrl_vq(struct vtnet_softc *);
103
104 static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
105                     struct mbuf **);
106 static int      vtnet_replace_rxbuf(struct vtnet_softc *,
107                     struct mbuf *, int);
108 static int      vtnet_newbuf(struct vtnet_softc *);
109 static void     vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
110 static void     vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
111 static int      vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
112 static void     vtnet_vlan_tag_remove(struct mbuf *);
113 static int      vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
114                     struct virtio_net_hdr *);
115 static int      vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
116 static int      vtnet_rxeof(struct vtnet_softc *, int, int *);
117 static void     vtnet_rx_intr_task(void *);
118 static int      vtnet_rx_vq_intr(void *);
119
120 static void     vtnet_enqueue_txhdr(struct vtnet_softc *,
121                     struct vtnet_tx_header *);
122 static void     vtnet_txeof(struct vtnet_softc *);
123 static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
124                     struct virtio_net_hdr *);
125 static int      vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
126                     struct vtnet_tx_header *);
127 static int      vtnet_encap(struct vtnet_softc *, struct mbuf **);
128 static void     vtnet_start_locked(struct ifnet *, struct ifaltq_subque *);
129 static void     vtnet_start(struct ifnet *, struct ifaltq_subque *);
130 static void     vtnet_tick(void *);
131 static void     vtnet_tx_intr_task(void *);
132 static int      vtnet_tx_vq_intr(void *);
133
134 static void     vtnet_stop(struct vtnet_softc *);
135 static int      vtnet_virtio_reinit(struct vtnet_softc *);
136 static void     vtnet_init_locked(struct vtnet_softc *);
137 static void     vtnet_init(void *);
138
139 static void     vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
140                     struct sglist *, int, int);
141
142 static int      vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
143 static int      vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
144 static int      vtnet_set_promisc(struct vtnet_softc *, int);
145 static int      vtnet_set_allmulti(struct vtnet_softc *, int);
146 static void     vtnet_rx_filter(struct vtnet_softc *sc);
147 static void     vtnet_rx_filter_mac(struct vtnet_softc *);
148
149 static int      vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
150 static void     vtnet_rx_filter_vlan(struct vtnet_softc *);
151 static void     vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
152 static void     vtnet_register_vlan(void *, struct ifnet *, uint16_t);
153 static void     vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);
154
155 static int      vtnet_ifmedia_upd(struct ifnet *);
156 static void     vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);
157
158 static void     vtnet_add_statistics(struct vtnet_softc *);
159
160 static int      vtnet_enable_rx_intr(struct vtnet_softc *);
161 static int      vtnet_enable_tx_intr(struct vtnet_softc *);
162 static void     vtnet_disable_rx_intr(struct vtnet_softc *);
163 static void     vtnet_disable_tx_intr(struct vtnet_softc *);
164
165 /* Tunables. */
166 static int vtnet_csum_disable = 0;
167 TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
168 static int vtnet_tso_disable = 1;
169 TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
170 static int vtnet_lro_disable = 0;
171 TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
172
173 /*
174  * Reducing the number of transmit completed interrupts can
175  * improve performance. To do so, the define below keeps the
176  * Tx vq interrupt disabled and adds calls to vtnet_txeof()
177  * in the start and watchdog paths. The price to pay for this
178  * is the m_free'ing of transmitted mbufs may be delayed until
179  * the watchdog fires.
180  */
181 #define VTNET_TX_INTR_MODERATION
182
183 static struct virtio_feature_desc vtnet_feature_desc[] = {
184         { VIRTIO_NET_F_CSUM,            "TxChecksum"    },
185         { VIRTIO_NET_F_GUEST_CSUM,      "RxChecksum"    },
186         { VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "DynOffload"        },
187         { VIRTIO_NET_F_MAC,             "MacAddress"    },
188         { VIRTIO_NET_F_GSO,             "TxAllGSO"      },
189         { VIRTIO_NET_F_GUEST_TSO4,      "RxTSOv4"       },
190         { VIRTIO_NET_F_GUEST_TSO6,      "RxTSOv6"       },
191         { VIRTIO_NET_F_GUEST_ECN,       "RxECN"         },
192         { VIRTIO_NET_F_GUEST_UFO,       "RxUFO"         },
193         { VIRTIO_NET_F_HOST_TSO4,       "TxTSOv4"       },
194         { VIRTIO_NET_F_HOST_TSO6,       "TxTSOv6"       },
195         { VIRTIO_NET_F_HOST_ECN,        "TxTSOECN"      },
196         { VIRTIO_NET_F_HOST_UFO,        "TxUFO"         },
197         { VIRTIO_NET_F_MRG_RXBUF,       "MrgRxBuf"      },
198         { VIRTIO_NET_F_STATUS,          "Status"        },
199         { VIRTIO_NET_F_CTRL_VQ,         "ControlVq"     },
200         { VIRTIO_NET_F_CTRL_RX,         "RxMode"        },
201         { VIRTIO_NET_F_CTRL_VLAN,       "VLanFilter"    },
202         { VIRTIO_NET_F_CTRL_RX_EXTRA,   "RxModeExtra"   },
203         { VIRTIO_NET_F_GUEST_ANNOUNCE,  "GuestAnnounce" },
204         { VIRTIO_NET_F_MQ,              "RFS"           },
205         { VIRTIO_NET_F_CTRL_MAC_ADDR,   "SetMacAddress" },
206         { 0, NULL }
207 };
208
209 static device_method_t vtnet_methods[] = {
210         /* Device methods. */
211         DEVMETHOD(device_probe,         vtnet_probe),
212         DEVMETHOD(device_attach,        vtnet_attach),
213         DEVMETHOD(device_detach,        vtnet_detach),
214         DEVMETHOD(device_suspend,       vtnet_suspend),
215         DEVMETHOD(device_resume,        vtnet_resume),
216         DEVMETHOD(device_shutdown,      vtnet_shutdown),
217
218         /* VirtIO methods. */
219         DEVMETHOD(virtio_config_change, vtnet_config_change),
220
221         DEVMETHOD_END
222 };
223
224 static driver_t vtnet_driver = {
225         "vtnet",
226         vtnet_methods,
227         sizeof(struct vtnet_softc)
228 };
229
230 static devclass_t vtnet_devclass;
231
232 DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, NULL, NULL);
233 MODULE_VERSION(vtnet, 1);
234 MODULE_DEPEND(vtnet, virtio, 1, 1, 1);
235
236 static int
237 vtnet_probe(device_t dev)
238 {
239         if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
240                 return (ENXIO);
241
242         device_set_desc(dev, "VirtIO Networking Adapter");
243
244         return (BUS_PROBE_DEFAULT);
245 }
246
247 static int
248 vtnet_attach(device_t dev)
249 {
250         struct vtnet_softc *sc;
251         int error;
252
253         sc = device_get_softc(dev);
254         sc->vtnet_dev = dev;
255
256         lwkt_serialize_init(&sc->vtnet_slz);
257         callout_init(&sc->vtnet_tick_ch);
258
259         ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
260                      vtnet_ifmedia_sts);
261         ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
262         ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);
263
264         vtnet_add_statistics(sc);
265         SLIST_INIT(&sc->vtnet_txhdr_free);
266
267         /* Register our feature descriptions. */
268         virtio_set_feature_desc(dev, vtnet_feature_desc);
269         vtnet_negotiate_features(sc);
270
271         if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
272                 sc->vtnet_flags |= VTNET_FLAG_INDIRECT;
273
274         if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
275                 /* This feature should always be negotiated. */
276                 sc->vtnet_flags |= VTNET_FLAG_MAC;
277         }
278
279         if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
280                 sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
281                 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
282         } else {
283                 sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
284         }
285
286         sc->vtnet_rx_mbuf_size = MCLBYTES;
287         sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
288
289         if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
290                 sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;
291
292                 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
293                         sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
294                 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
295                         sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
296                 if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
297                     virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
298                         sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
299         }
300
301         /* Read (or generate) the MAC address for the adapter. */
302         vtnet_get_hwaddr(sc);
303
304         error = vtnet_alloc_virtqueues(sc);
305         if (error) {
306                 device_printf(dev, "cannot allocate virtqueues\n");
307                 goto fail;
308         }
309
310         error = vtnet_setup_interface(sc);
311         if (error) {
312                 device_printf(dev, "cannot setup interface\n");
313                 goto fail;
314         }
315
316         TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);
317
318         error = virtio_setup_intr(dev, &sc->vtnet_slz);
319         if (error) {
320                 device_printf(dev, "cannot setup virtqueue interrupts\n");
321                 ether_ifdetach(sc->vtnet_ifp);
322                 goto fail;
323         }
324
325         /*
326          * Device defaults to promiscuous mode for backwards
327          * compatibility. Turn it off if possible.
328          */
329         if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
330                 lwkt_serialize_enter(&sc->vtnet_slz);
331                 if (vtnet_set_promisc(sc, 0) != 0) {
332                         sc->vtnet_ifp->if_flags |= IFF_PROMISC;
333                         device_printf(dev,
334                             "cannot disable promiscuous mode\n");
335                 }
336                 lwkt_serialize_exit(&sc->vtnet_slz);
337         } else
338                 sc->vtnet_ifp->if_flags |= IFF_PROMISC;
339
340 fail:
341         if (error)
342                 vtnet_detach(dev);
343
344         return (error);
345 }
346
347 static int
348 vtnet_detach(device_t dev)
349 {
350         struct vtnet_softc *sc;
351         struct ifnet *ifp;
352
353         sc = device_get_softc(dev);
354         ifp = sc->vtnet_ifp;
355
356         if (device_is_attached(dev)) {
357                 lwkt_serialize_enter(&sc->vtnet_slz);
358                 vtnet_stop(sc);
359                 lwkt_serialize_exit(&sc->vtnet_slz);
360
361                 callout_stop(&sc->vtnet_tick_ch);
362                 taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task);
363
364                 ether_ifdetach(ifp);
365         }
366
367         if (sc->vtnet_vlan_attach != NULL) {
368                 EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
369                 sc->vtnet_vlan_attach = NULL;
370         }
371         if (sc->vtnet_vlan_detach != NULL) {
372                 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
373                 sc->vtnet_vlan_detach = NULL;
374         }
375
376         if (ifp) {
377                 if_free(ifp);
378                 sc->vtnet_ifp = NULL;
379         }
380
381         if (sc->vtnet_rx_vq != NULL)
382                 vtnet_free_rx_mbufs(sc);
383         if (sc->vtnet_tx_vq != NULL)
384                 vtnet_free_tx_mbufs(sc);
385         if (sc->vtnet_ctrl_vq != NULL)
386                 vtnet_free_ctrl_vq(sc);
387
388         if (sc->vtnet_txhdrarea != NULL) {
389                 contigfree(sc->vtnet_txhdrarea,
390                     sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
391                     M_VTNET);
392                 sc->vtnet_txhdrarea = NULL;
393         }
394         SLIST_INIT(&sc->vtnet_txhdr_free);
395         if (sc->vtnet_macfilter != NULL) {
396                 contigfree(sc->vtnet_macfilter,
397                     sizeof(struct vtnet_mac_filter), M_DEVBUF);
398                 sc->vtnet_macfilter = NULL;
399         }
400
401         ifmedia_removeall(&sc->vtnet_media);
402
403         return (0);
404 }
405
406 static int
407 vtnet_suspend(device_t dev)
408 {
409         struct vtnet_softc *sc;
410
411         sc = device_get_softc(dev);
412
413         lwkt_serialize_enter(&sc->vtnet_slz);
414         vtnet_stop(sc);
415         sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
416         lwkt_serialize_exit(&sc->vtnet_slz);
417
418         return (0);
419 }
420
421 static int
422 vtnet_resume(device_t dev)
423 {
424         struct vtnet_softc *sc;
425         struct ifnet *ifp;
426
427         sc = device_get_softc(dev);
428         ifp = sc->vtnet_ifp;
429
430         lwkt_serialize_enter(&sc->vtnet_slz);
431         if (ifp->if_flags & IFF_UP)
432                 vtnet_init_locked(sc);
433         sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
434         lwkt_serialize_exit(&sc->vtnet_slz);
435
436         return (0);
437 }
438
439 static int
440 vtnet_shutdown(device_t dev)
441 {
442
443         /*
444          * Suspend already does all of what we need to
445          * do here; we just never expect to be resumed.
446          */
447         return (vtnet_suspend(dev));
448 }
449
450 static int
451 vtnet_config_change(device_t dev)
452 {
453         struct vtnet_softc *sc;
454
455         sc = device_get_softc(dev);
456
457         taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);
458
459         return (1);
460 }
461
462 static void
463 vtnet_negotiate_features(struct vtnet_softc *sc)
464 {
465         device_t dev;
466         uint64_t mask, features;
467
468         dev = sc->vtnet_dev;
469         mask = 0;
470
471         if (vtnet_csum_disable)
472                 mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
473
474         /*
475          * TSO and LRO are only available when their corresponding checksum
476          * offload feature is also negotiated.
477          */
478
479         if (vtnet_csum_disable || vtnet_tso_disable)
480                 mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
481                     VIRTIO_NET_F_HOST_ECN;
482
483         if (vtnet_csum_disable || vtnet_lro_disable)
484                 mask |= VTNET_LRO_FEATURES;
485
486         features = VTNET_FEATURES & ~mask;
487         features |= VIRTIO_F_NOTIFY_ON_EMPTY;
488         features |= VIRTIO_F_ANY_LAYOUT;
489         sc->vtnet_features = virtio_negotiate_features(dev, features);
490
491         if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
492             virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
493                 /*
494                  * LRO without mergeable buffers requires special care. This
495                  * is not ideal because every receive buffer must be large
496                  * enough to hold the maximum TCP packet, the Ethernet header,
497                  * and the header. This requires up to 34 descriptors with
498                  * MCLBYTES clusters. If we do not have indirect descriptors,
499                  * LRO is disabled since the virtqueue will not contain very
500                  * many receive buffers.
501                  */
502                 if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
503                         device_printf(dev,
504                             "LRO disabled due to both mergeable buffers and "
505                             "indirect descriptors not negotiated\n");
506
507                         features &= ~VTNET_LRO_FEATURES;
508                         sc->vtnet_features =
509                             virtio_negotiate_features(dev, features);
510                 } else
511                         sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
512         }
513 }
514
515 static int
516 vtnet_alloc_virtqueues(struct vtnet_softc *sc)
517 {
518         device_t dev;
519         struct vq_alloc_info vq_info[3];
520         int nvqs;
521
522         dev = sc->vtnet_dev;
523         nvqs = 2;
524
525         /*
526          * Indirect descriptors are not needed for the Rx
527          * virtqueue when mergeable buffers are negotiated.
528          * The header is placed inline with the data, not
529          * in a separate descriptor, and mbuf clusters are
530          * always physically contiguous.
531          */
532         if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
533                 sc->vtnet_rx_nsegs = (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) ?
534                     VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
535         } else
536                 sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
537
538         if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
539             virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
540                 sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
541         else
542                 sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;
543
544         VQ_ALLOC_INFO_INIT(&vq_info[0], sc->vtnet_rx_nsegs,
545             vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
546             "%s receive", device_get_nameunit(dev));
547
548         VQ_ALLOC_INFO_INIT(&vq_info[1], sc->vtnet_tx_nsegs,
549             vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
550             "%s transmit", device_get_nameunit(dev));
551
552         if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
553                 nvqs++;
554
555                 VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
556                     &sc->vtnet_ctrl_vq, "%s control",
557                     device_get_nameunit(dev));
558         }
559
560         return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
561 }
562
563 static int
564 vtnet_setup_interface(struct vtnet_softc *sc)
565 {
566         device_t dev;
567         struct ifnet *ifp;
568         int i, tx_size;
569
570         dev = sc->vtnet_dev;
571
572         ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
573         if (ifp == NULL) {
574                 device_printf(dev, "cannot allocate ifnet structure\n");
575                 return (ENOSPC);
576         }
577
578         ifp->if_softc = sc;
579         if_initname(ifp, device_get_name(dev), device_get_unit(dev));
580         ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
581         ifp->if_init = vtnet_init;
582         ifp->if_start = vtnet_start;
583         ifp->if_ioctl = vtnet_ioctl;
584
585         sc->vtnet_rx_size = virtqueue_size(sc->vtnet_rx_vq);
586         sc->vtnet_rx_process_limit = sc->vtnet_rx_size;
587
588         tx_size = virtqueue_size(sc->vtnet_tx_vq);
589         sc->vtnet_tx_size = tx_size;
590         /* Select size, such that we never run out of tx_header entries. */
591         if (sc->vtnet_flags & VTNET_FLAG_INDIRECT)
592                 sc->vtnet_txhdrcount = sc->vtnet_tx_size;
593         else
594                 sc->vtnet_txhdrcount = (sc->vtnet_tx_size / 2) + 1;
595         sc->vtnet_txhdrarea = contigmalloc(
596             sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
597             M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
598         if (sc->vtnet_txhdrarea == NULL) {
599                 device_printf(dev, "cannot contigmalloc the tx headers\n");
600                 return (ENOMEM);
601         }
602         for (i = 0; i < sc->vtnet_txhdrcount; i++)
603                 vtnet_enqueue_txhdr(sc, &sc->vtnet_txhdrarea[i]);
604         sc->vtnet_macfilter = contigmalloc(
605             sizeof(struct vtnet_mac_filter),
606             M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
607         if (sc->vtnet_macfilter == NULL) {
608                 device_printf(dev,
609                     "cannot contigmalloc the mac filter table\n");
610                 return (ENOMEM);
611         }
612         ifq_set_maxlen(&ifp->if_snd, tx_size - 1);
613         ifq_set_ready(&ifp->if_snd);
614
615         ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);
616
617         if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)){
618                 //ifp->if_capabilities |= IFCAP_LINKSTATE;
619                  kprintf("add dynamic link state\n");
620         }
621
622         /* Tell the upper layer(s) we support long frames. */
623         ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
624         ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
625
626         if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
627                 ifp->if_capabilities |= IFCAP_TXCSUM;
628
629                 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
630                         ifp->if_capabilities |= IFCAP_TSO4;
631                 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
632                         ifp->if_capabilities |= IFCAP_TSO6;
633                 if (ifp->if_capabilities & IFCAP_TSO)
634                         ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
635
636                 if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
637                         sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
638         }
639
640         if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
641                 ifp->if_capabilities |= IFCAP_RXCSUM;
642
643                 if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
644                     virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
645                         ifp->if_capabilities |= IFCAP_LRO;
646         }
647
648         if (ifp->if_capabilities & IFCAP_HWCSUM) {
649                 /*
650                  * VirtIO does not support VLAN tagging, but we can fake
651                  * it by inserting and removing the 802.1Q header during
652                  * transmit and receive. We are then able to do checksum
653                  * offloading of VLAN frames.
654                  */
655                 ifp->if_capabilities |=
656                         IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
657         }
658
659         ifp->if_capenable = ifp->if_capabilities;
660
661         /*
662          * Capabilities after here are not enabled by default.
663          */
664
665         if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
666                 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
667
668                 sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
669                     vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
670                 sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
671                     vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
672         }
673
674         return (0);
675 }
676
677 static void
678 vtnet_set_hwaddr(struct vtnet_softc *sc)
679 {
680         device_t dev;
681
682         dev = sc->vtnet_dev;
683
684         if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) &&
685             (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) {
686                 if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
687                         device_printf(dev, "unable to set MAC address\n");
688         } else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
689                 virtio_write_device_config(dev,
690                     offsetof(struct virtio_net_config, mac),
691                     sc->vtnet_hwaddr, ETHER_ADDR_LEN);
692         }
693 }
694
695 static void
696 vtnet_get_hwaddr(struct vtnet_softc *sc)
697 {
698         device_t dev;
699
700         dev = sc->vtnet_dev;
701
702         if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
703                 /*
704                  * Generate a random locally administered unicast address.
705                  *
706                  * It would be nice to generate the same MAC address across
707                  * reboots, but it seems all the hosts currently available
708                  * support the MAC feature, so this isn't too important.
709                  */
710                 sc->vtnet_hwaddr[0] = 0xB2;
711                 karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);
712                 vtnet_set_hwaddr(sc);
713                 return;
714         }
715
716         virtio_read_device_config(dev,
717             offsetof(struct virtio_net_config, mac),
718             sc->vtnet_hwaddr, ETHER_ADDR_LEN);
719 }
720
721 static int
722 vtnet_is_link_up(struct vtnet_softc *sc)
723 {
724         device_t dev;
725         struct ifnet *ifp;
726         uint16_t status;
727
728         dev = sc->vtnet_dev;
729         ifp = sc->vtnet_ifp;
730
731         ASSERT_SERIALIZED(&sc->vtnet_slz);
732
733         status = virtio_read_dev_config_2(dev,
734                         offsetof(struct virtio_net_config, status));
735
736         return ((status & VIRTIO_NET_S_LINK_UP) != 0);
737 }
738
739 static void
740 vtnet_update_link_status(struct vtnet_softc *sc)
741 {
742         device_t dev;
743         struct ifnet *ifp;
744         struct ifaltq_subque *ifsq;
745         int link;
746
747         dev = sc->vtnet_dev;
748         ifp = sc->vtnet_ifp;
749         ifsq = ifq_get_subq_default(&ifp->if_snd);
750
751         link = vtnet_is_link_up(sc);
752
753         if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
754                 sc->vtnet_flags |= VTNET_FLAG_LINK;
755                 if (bootverbose)
756                         device_printf(dev, "Link is up\n");
757                 ifp->if_link_state = LINK_STATE_UP;
758                 if_link_state_change(ifp);
759                 if (!ifsq_is_empty(ifsq))
760                         vtnet_start_locked(ifp, ifsq);
761         } else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
762                 sc->vtnet_flags &= ~VTNET_FLAG_LINK;
763                 if (bootverbose)
764                         device_printf(dev, "Link is down\n");
765
766                 ifp->if_link_state = LINK_STATE_DOWN;
767                 if_link_state_change(ifp);
768         }
769 }
770
771 #if 0
772 static void
773 vtnet_watchdog(struct vtnet_softc *sc)
774 {
775         struct ifnet *ifp;
776
777         ifp = sc->vtnet_ifp;
778
779 #ifdef VTNET_TX_INTR_MODERATION
780         vtnet_txeof(sc);
781 #endif
782
783         if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
784                 return;
785
786         if_printf(ifp, "watchdog timeout -- resetting\n");
787 #ifdef VTNET_DEBUG
788         virtqueue_dump(sc->vtnet_tx_vq);
789 #endif
790         ifp->if_oerrors++;
791         ifp->if_flags &= ~IFF_RUNNING;
792         vtnet_init_locked(sc);
793 }
794 #endif
795
796 static void
797 vtnet_config_change_task(void *arg, int pending)
798 {
799         struct vtnet_softc *sc;
800
801         sc = arg;
802
803         lwkt_serialize_enter(&sc->vtnet_slz);
804         vtnet_update_link_status(sc);
805         lwkt_serialize_exit(&sc->vtnet_slz);
806 }
807
808 static int
809 vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data,struct ucred *cr)
810 {
811         struct vtnet_softc *sc;
812         struct ifreq *ifr;
813         int reinit, mask, error;
814
815         sc = ifp->if_softc;
816         ifr = (struct ifreq *) data;
817         reinit = 0;
818         error = 0;
819
820         switch (cmd) {
821         case SIOCSIFMTU:
822                 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
823                         error = EINVAL;
824                 else if (ifp->if_mtu != ifr->ifr_mtu) {
825                         lwkt_serialize_enter(&sc->vtnet_slz);
826                         error = vtnet_change_mtu(sc, ifr->ifr_mtu);
827                         lwkt_serialize_exit(&sc->vtnet_slz);
828                 }
829                 break;
830
831         case SIOCSIFFLAGS:
832                 lwkt_serialize_enter(&sc->vtnet_slz);
833                 if ((ifp->if_flags & IFF_UP) == 0) {
834                         if (ifp->if_flags & IFF_RUNNING)
835                                 vtnet_stop(sc);
836                 } else if (ifp->if_flags & IFF_RUNNING) {
837                         if ((ifp->if_flags ^ sc->vtnet_if_flags) &
838                             (IFF_PROMISC | IFF_ALLMULTI)) {
839                                 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
840                                         vtnet_rx_filter(sc);
841                                 else
842                                         error = ENOTSUP;
843                         }
844                 } else
845                         vtnet_init_locked(sc);
846
847                 if (error == 0)
848                         sc->vtnet_if_flags = ifp->if_flags;
849                 lwkt_serialize_exit(&sc->vtnet_slz);
850                 break;
851
852         case SIOCADDMULTI:
853         case SIOCDELMULTI:
854                 lwkt_serialize_enter(&sc->vtnet_slz);
855                 if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
856                     (ifp->if_flags & IFF_RUNNING))
857                         vtnet_rx_filter_mac(sc);
858                 lwkt_serialize_exit(&sc->vtnet_slz);
859                 break;
860
861         case SIOCSIFMEDIA:
862         case SIOCGIFMEDIA:
863                 error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
864                 break;
865
866         case SIOCSIFCAP:
867                 mask = ifr->ifr_reqcap ^ ifp->if_capenable;
868
869                 lwkt_serialize_enter(&sc->vtnet_slz);
870
871                 if (mask & IFCAP_TXCSUM) {
872                         ifp->if_capenable ^= IFCAP_TXCSUM;
873                         if (ifp->if_capenable & IFCAP_TXCSUM)
874                                 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
875                         else
876                                 ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
877                 }
878
879                 if (mask & IFCAP_TSO4) {
880                         ifp->if_capenable ^= IFCAP_TSO4;
881                         if (ifp->if_capenable & IFCAP_TSO4)
882                                 ifp->if_hwassist |= CSUM_TSO;
883                         else
884                                 ifp->if_hwassist &= ~CSUM_TSO;
885                 }
886
887                 if (mask & IFCAP_RXCSUM) {
888                         ifp->if_capenable ^= IFCAP_RXCSUM;
889                         reinit = 1;
890                 }
891
892                 if (mask & IFCAP_LRO) {
893                         ifp->if_capenable ^= IFCAP_LRO;
894                         reinit = 1;
895                 }
896
897                 if (mask & IFCAP_VLAN_HWFILTER) {
898                         ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
899                         reinit = 1;
900                 }
901
902                 if (mask & IFCAP_VLAN_HWTSO)
903                         ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
904
905                 if (mask & IFCAP_VLAN_HWTAGGING)
906                         ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
907
908                 if (reinit && (ifp->if_flags & IFF_RUNNING)) {
909                         ifp->if_flags &= ~IFF_RUNNING;
910                         vtnet_init_locked(sc);
911                 }
912                 //VLAN_CAPABILITIES(ifp);
913
914                 lwkt_serialize_exit(&sc->vtnet_slz);
915                 break;
916
917         default:
918                 error = ether_ioctl(ifp, cmd, data);
919                 break;
920         }
921
922         return (error);
923 }
924
925 static int
926 vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
927 {
928         struct ifnet *ifp;
929         int new_frame_size, clsize;
930
931         ifp = sc->vtnet_ifp;
932
933         if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
934                 new_frame_size = sizeof(struct vtnet_rx_header) +
935                     sizeof(struct ether_vlan_header) + new_mtu;
936
937                 if (new_frame_size > MJUM9BYTES)
938                         return (EINVAL);
939
940                 if (new_frame_size <= MCLBYTES)
941                         clsize = MCLBYTES;
942                 else
943                         clsize = MJUM9BYTES;
944         } else {
945                 new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
946                     sizeof(struct ether_vlan_header) + new_mtu;
947
948                 if (new_frame_size <= MCLBYTES)
949                         clsize = MCLBYTES;
950                 else
951                         clsize = MJUMPAGESIZE;
952         }
953
954         sc->vtnet_rx_mbuf_size = clsize;
955         sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
956         KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
957             ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));
958
959         ifp->if_mtu = new_mtu;
960
961         if (ifp->if_flags & IFF_RUNNING) {
962                 ifp->if_flags &= ~IFF_RUNNING;
963                 vtnet_init_locked(sc);
964         }
965
966         return (0);
967 }
968
969 static int
970 vtnet_init_rx_vq(struct vtnet_softc *sc)
971 {
972         struct virtqueue *vq;
973         int nbufs, error;
974
975         vq = sc->vtnet_rx_vq;
976         nbufs = 0;
977         error = ENOSPC;
978
979         while (!virtqueue_full(vq)) {
980                 if ((error = vtnet_newbuf(sc)) != 0)
981                         break;
982                 nbufs++;
983         }
984
985         if (nbufs > 0) {
986                 virtqueue_notify(vq, &sc->vtnet_slz);
987
988                 /*
989                  * EMSGSIZE signifies the virtqueue did not have enough
990                  * entries available to hold the last mbuf. This is not
991                  * an error. We should not get ENOSPC since we check if
992                  * the virtqueue is full before attempting to add a
993                  * buffer.
994                  */
995                 if (error == EMSGSIZE)
996                         error = 0;
997         }
998
999         return (error);
1000 }
1001
1002 static void
1003 vtnet_free_rx_mbufs(struct vtnet_softc *sc)
1004 {
1005         struct virtqueue *vq;
1006         struct mbuf *m;
1007         int last;
1008
1009         vq = sc->vtnet_rx_vq;
1010         last = 0;
1011
1012         while ((m = virtqueue_drain(vq, &last)) != NULL)
1013                 m_freem(m);
1014
1015         KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
1016 }
1017
1018 static void
1019 vtnet_free_tx_mbufs(struct vtnet_softc *sc)
1020 {
1021         struct virtqueue *vq;
1022         struct vtnet_tx_header *txhdr;
1023         int last;
1024
1025         vq = sc->vtnet_tx_vq;
1026         last = 0;
1027
1028         while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
1029                 m_freem(txhdr->vth_mbuf);
1030                 vtnet_enqueue_txhdr(sc, txhdr);
1031         }
1032
1033         KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
1034 }
1035
1036 static void
1037 vtnet_free_ctrl_vq(struct vtnet_softc *sc)
1038 {
1039         /*
1040          * The control virtqueue is only polled, therefore
1041          * it should already be empty.
1042          */
1043         KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq),
1044                 ("Ctrl Vq not empty"));
1045 }
1046
1047 static struct mbuf *
1048 vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
1049 {
1050         struct mbuf *m_head, *m_tail, *m;
1051         int i, clsize;
1052
1053         clsize = sc->vtnet_rx_mbuf_size;
1054
1055         /*use getcl instead of getjcl. see  if_mxge.c comment line 2398*/
1056         //m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
1057         m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR );
1058         if (m_head == NULL)
1059                 goto fail;
1060
1061         m_head->m_len = clsize;
1062         m_tail = m_head;
1063
1064         if (nbufs > 1) {
1065                 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1066                         ("chained Rx mbuf requested without LRO_NOMRG"));
1067
1068                 for (i = 0; i < nbufs - 1; i++) {
1069                         //m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
1070                         m = m_getcl(M_NOWAIT, MT_DATA, 0);
1071                         if (m == NULL)
1072                                 goto fail;
1073
1074                         m->m_len = clsize;
1075                         m_tail->m_next = m;
1076                         m_tail = m;
1077                 }
1078         }
1079
1080         if (m_tailp != NULL)
1081                 *m_tailp = m_tail;
1082
1083         return (m_head);
1084
1085 fail:
1086         sc->vtnet_stats.mbuf_alloc_failed++;
1087         m_freem(m_head);
1088
1089         return (NULL);
1090 }
1091
1092 static int
1093 vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
1094 {
1095         struct mbuf *m, *m_prev;
1096         struct mbuf *m_new, *m_tail;
1097         int len, clsize, nreplace, error;
1098
1099         m = m0;
1100         m_prev = NULL;
1101         len = len0;
1102
1103         m_tail = NULL;
1104         clsize = sc->vtnet_rx_mbuf_size;
1105         nreplace = 0;
1106
1107         if (m->m_next != NULL)
1108                 KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
1109                     ("chained Rx mbuf without LRO_NOMRG"));
1110
1111         /*
1112          * Since LRO_NOMRG mbuf chains are so large, we want to avoid
1113          * allocating an entire chain for each received frame. When
1114          * the received frame's length is less than that of the chain,
1115          * the unused mbufs are reassigned to the new chain.
1116          */
1117         while (len > 0) {
1118                 /*
1119                  * Something is seriously wrong if we received
1120                  * a frame larger than the mbuf chain. Drop it.
1121                  */
1122                 if (m == NULL) {
1123                         sc->vtnet_stats.rx_frame_too_large++;
1124                         return (EMSGSIZE);
1125                 }
1126
1127                 KASSERT(m->m_len == clsize,
1128                     ("mbuf length not expected cluster size: %d",
1129                     m->m_len));
1130
1131                 m->m_len = MIN(m->m_len, len);
1132                 len -= m->m_len;
1133
1134                 m_prev = m;
1135                 m = m->m_next;
1136                 nreplace++;
1137         }
1138
1139         KASSERT(m_prev != NULL, ("m_prev == NULL"));
1140         KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
1141                 ("too many replacement mbufs: %d/%d", nreplace,
1142                 sc->vtnet_rx_mbuf_count));
1143
1144         m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
1145         if (m_new == NULL) {
1146                 m_prev->m_len = clsize;
1147                 return (ENOBUFS);
1148         }
1149
1150         /*
1151          * Move unused mbufs, if any, from the original chain
1152          * onto the end of the new chain.
1153          */
1154         if (m_prev->m_next != NULL) {
1155                 m_tail->m_next = m_prev->m_next;
1156                 m_prev->m_next = NULL;
1157         }
1158
1159         error = vtnet_enqueue_rxbuf(sc, m_new);
1160         if (error) {
1161                 /*
1162                  * BAD! We could not enqueue the replacement mbuf chain. We
1163                  * must restore the m0 chain to the original state if it was
1164                  * modified so we can subsequently discard it.
1165                  *
1166                  * NOTE: The replacement is suppose to be an identical copy
1167                  * to the one just dequeued so this is an unexpected error.
1168                  */
1169                 sc->vtnet_stats.rx_enq_replacement_failed++;
1170
1171                 if (m_tail->m_next != NULL) {
1172                         m_prev->m_next = m_tail->m_next;
1173                         m_tail->m_next = NULL;
1174                 }
1175
1176                 m_prev->m_len = clsize;
1177                 m_freem(m_new);
1178         }
1179
1180         return (error);
1181 }
1182
1183 static int
1184 vtnet_newbuf(struct vtnet_softc *sc)
1185 {
1186         struct mbuf *m;
1187         int error;
1188
1189         m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
1190         if (m == NULL)
1191                 return (ENOBUFS);
1192
1193         error = vtnet_enqueue_rxbuf(sc, m);
1194         if (error)
1195                 m_freem(m);
1196
1197         return (error);
1198 }
1199
1200 static void
1201 vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
1202 {
1203         struct virtqueue *vq;
1204         struct mbuf *m;
1205
1206         vq = sc->vtnet_rx_vq;
1207
1208         while (--nbufs > 0) {
1209                 if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
1210                         break;
1211                 vtnet_discard_rxbuf(sc, m);
1212         }
1213 }
1214
1215 static void
1216 vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
1217 {
1218         int error;
1219
1220         /*
1221          * Requeue the discarded mbuf. This should always be
1222          * successful since it was just dequeued.
1223          */
1224         error = vtnet_enqueue_rxbuf(sc, m);
1225         KASSERT(error == 0, ("cannot requeue discarded mbuf"));
1226 }
1227
1228 static int
1229 vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
1230 {
1231         struct sglist sg;
1232         struct sglist_seg segs[VTNET_MAX_RX_SEGS];
1233         struct vtnet_rx_header *rxhdr;
1234         struct virtio_net_hdr *hdr;
1235         uint8_t *mdata;
1236         int offset, error;
1237
1238         ASSERT_SERIALIZED(&sc->vtnet_slz);
1239         if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
1240                 KASSERT(m->m_next == NULL, ("chained Rx mbuf"));
1241
1242         sglist_init(&sg, sc->vtnet_rx_nsegs, segs);
1243
1244         mdata = mtod(m, uint8_t *);
1245         offset = 0;
1246
1247         if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1248                 rxhdr = (struct vtnet_rx_header *) mdata;
1249                 hdr = &rxhdr->vrh_hdr;
1250                 offset += sizeof(struct vtnet_rx_header);
1251
1252                 error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
1253                 KASSERT(error == 0, ("cannot add header to sglist"));
1254         }
1255
1256         error = sglist_append(&sg, mdata + offset, m->m_len - offset);
1257         if (error)
1258                 return (error);
1259
1260         if (m->m_next != NULL) {
1261                 error = sglist_append_mbuf(&sg, m->m_next);
1262                 if (error)
1263                         return (error);
1264         }
1265
1266         return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
1267 }
1268
1269 static void
1270 vtnet_vlan_tag_remove(struct mbuf *m)
1271 {
1272         struct ether_vlan_header *evl;
1273
1274         evl = mtod(m, struct ether_vlan_header *);
1275
1276         m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
1277         m->m_flags |= M_VLANTAG;
1278
1279         /* Strip the 802.1Q header. */
1280         bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
1281             ETHER_HDR_LEN - ETHER_TYPE_LEN);
1282         m_adj(m, ETHER_VLAN_ENCAP_LEN);
1283 }
1284
1285 /*
1286  * Alternative method of doing receive checksum offloading. Rather
1287  * than parsing the received frame down to the IP header, use the
1288  * csum_offset to determine which CSUM_* flags are appropriate. We
1289  * can get by with doing this only because the checksum offsets are
1290  * unique for the things we care about.
1291  */
1292 static int
1293 vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
1294     struct virtio_net_hdr *hdr)
1295 {
1296         struct ether_header *eh;
1297         struct ether_vlan_header *evh;
1298         struct udphdr *udp;
1299         int csum_len;
1300         uint16_t eth_type;
1301
1302         csum_len = hdr->csum_start + hdr->csum_offset;
1303
1304         if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
1305                 return (1);
1306         if (m->m_len < csum_len)
1307                 return (1);
1308
1309         eh = mtod(m, struct ether_header *);
1310         eth_type = ntohs(eh->ether_type);
1311         if (eth_type == ETHERTYPE_VLAN) {
1312                 evh = mtod(m, struct ether_vlan_header *);
1313                 eth_type = ntohs(evh->evl_proto);
1314         }
1315
1316         if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
1317                 sc->vtnet_stats.rx_csum_bad_ethtype++;
1318                 return (1);
1319         }
1320
1321         /* Use the offset to determine the appropriate CSUM_* flags. */
1322         switch (hdr->csum_offset) {
1323         case offsetof(struct udphdr, uh_sum):
1324                 if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
1325                         return (1);
1326                 udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
1327                 if (udp->uh_sum == 0)
1328                         return (0);
1329
1330                 /* FALLTHROUGH */
1331
1332         case offsetof(struct tcphdr, th_sum):
1333                 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1334                 m->m_pkthdr.csum_data = 0xFFFF;
1335                 break;
1336
1337         default:
1338                 sc->vtnet_stats.rx_csum_bad_offset++;
1339                 return (1);
1340         }
1341
1342         sc->vtnet_stats.rx_csum_offloaded++;
1343
1344         return (0);
1345 }
1346
1347 static int
1348 vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
1349 {
1350         struct ifnet *ifp;
1351         struct virtqueue *vq;
1352         struct mbuf *m, *m_tail;
1353         int len;
1354
1355         ifp = sc->vtnet_ifp;
1356         vq = sc->vtnet_rx_vq;
1357         m_tail = m_head;
1358
1359         while (--nbufs > 0) {
1360                 m = virtqueue_dequeue(vq, &len);
1361                 if (m == NULL) {
1362                         ifp->if_ierrors++;
1363                         goto fail;
1364                 }
1365
1366                 if (vtnet_newbuf(sc) != 0) {
1367                         ifp->if_iqdrops++;
1368                         vtnet_discard_rxbuf(sc, m);
1369                         if (nbufs > 1)
1370                                 vtnet_discard_merged_rxbuf(sc, nbufs);
1371                         goto fail;
1372                 }
1373
1374                 if (m->m_len < len)
1375                         len = m->m_len;
1376
1377                 m->m_len = len;
1378                 m->m_flags &= ~M_PKTHDR;
1379
1380                 m_head->m_pkthdr.len += len;
1381                 m_tail->m_next = m;
1382                 m_tail = m;
1383         }
1384
1385         return (0);
1386
1387 fail:
1388         sc->vtnet_stats.rx_mergeable_failed++;
1389         m_freem(m_head);
1390
1391         return (1);
1392 }
1393
1394 static int
1395 vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
1396 {
1397         struct virtio_net_hdr lhdr;
1398         struct ifnet *ifp;
1399         struct virtqueue *vq;
1400         struct mbuf *m;
1401         struct ether_header *eh;
1402         struct virtio_net_hdr *hdr;
1403         struct virtio_net_hdr_mrg_rxbuf *mhdr;
1404         int len, deq, nbufs, adjsz, rx_npkts;
1405
1406         ifp = sc->vtnet_ifp;
1407         vq = sc->vtnet_rx_vq;
1408         hdr = &lhdr;
1409         deq = 0;
1410         rx_npkts = 0;
1411
1412         ASSERT_SERIALIZED(&sc->vtnet_slz);
1413
1414         while (--count >= 0) {
1415                 m = virtqueue_dequeue(vq, &len);
1416                 if (m == NULL)
1417                         break;
1418                 deq++;
1419
1420                 if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
1421                         ifp->if_ierrors++;
1422                         vtnet_discard_rxbuf(sc, m);
1423                         continue;
1424                 }
1425
1426                 if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
1427                         nbufs = 1;
1428                         adjsz = sizeof(struct vtnet_rx_header);
1429                         /*
1430                          * Account for our pad between the header and
1431                          * the actual start of the frame.
1432                          */
1433                         len += VTNET_RX_HEADER_PAD;
1434                 } else {
1435                         mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
1436                         nbufs = mhdr->num_buffers;
1437                         adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1438                 }
1439
1440                 if (vtnet_replace_rxbuf(sc, m, len) != 0) {
1441                         ifp->if_iqdrops++;
1442                         vtnet_discard_rxbuf(sc, m);
1443                         if (nbufs > 1)
1444                                 vtnet_discard_merged_rxbuf(sc, nbufs);
1445                         continue;
1446                 }
1447
1448                 m->m_pkthdr.len = len;
1449                 m->m_pkthdr.rcvif = ifp;
1450                 m->m_pkthdr.csum_flags = 0;
1451
1452                 if (nbufs > 1) {
1453                         if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
1454                                 continue;
1455                 }
1456
1457                 ifp->if_ipackets++;
1458
1459                 /*
1460                  * Save copy of header before we strip it. For both mergeable
1461                  * and non-mergeable, the VirtIO header is placed first in the
1462                  * mbuf's data. We no longer need num_buffers, so always use a
1463                  * virtio_net_hdr.
1464                  */
1465                 memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
1466                 m_adj(m, adjsz);
1467
1468                 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
1469                         eh = mtod(m, struct ether_header *);
1470                         if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1471                                 vtnet_vlan_tag_remove(m);
1472
1473                                 /*
1474                                  * With the 802.1Q header removed, update the
1475                                  * checksum starting location accordingly.
1476                                  */
1477                                 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
1478                                         hdr->csum_start -=
1479                                             ETHER_VLAN_ENCAP_LEN;
1480                         }
1481                 }
1482
1483                 if (ifp->if_capenable & IFCAP_RXCSUM &&
1484                     hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1485                         if (vtnet_rx_csum(sc, m, hdr) != 0)
1486                                 sc->vtnet_stats.rx_csum_failed++;
1487                 }
1488
1489                 lwkt_serialize_exit(&sc->vtnet_slz);
1490                 rx_npkts++;
1491                 ifp->if_input(ifp, m, NULL, -1);
1492                 lwkt_serialize_enter(&sc->vtnet_slz);
1493
1494                 /*
1495                  * The interface may have been stopped while we were
1496                  * passing the packet up the network stack.
1497                  */
1498                 if ((ifp->if_flags & IFF_RUNNING) == 0)
1499                         break;
1500         }
1501
1502         virtqueue_notify(vq, &sc->vtnet_slz);
1503
1504         if (rx_npktsp != NULL)
1505                 *rx_npktsp = rx_npkts;
1506
1507         return (count > 0 ? 0 : EAGAIN);
1508 }
1509
1510 static void
1511 vtnet_rx_intr_task(void *arg)
1512 {
1513         struct vtnet_softc *sc;
1514         struct ifnet *ifp;
1515         int more;
1516
1517         sc = arg;
1518         ifp = sc->vtnet_ifp;
1519
1520 next:
1521 //      lwkt_serialize_enter(&sc->vtnet_slz);
1522
1523         if ((ifp->if_flags & IFF_RUNNING) == 0) {
1524                 vtnet_enable_rx_intr(sc);
1525 //              lwkt_serialize_exit(&sc->vtnet_slz);
1526                 return;
1527         }
1528
1529         more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
1530         if (!more && vtnet_enable_rx_intr(sc) != 0) {
1531                 vtnet_disable_rx_intr(sc);
1532                 more = 1;
1533         }
1534
1535 //      lwkt_serialize_exit(&sc->vtnet_slz);
1536
1537         if (more) {
1538                 sc->vtnet_stats.rx_task_rescheduled++;
1539                 goto next;
1540         }
1541 }
1542
1543 static int
1544 vtnet_rx_vq_intr(void *xsc)
1545 {
1546         struct vtnet_softc *sc;
1547
1548         sc = xsc;
1549
1550         vtnet_disable_rx_intr(sc);
1551         vtnet_rx_intr_task(sc);
1552
1553         return (1);
1554 }
1555
1556 static void
1557 vtnet_enqueue_txhdr(struct vtnet_softc *sc, struct vtnet_tx_header *txhdr)
1558 {
1559         bzero(txhdr, sizeof(*txhdr));
1560         SLIST_INSERT_HEAD(&sc->vtnet_txhdr_free, txhdr, link);
1561 }
1562
1563 static void
1564 vtnet_txeof(struct vtnet_softc *sc)
1565 {
1566         struct virtqueue *vq;
1567         struct ifnet *ifp;
1568         struct vtnet_tx_header *txhdr;
1569         int deq;
1570
1571         vq = sc->vtnet_tx_vq;
1572         ifp = sc->vtnet_ifp;
1573         deq = 0;
1574
1575         ASSERT_SERIALIZED(&sc->vtnet_slz);
1576
1577         while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
1578                 deq++;
1579                 ifp->if_opackets++;
1580                 m_freem(txhdr->vth_mbuf);
1581                 vtnet_enqueue_txhdr(sc, txhdr);
1582         }
1583
1584         if (deq > 0) {
1585                 ifq_clr_oactive(&ifp->if_snd);
1586                 if (virtqueue_empty(vq))
1587                         sc->vtnet_watchdog_timer = 0;
1588         }
1589 }
1590
1591 static struct mbuf *
1592 vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
1593     struct virtio_net_hdr *hdr)
1594 {
1595         struct ifnet *ifp;
1596         struct ether_header *eh;
1597         struct ether_vlan_header *evh;
1598         struct ip *ip;
1599         struct ip6_hdr *ip6;
1600         struct tcphdr *tcp;
1601         int ip_offset;
1602         uint16_t eth_type, csum_start;
1603         uint8_t ip_proto, gso_type;
1604
1605         ifp = sc->vtnet_ifp;
1606         M_ASSERTPKTHDR(m);
1607
1608         ip_offset = sizeof(struct ether_header);
1609         if (m->m_len < ip_offset) {
1610                 if ((m = m_pullup(m, ip_offset)) == NULL)
1611                         return (NULL);
1612         }
1613
1614         eh = mtod(m, struct ether_header *);
1615         eth_type = ntohs(eh->ether_type);
1616         if (eth_type == ETHERTYPE_VLAN) {
1617                 ip_offset = sizeof(struct ether_vlan_header);
1618                 if (m->m_len < ip_offset) {
1619                         if ((m = m_pullup(m, ip_offset)) == NULL)
1620                                 return (NULL);
1621                 }
1622                 evh = mtod(m, struct ether_vlan_header *);
1623                 eth_type = ntohs(evh->evl_proto);
1624         }
1625
1626         switch (eth_type) {
1627         case ETHERTYPE_IP:
1628                 if (m->m_len < ip_offset + sizeof(struct ip)) {
1629                         m = m_pullup(m, ip_offset + sizeof(struct ip));
1630                         if (m == NULL)
1631                                 return (NULL);
1632                 }
1633
1634                 ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
1635                 ip_proto = ip->ip_p;
1636                 csum_start = ip_offset + (ip->ip_hl << 2);
1637                 gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1638                 break;
1639
1640         case ETHERTYPE_IPV6:
1641                 if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
1642                         m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
1643                         if (m == NULL)
1644                                 return (NULL);
1645                 }
1646
1647                 ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
1648                 /*
1649                  * XXX Assume no extension headers are present. Presently,
1650                  * this will always be true in the case of TSO, and FreeBSD
1651                  * does not perform checksum offloading of IPv6 yet.
1652                  */
1653                 ip_proto = ip6->ip6_nxt;
1654                 csum_start = ip_offset + sizeof(struct ip6_hdr);
1655                 gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1656                 break;
1657
1658         default:
1659                 return (m);
1660         }
1661
1662         if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
1663                 hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
1664                 hdr->csum_start = csum_start;
1665                 hdr->csum_offset = m->m_pkthdr.csum_data;
1666
1667                 sc->vtnet_stats.tx_csum_offloaded++;
1668         }
1669
1670         if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1671                 if (ip_proto != IPPROTO_TCP)
1672                         return (m);
1673
1674                 if (m->m_len < csum_start + sizeof(struct tcphdr)) {
1675                         m = m_pullup(m, csum_start + sizeof(struct tcphdr));
1676                         if (m == NULL)
1677                                 return (NULL);
1678                 }
1679
1680                 tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
1681                 hdr->gso_type = gso_type;
1682                 hdr->hdr_len = csum_start + (tcp->th_off << 2);
1683                 hdr->gso_size = m->m_pkthdr.tso_segsz;
1684
1685                 if (tcp->th_flags & TH_CWR) {
1686                         /*
1687                          * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
1688                          * ECN support is only configurable globally with the
1689                          * net.inet.tcp.ecn.enable sysctl knob.
1690                          */
1691                         if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
1692                                 if_printf(ifp, "TSO with ECN not supported "
1693                                     "by host\n");
1694                                 m_freem(m);
1695                                 return (NULL);
1696                         }
1697
1698                         hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1699                 }
1700
1701                 sc->vtnet_stats.tx_tso_offloaded++;
1702         }
1703
1704         return (m);
1705 }
1706
1707 static int
1708 vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
1709     struct vtnet_tx_header *txhdr)
1710 {
1711         struct sglist sg;
1712         struct sglist_seg segs[VTNET_MAX_TX_SEGS];
1713         struct virtqueue *vq;
1714         struct mbuf *m;
1715         int error;
1716
1717         vq = sc->vtnet_tx_vq;
1718         m = *m_head;
1719
1720         sglist_init(&sg, sc->vtnet_tx_nsegs, segs);
1721         error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
1722         KASSERT(error == 0 && sg.sg_nseg == 1,
1723             ("%s: error %d adding header to sglist", __func__, error));
1724
1725         error = sglist_append_mbuf(&sg, m);
1726         if (error) {
1727                 m = m_defrag(m, M_NOWAIT);
1728                 if (m == NULL)
1729                         goto fail;
1730
1731                 *m_head = m;
1732                 sc->vtnet_stats.tx_defragged++;
1733
1734                 error = sglist_append_mbuf(&sg, m);
1735                 if (error)
1736                         goto fail;
1737         }
1738
1739         txhdr->vth_mbuf = m;
1740         error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);
1741
1742         return (error);
1743
1744 fail:
1745         sc->vtnet_stats.tx_defrag_failed++;
1746         m_freem(*m_head);
1747         *m_head = NULL;
1748
1749         return (ENOBUFS);
1750 }
1751
1752 static struct mbuf *
1753 vtnet_vlan_tag_insert(struct mbuf *m)
1754 {
1755         struct mbuf *n;
1756         struct ether_vlan_header *evl;
1757
1758         if (M_WRITABLE(m) == 0) {
1759                 n = m_dup(m, M_NOWAIT);
1760                 m_freem(m);
1761                 if ((m = n) == NULL)
1762                         return (NULL);
1763         }
1764
1765         M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
1766         if (m == NULL)
1767                 return (NULL);
1768         if (m->m_len < sizeof(struct ether_vlan_header)) {
1769                 m = m_pullup(m, sizeof(struct ether_vlan_header));
1770                 if (m == NULL)
1771                         return (NULL);
1772         }
1773
1774         /* Insert 802.1Q header into the existing Ethernet header. */
1775         evl = mtod(m, struct ether_vlan_header *);
1776         bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
1777               (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
1778         evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
1779         evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
1780         m->m_flags &= ~M_VLANTAG;
1781
1782         return (m);
1783 }
1784
1785 static int
1786 vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
1787 {
1788         struct vtnet_tx_header *txhdr;
1789         struct virtio_net_hdr *hdr;
1790         struct mbuf *m;
1791         int error;
1792
1793         txhdr = SLIST_FIRST(&sc->vtnet_txhdr_free);
1794         if (txhdr == NULL)
1795                 return (ENOBUFS);
1796         SLIST_REMOVE_HEAD(&sc->vtnet_txhdr_free, link);
1797
1798         /*
1799          * Always use the non-mergeable header to simplify things. When
1800          * the mergeable feature is negotiated, the num_buffers field
1801          * must be set to zero. We use vtnet_hdr_size later to enqueue
1802          * the correct header size to the host.
1803          */
1804         hdr = &txhdr->vth_uhdr.hdr;
1805         m = *m_head;
1806
1807         error = ENOBUFS;
1808
1809         if (m->m_flags & M_VLANTAG) {
1810                 //m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
1811                 m = vtnet_vlan_tag_insert(m);
1812                 if ((*m_head = m) == NULL)
1813                         goto fail;
1814                 m->m_flags &= ~M_VLANTAG;
1815         }
1816
1817         if (m->m_pkthdr.csum_flags != 0) {
1818                 m = vtnet_tx_offload(sc, m, hdr);
1819                 if ((*m_head = m) == NULL)
1820                         goto fail;
1821         }
1822
1823         error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
1824 fail:
1825         if (error != 0)
1826                 vtnet_enqueue_txhdr(sc, txhdr);
1827         return (error);
1828 }
1829
1830 static void
1831 vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1832 {
1833         struct vtnet_softc *sc;
1834
1835         sc = ifp->if_softc;
1836
1837         ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
1838         lwkt_serialize_enter(&sc->vtnet_slz);
1839         vtnet_start_locked(ifp, ifsq);
1840         lwkt_serialize_exit(&sc->vtnet_slz);
1841 }
1842
1843 static void
1844 vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq)
1845 {
1846         struct vtnet_softc *sc;
1847         struct virtqueue *vq;
1848         struct mbuf *m0;
1849         int enq;
1850
1851         sc = ifp->if_softc;
1852         vq = sc->vtnet_tx_vq;
1853         enq = 0;
1854
1855         ASSERT_SERIALIZED(&sc->vtnet_slz);
1856
1857         if ((ifp->if_flags & (IFF_RUNNING)) !=
1858             IFF_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0))
1859                 return;
1860
1861 #ifdef VTNET_TX_INTR_MODERATION
1862         if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
1863                 vtnet_txeof(sc);
1864 #endif
1865
1866         while (!ifsq_is_empty(ifsq)) {
1867                 if (virtqueue_full(vq)) {
1868                         ifq_set_oactive(&ifp->if_snd);
1869                         break;
1870                 }
1871
1872                 m0 = ifq_dequeue(&ifp->if_snd);
1873                 if (m0 == NULL)
1874                         break;
1875
1876                 if (vtnet_encap(sc, &m0) != 0) {
1877                         if (m0 == NULL)
1878                                 break;
1879                         ifq_prepend(&ifp->if_snd, m0);
1880                         ifq_set_oactive(&ifp->if_snd);
1881                         break;
1882                 }
1883
1884                 enq++;
1885                 ETHER_BPF_MTAP(ifp, m0);
1886         }
1887
1888         if (enq > 0) {
1889                 virtqueue_notify(vq, &sc->vtnet_slz);
1890                 sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT;
1891         }
1892 }
1893
1894 static void
1895 vtnet_tick(void *xsc)
1896 {
1897         struct vtnet_softc *sc;
1898
1899         sc = xsc;
1900
1901 #if 0
1902         ASSERT_SERIALIZED(&sc->vtnet_slz);
1903 #ifdef VTNET_DEBUG
1904         virtqueue_dump(sc->vtnet_rx_vq);
1905         virtqueue_dump(sc->vtnet_tx_vq);
1906 #endif
1907
1908         vtnet_watchdog(sc);
1909         callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
1910 #endif
1911 }
1912
1913 static void
1914 vtnet_tx_intr_task(void *arg)
1915 {
1916         struct vtnet_softc *sc;
1917         struct ifnet *ifp;
1918         struct ifaltq_subque *ifsq;
1919
1920         sc = arg;
1921         ifp = sc->vtnet_ifp;
1922         ifsq = ifq_get_subq_default(&ifp->if_snd);
1923
1924 next:
1925 //      lwkt_serialize_enter(&sc->vtnet_slz);
1926
1927         if ((ifp->if_flags & IFF_RUNNING) == 0) {
1928                 vtnet_enable_tx_intr(sc);
1929 //              lwkt_serialize_exit(&sc->vtnet_slz);
1930                 return;
1931         }
1932
1933         vtnet_txeof(sc);
1934
1935         if (!ifsq_is_empty(ifsq))
1936                 vtnet_start_locked(ifp, ifsq);
1937
1938         if (vtnet_enable_tx_intr(sc) != 0) {
1939                 vtnet_disable_tx_intr(sc);
1940                 sc->vtnet_stats.tx_task_rescheduled++;
1941 //              lwkt_serialize_exit(&sc->vtnet_slz);
1942                 goto next;
1943         }
1944
1945 //      lwkt_serialize_exit(&sc->vtnet_slz);
1946 }
1947
1948 static int
1949 vtnet_tx_vq_intr(void *xsc)
1950 {
1951         struct vtnet_softc *sc;
1952
1953         sc = xsc;
1954
1955         vtnet_disable_tx_intr(sc);
1956         vtnet_tx_intr_task(sc);
1957
1958         return (1);
1959 }
1960
1961 static void
1962 vtnet_stop(struct vtnet_softc *sc)
1963 {
1964         device_t dev;
1965         struct ifnet *ifp;
1966
1967         dev = sc->vtnet_dev;
1968         ifp = sc->vtnet_ifp;
1969
1970         ASSERT_SERIALIZED(&sc->vtnet_slz);
1971
1972         sc->vtnet_watchdog_timer = 0;
1973         callout_stop(&sc->vtnet_tick_ch);
1974         ifq_clr_oactive(&ifp->if_snd);
1975         ifp->if_flags &= ~(IFF_RUNNING);
1976
1977         vtnet_disable_rx_intr(sc);
1978         vtnet_disable_tx_intr(sc);
1979
1980         /*
1981          * Stop the host VirtIO adapter. Note this will reset the host
1982          * adapter's state back to the pre-initialized state, so in
1983          * order to make the device usable again, we must drive it
1984          * through virtio_reinit() and virtio_reinit_complete().
1985          */
1986         virtio_stop(dev);
1987
1988         sc->vtnet_flags &= ~VTNET_FLAG_LINK;
1989
1990         vtnet_free_rx_mbufs(sc);
1991         vtnet_free_tx_mbufs(sc);
1992 }
1993
1994 static int
1995 vtnet_virtio_reinit(struct vtnet_softc *sc)
1996 {
1997         device_t dev;
1998         struct ifnet *ifp;
1999         uint64_t features;
2000         int error;
2001
2002         dev = sc->vtnet_dev;
2003         ifp = sc->vtnet_ifp;
2004         features = sc->vtnet_features;
2005
2006         /*
2007          * Re-negotiate with the host, removing any disabled receive
2008          * features. Transmit features are disabled only on our side
2009          * via if_capenable and if_hwassist.
2010          */
2011
2012         if (ifp->if_capabilities & IFCAP_RXCSUM) {
2013                 if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
2014                         features &= ~VIRTIO_NET_F_GUEST_CSUM;
2015         }
2016
2017         if (ifp->if_capabilities & IFCAP_LRO) {
2018                 if ((ifp->if_capenable & IFCAP_LRO) == 0)
2019                         features &= ~VTNET_LRO_FEATURES;
2020         }
2021
2022         if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
2023                 if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
2024                         features &= ~VIRTIO_NET_F_CTRL_VLAN;
2025         }
2026
2027         error = virtio_reinit(dev, features);
2028         if (error)
2029                 device_printf(dev, "virtio reinit error %d\n", error);
2030
2031         return (error);
2032 }
2033
2034 static void
2035 vtnet_init_locked(struct vtnet_softc *sc)
2036 {
2037         device_t dev;
2038         struct ifnet *ifp;
2039         int error;
2040
2041         dev = sc->vtnet_dev;
2042         ifp = sc->vtnet_ifp;
2043
2044         ASSERT_SERIALIZED(&sc->vtnet_slz);
2045
2046         if (ifp->if_flags & IFF_RUNNING)
2047                 return;
2048
2049         /* Stop host's adapter, cancel any pending I/O. */
2050         vtnet_stop(sc);
2051
2052         /* Reinitialize the host device. */
2053         error = vtnet_virtio_reinit(sc);
2054         if (error) {
2055                 device_printf(dev,
2056                     "reinitialization failed, stopping device...\n");
2057                 vtnet_stop(sc);
2058                 return;
2059         }
2060
2061         /* Update host with assigned MAC address. */
2062         bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
2063         vtnet_set_hwaddr(sc);
2064
2065         ifp->if_hwassist = 0;
2066         if (ifp->if_capenable & IFCAP_TXCSUM)
2067                 ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
2068         if (ifp->if_capenable & IFCAP_TSO4)
2069                 ifp->if_hwassist |= CSUM_TSO;
2070
2071         error = vtnet_init_rx_vq(sc);
2072         if (error) {
2073                 device_printf(dev,
2074                     "cannot allocate mbufs for Rx virtqueue\n");
2075                 vtnet_stop(sc);
2076                 return;
2077         }
2078
2079         if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
2080                 if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
2081                         /* Restore promiscuous and all-multicast modes. */
2082                         vtnet_rx_filter(sc);
2083
2084                         /* Restore filtered MAC addresses. */
2085                         vtnet_rx_filter_mac(sc);
2086                 }
2087
2088                 /* Restore VLAN filters. */
2089                 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
2090                         vtnet_rx_filter_vlan(sc);
2091         }
2092
2093         {
2094                 vtnet_enable_rx_intr(sc);
2095                 vtnet_enable_tx_intr(sc);
2096         }
2097
2098         ifp->if_flags |= IFF_RUNNING;
2099         ifq_clr_oactive(&ifp->if_snd);
2100
2101         virtio_reinit_complete(dev);
2102
2103         vtnet_update_link_status(sc);
2104         callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
2105 }
2106
2107 static void
2108 vtnet_init(void *xsc)
2109 {
2110         struct vtnet_softc *sc;
2111
2112         sc = xsc;
2113
2114         lwkt_serialize_enter(&sc->vtnet_slz);
2115         vtnet_init_locked(sc);
2116         lwkt_serialize_exit(&sc->vtnet_slz);
2117 }
2118
2119 static void
2120 vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
2121     struct sglist *sg, int readable, int writable)
2122 {
2123         struct virtqueue *vq;
2124         void *c;
2125
2126         vq = sc->vtnet_ctrl_vq;
2127
2128         ASSERT_SERIALIZED(&sc->vtnet_slz);
2129         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
2130             ("no control virtqueue"));
2131         KASSERT(virtqueue_empty(vq),
2132             ("control command already enqueued"));
2133
2134         if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
2135                 return;
2136
2137         virtqueue_notify(vq, &sc->vtnet_slz);
2138
2139         /*
2140          * Poll until the command is complete. Previously, we would
2141          * sleep until the control virtqueue interrupt handler woke
2142          * us up, but dropping the VTNET_MTX leads to serialization
2143          * difficulties.
2144          *
2145          * Furthermore, it appears QEMU/KVM only allocates three MSIX
2146          * vectors. Two of those vectors are needed for the Rx and Tx
2147          * virtqueues. We do not support sharing both a Vq and config
2148          * changed notification on the same MSIX vector.
2149          */
2150         c = virtqueue_poll(vq, NULL);
2151         KASSERT(c == cookie, ("unexpected control command response"));
2152 }
2153
2154 static int
2155 vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
2156 {
2157         struct {
2158                 struct virtio_net_ctrl_hdr hdr __aligned(2);
2159                 uint8_t pad1;
2160                 char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8);
2161                 uint8_t pad2;
2162                 uint8_t ack;
2163         } s;
2164         struct sglist_seg segs[3];
2165         struct sglist sg;
2166         int error;
2167
2168         s.hdr.class = VIRTIO_NET_CTRL_MAC;
2169         s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
2170         s.ack = VIRTIO_NET_ERR;
2171
2172         /* Copy the mac address into physically contiguous memory */
2173         memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN);
2174
2175         sglist_init(&sg, 3, segs);
2176         error = 0;
2177         error |= sglist_append(&sg, &s.hdr,
2178             sizeof(struct virtio_net_ctrl_hdr));
2179         error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN);
2180         error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2181         KASSERT(error == 0 && sg.sg_nseg == 3,
2182             ("%s: error %d adding set MAC msg to sglist", __func__, error));
2183
2184         vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2185
2186         return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2187 }
2188
2189 static void
2190 vtnet_rx_filter(struct vtnet_softc *sc)
2191 {
2192         device_t dev;
2193         struct ifnet *ifp;
2194
2195         dev = sc->vtnet_dev;
2196         ifp = sc->vtnet_ifp;
2197
2198         ASSERT_SERIALIZED(&sc->vtnet_slz);
2199         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2200             ("CTRL_RX feature not negotiated"));
2201
2202         if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
2203                 device_printf(dev, "cannot %s promiscuous mode\n",
2204                     (ifp->if_flags & IFF_PROMISC) ? "enable" : "disable");
2205
2206         if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
2207                 device_printf(dev, "cannot %s all-multicast mode\n",
2208                     (ifp->if_flags & IFF_ALLMULTI) ? "enable" : "disable");
2209 }
2210
2211 static int
2212 vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
2213 {
2214         struct sglist_seg segs[3];
2215         struct sglist sg;
2216         struct {
2217                 struct virtio_net_ctrl_hdr hdr __aligned(2);
2218                 uint8_t pad1;
2219                 uint8_t onoff;
2220                 uint8_t pad2;
2221                 uint8_t ack;
2222         } s;
2223         int error;
2224
2225         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2226             ("%s: CTRL_RX feature not negotiated", __func__));
2227
2228         s.hdr.class = VIRTIO_NET_CTRL_RX;
2229         s.hdr.cmd = cmd;
2230         s.onoff = !!on;
2231         s.ack = VIRTIO_NET_ERR;
2232
2233         sglist_init(&sg, 3, segs);
2234         error = 0;
2235         error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2236         error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
2237         error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2238         KASSERT(error == 0 && sg.sg_nseg == 3,
2239             ("%s: error %d adding Rx message to sglist", __func__, error));
2240
2241         vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2242
2243         return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2244 }
2245
2246 static int
2247 vtnet_set_promisc(struct vtnet_softc *sc, int on)
2248 {
2249
2250         return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
2251 }
2252
2253 static int
2254 vtnet_set_allmulti(struct vtnet_softc *sc, int on)
2255 {
2256
2257         return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
2258 }
2259
2260 static void
2261 vtnet_rx_filter_mac(struct vtnet_softc *sc)
2262 {
2263         struct virtio_net_ctrl_hdr hdr __aligned(2);
2264         struct vtnet_mac_filter *filter;
2265         struct sglist_seg segs[4];
2266         struct sglist sg;
2267         struct ifnet *ifp;
2268         struct ifaddr *ifa;
2269         struct ifaddr_container *ifac;
2270         struct ifmultiaddr *ifma;
2271         int ucnt, mcnt, promisc, allmulti, error;
2272         uint8_t ack;
2273
2274         ifp = sc->vtnet_ifp;
2275         ucnt = 0;
2276         mcnt = 0;
2277         promisc = 0;
2278         allmulti = 0;
2279
2280         ASSERT_SERIALIZED(&sc->vtnet_slz);
2281         KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
2282             ("%s: CTRL_RX feature not negotiated", __func__));
2283
2284         /* Use the MAC filtering table allocated in vtnet_attach. */
2285         filter = sc->vtnet_macfilter;
2286         memset(filter, 0, sizeof(struct vtnet_mac_filter));
2287
2288         /* Unicast MAC addresses: */
2289         //if_addr_rlock(ifp);
2290         TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
2291                 ifa = ifac->ifa;
2292                 if (ifa->ifa_addr->sa_family != AF_LINK)
2293                         continue;
2294                 else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2295                     sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
2296                         continue;
2297                 else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
2298                         promisc = 1;
2299                         break;
2300                 }
2301
2302                 bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
2303                     &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
2304                 ucnt++;
2305         }
2306         //if_addr_runlock(ifp);
2307
2308         if (promisc != 0) {
2309                 filter->vmf_unicast.nentries = 0;
2310                 if_printf(ifp, "more than %d MAC addresses assigned, "
2311                     "falling back to promiscuous mode\n",
2312                     VTNET_MAX_MAC_ENTRIES);
2313         } else
2314                 filter->vmf_unicast.nentries = ucnt;
2315
2316         /* Multicast MAC addresses: */
2317         //if_maddr_rlock(ifp);
2318         TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2319                 if (ifma->ifma_addr->sa_family != AF_LINK)
2320                         continue;
2321                 else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
2322                         allmulti = 1;
2323                         break;
2324                 }
2325
2326                 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2327                     &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
2328                 mcnt++;
2329         }
2330         //if_maddr_runlock(ifp);
2331
2332         if (allmulti != 0) {
2333                 filter->vmf_multicast.nentries = 0;
2334                 if_printf(ifp, "more than %d multicast MAC addresses "
2335                     "assigned, falling back to all-multicast mode\n",
2336                     VTNET_MAX_MAC_ENTRIES);
2337         } else
2338                 filter->vmf_multicast.nentries = mcnt;
2339
2340         if (promisc != 0 && allmulti != 0)
2341                 goto out;
2342
2343         hdr.class = VIRTIO_NET_CTRL_MAC;
2344         hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
2345         ack = VIRTIO_NET_ERR;
2346
2347         sglist_init(&sg, 4, segs);
2348         error = 0;
2349         error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
2350         error |= sglist_append(&sg, &filter->vmf_unicast,
2351             sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
2352         error |= sglist_append(&sg, &filter->vmf_multicast,
2353             sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
2354         error |= sglist_append(&sg, &ack, sizeof(uint8_t));
2355         KASSERT(error == 0 && sg.sg_nseg == 4,
2356             ("%s: error %d adding MAC filter msg to sglist", __func__, error));
2357
2358         vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);
2359
2360         if (ack != VIRTIO_NET_OK)
2361                 if_printf(ifp, "error setting host MAC filter table\n");
2362
2363 out:
2364         if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
2365                 if_printf(ifp, "cannot enable promiscuous mode\n");
2366         if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
2367                 if_printf(ifp, "cannot enable all-multicast mode\n");
2368 }
2369
2370 static int
2371 vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2372 {
2373         struct sglist_seg segs[3];
2374         struct sglist sg;
2375         struct {
2376                 struct virtio_net_ctrl_hdr hdr __aligned(2);
2377                 uint8_t pad1;
2378                 uint16_t tag;
2379                 uint8_t pad2;
2380                 uint8_t ack;
2381         } s;
2382         int error;
2383
2384         s.hdr.class = VIRTIO_NET_CTRL_VLAN;
2385         s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
2386         s.tag = tag;
2387         s.ack = VIRTIO_NET_ERR;
2388
2389         sglist_init(&sg, 3, segs);
2390         error = 0;
2391         error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
2392         error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
2393         error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
2394         KASSERT(error == 0 && sg.sg_nseg == 3,
2395             ("%s: error %d adding VLAN message to sglist", __func__, error));
2396
2397         vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);
2398
2399         return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
2400 }
2401
2402 static void
2403 vtnet_rx_filter_vlan(struct vtnet_softc *sc)
2404 {
2405         uint32_t w;
2406         uint16_t tag;
2407         int i, bit, nvlans;
2408
2409         ASSERT_SERIALIZED(&sc->vtnet_slz);
2410         KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
2411             ("%s: VLAN_FILTER feature not negotiated", __func__));
2412
2413         nvlans = sc->vtnet_nvlans;
2414
2415         /* Enable the filter for each configured VLAN. */
2416         for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
2417                 w = sc->vtnet_vlan_shadow[i];
2418                 while ((bit = ffs(w) - 1) != -1) {
2419                         w &= ~(1 << bit);
2420                         tag = sizeof(w) * CHAR_BIT * i + bit;
2421                         nvlans--;
2422
2423                         if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
2424                                 device_printf(sc->vtnet_dev,
2425                                     "cannot enable VLAN %d filter\n", tag);
2426                         }
2427                 }
2428         }
2429
2430         KASSERT(nvlans == 0, ("VLAN count incorrect"));
2431 }
2432
2433 static void
2434 vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
2435 {
2436         struct ifnet *ifp;
2437         int idx, bit;
2438
2439         ifp = sc->vtnet_ifp;
2440         idx = (tag >> 5) & 0x7F;
2441         bit = tag & 0x1F;
2442
2443         if (tag == 0 || tag > 4095)
2444                 return;
2445
2446         lwkt_serialize_enter(&sc->vtnet_slz);
2447
2448         /* Update shadow VLAN table. */
2449         if (add) {
2450                 sc->vtnet_nvlans++;
2451                 sc->vtnet_vlan_shadow[idx] |= (1 << bit);
2452         } else {
2453                 sc->vtnet_nvlans--;
2454                 sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
2455         }
2456
2457         if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
2458             vtnet_exec_vlan_filter(sc, add, tag) != 0) {
2459                 device_printf(sc->vtnet_dev,
2460                     "cannot %s VLAN %d %s the host filter table\n",
2461                     add ? "add" : "remove", tag, add ? "to" : "from");
2462         }
2463
2464         lwkt_serialize_exit(&sc->vtnet_slz);
2465 }
2466
2467 static void
2468 vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2469 {
2470
2471         if (ifp->if_softc != arg)
2472                 return;
2473
2474         vtnet_update_vlan_filter(arg, 1, tag);
2475 }
2476
2477 static void
2478 vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
2479 {
2480
2481         if (ifp->if_softc != arg)
2482                 return;
2483
2484         vtnet_update_vlan_filter(arg, 0, tag);
2485 }
2486
2487 static int
2488 vtnet_ifmedia_upd(struct ifnet *ifp)
2489 {
2490         struct vtnet_softc *sc;
2491         struct ifmedia *ifm;
2492
2493         sc = ifp->if_softc;
2494         ifm = &sc->vtnet_media;
2495
2496         if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2497                 return (EINVAL);
2498
2499         return (0);
2500 }
2501
2502 static void
2503 vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2504 {
2505         struct vtnet_softc *sc;
2506
2507         sc = ifp->if_softc;
2508
2509         ifmr->ifm_status = IFM_AVALID;
2510         ifmr->ifm_active = IFM_ETHER;
2511
2512         lwkt_serialize_enter(&sc->vtnet_slz);
2513         if (vtnet_is_link_up(sc) != 0) {
2514                 ifmr->ifm_status |= IFM_ACTIVE;
2515                 ifmr->ifm_active |= VTNET_MEDIATYPE;
2516         } else
2517                 ifmr->ifm_active |= IFM_NONE;
2518         lwkt_serialize_exit(&sc->vtnet_slz);
2519 }
2520
2521 static void
2522 vtnet_add_statistics(struct vtnet_softc *sc)
2523 {
2524         device_t dev;
2525         struct vtnet_statistics *stats;
2526         struct sysctl_ctx_list *ctx;
2527         struct sysctl_oid *tree;
2528         struct sysctl_oid_list *child;
2529
2530         dev = sc->vtnet_dev;
2531         stats = &sc->vtnet_stats;
2532         ctx = device_get_sysctl_ctx(dev);
2533         tree = device_get_sysctl_tree(dev);
2534         child = SYSCTL_CHILDREN(tree);
2535
2536         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
2537             CTLFLAG_RD, &stats->mbuf_alloc_failed, 0,
2538             "Mbuf cluster allocation failures");
2539
2540         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
2541             CTLFLAG_RD, &stats->rx_frame_too_large, 0,
2542             "Received frame larger than the mbuf chain");
2543         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
2544             CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0,
2545             "Enqueuing the replacement receive mbuf failed");
2546         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
2547             CTLFLAG_RD, &stats->rx_mergeable_failed, 0,
2548             "Mergeable buffers receive failures");
2549         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
2550             CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0,
2551             "Received checksum offloaded buffer with unsupported "
2552             "Ethernet type");
2553         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
2554             CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0,
2555             "Received checksum offloaded buffer with incorrect IP protocol");
2556         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
2557             CTLFLAG_RD, &stats->rx_csum_bad_offset, 0,
2558             "Received checksum offloaded buffer with incorrect offset");
2559         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
2560             CTLFLAG_RD, &stats->rx_csum_failed, 0,
2561             "Received buffer checksum offload failed");
2562         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
2563             CTLFLAG_RD, &stats->rx_csum_offloaded, 0,
2564             "Received buffer checksum offload succeeded");
2565         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
2566             CTLFLAG_RD, &stats->rx_task_rescheduled, 0,
2567             "Times the receive interrupt task rescheduled itself");
2568
2569         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
2570             CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0,
2571             "Aborted transmit of checksum offloaded buffer with unknown "
2572             "Ethernet type");
2573         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
2574             CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0,
2575             "Aborted transmit of TSO buffer with unknown Ethernet type");
2576         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
2577             CTLFLAG_RD, &stats->tx_defragged, 0,
2578             "Transmit mbufs defragged");
2579         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
2580             CTLFLAG_RD, &stats->tx_defrag_failed, 0,
2581             "Aborted transmit of buffer because defrag failed");
2582         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
2583             CTLFLAG_RD, &stats->tx_csum_offloaded, 0,
2584             "Offloaded checksum of transmitted buffer");
2585         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
2586             CTLFLAG_RD, &stats->tx_tso_offloaded, 0,
2587             "Segmentation offload of transmitted buffer");
2588         SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
2589             CTLFLAG_RD, &stats->tx_task_rescheduled, 0,
2590             "Times the transmit interrupt task rescheduled itself");
2591 }
2592
2593 static int
2594 vtnet_enable_rx_intr(struct vtnet_softc *sc)
2595 {
2596
2597         return (virtqueue_enable_intr(sc->vtnet_rx_vq));
2598 }
2599
2600 static void
2601 vtnet_disable_rx_intr(struct vtnet_softc *sc)
2602 {
2603
2604         virtqueue_disable_intr(sc->vtnet_rx_vq);
2605 }
2606
2607 static int
2608 vtnet_enable_tx_intr(struct vtnet_softc *sc)
2609 {
2610
2611 #ifdef VTNET_TX_INTR_MODERATION
2612         return (0);
2613 #else
2614         return (virtqueue_enable_intr(sc->vtnet_tx_vq));
2615 #endif
2616 }
2617
2618 static void
2619 vtnet_disable_tx_intr(struct vtnet_softc *sc)
2620 {
2621
2622         virtqueue_disable_intr(sc->vtnet_tx_vq);
2623 }